diff --git a/.github/images/actions.png b/.github/images/actions.png new file mode 100644 index 0000000..fffa400 Binary files /dev/null and b/.github/images/actions.png differ diff --git a/.github/images/approve-pr.png b/.github/images/approve-pr.png new file mode 100644 index 0000000..95088fd Binary files /dev/null and b/.github/images/approve-pr.png differ diff --git a/.github/images/logs.png b/.github/images/logs.png new file mode 100644 index 0000000..cafd520 Binary files /dev/null and b/.github/images/logs.png differ diff --git a/.github/images/new-branch.png b/.github/images/new-branch.png new file mode 100644 index 0000000..0df7dea Binary files /dev/null and b/.github/images/new-branch.png differ diff --git a/.github/images/new-pr.png b/.github/images/new-pr.png new file mode 100644 index 0000000..c858b55 Binary files /dev/null and b/.github/images/new-pr.png differ diff --git a/.github/images/run_wf.png b/.github/images/run_wf.png new file mode 100644 index 0000000..69f8e4e Binary files /dev/null and b/.github/images/run_wf.png differ diff --git a/.github/images/workflow.png b/.github/images/workflow.png new file mode 100644 index 0000000..8aa24cd Binary files /dev/null and b/.github/images/workflow.png differ diff --git a/.github/workflows/delete_old_workflows.yml b/.github/workflows/delete_old_workflows.yml new file mode 100644 index 0000000..32a786d --- /dev/null +++ b/.github/workflows/delete_old_workflows.yml @@ -0,0 +1,57 @@ +name: Delete old workflow runs +on: + workflow_dispatch: + inputs: + days: + description: 'Number of days.' + required: true + default: 30 + minimum_runs: + description: 'The minimum runs to keep for each workflow.' + required: true + default: 6 + delete_workflow_pattern: + description: 'The name or filename of the workflow. if not set then it will target all workflows.' + required: false + delete_workflow_by_state_pattern: + description: 'Remove workflow by state: active, deleted, disabled_fork, disabled_inactivity, disabled_manually' + required: true + default: "All" + type: choice + options: + - "All" + - active + - deleted + - disabled_inactivity + - disabled_manually + delete_run_by_conclusion_pattern: + description: 'Remove workflow by conclusion: action_required, cancelled, failure, skipped, success' + required: true + default: "All" + type: choice + options: + - "All" + - action_required + - cancelled + - failure + - skipped + - success + dry_run: + description: 'Only log actions, do not perform any delete operations.' 
+ required: false + +jobs: + del_runs: + runs-on: ubuntu-latest + steps: + - name: Delete workflow runs + uses: Mattraks/delete-workflow-runs@v2 + with: + token: ${{ github.token }} + repository: ${{ github.repository }} + retain_days: ${{ github.event.inputs.days }} + keep_minimum_runs: ${{ github.event.inputs.minimum_runs }} + delete_workflow_pattern: ${{ github.event.inputs.delete_workflow_pattern }} + delete_workflow_by_state_pattern: ${{ github.event.inputs.delete_workflow_by_state_pattern }} + delete_run_by_conclusion_pattern: ${{ github.event.inputs.delete_run_by_conclusion_pattern }} + dry_run: ${{ github.event.inputs.dry_run }} diff --git a/.github/workflows/trigger-github-actions.yml b/.github/workflows/trigger-github-actions.yml new file mode 100644 index 0000000..4507cce --- /dev/null +++ b/.github/workflows/trigger-github-actions.yml @@ -0,0 +1,39 @@ +name: Trigger GitHub Actions + +on: + pull_request_review: + types: [submitted] + +env: + GH_TOKEN: ${{ github.token }} + BRANCH_NAME: ${{ github.event.pull_request.head.ref }} + +jobs: + call-python-workflow: + if: ${{ github.event.review.state == 'approved' }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: tj-actions/changed-files@v35 + id: changed_files + with: + json: "true" + - name: Print changed files + run: | + echo "Changed files: ${{ steps.changed_files.outputs.all_changed_files }}" + - name: Call imaging if new-site config file changed + if: contains(steps.changed_files.outputs.all_changed_files, 'config/new-site.json') + run: | + gh workflow run trigger-python-workflows.yml -f file=config/new-site.json -f workflow=imaging -f branch=${{ env.BRANCH_NAME }} + - name: Call cluster-config if pod-config file changed + if: contains(steps.changed_files.outputs.all_changed_files, 'config/pod-config.yml') + run: | + gh workflow run trigger-python-workflows.yml -f file=config/pod-config.yml -f workflow=pod-config -f branch=${{ env.BRANCH_NAME }} + - name: Call vm-workloads if create-vm-workloads file changed + if: contains(steps.changed_files.outputs.all_changed_files, 'config/create-vm-workloads.json') + run: | + gh workflow run trigger-python-workflows.yml -f file=config/create-vm-workloads.json -f workflow=create-vm-workloads -f branch=${{ env.BRANCH_NAME }} + - name: Call edge-ai-workloads if edge_ai file changed + if: contains(steps.changed_files.outputs.all_changed_files, 'config/edge-ai.json') + run: | + gh workflow run trigger-python-workflows.yml -f file=config/edge-ai.json -f workflow=calm-edgeai-vm-workload -f branch=${{ env.BRANCH_NAME }} diff --git a/.github/workflows/trigger-python-workflows.yml b/.github/workflows/trigger-python-workflows.yml new file mode 100644 index 0000000..7be8205 --- /dev/null +++ b/.github/workflows/trigger-python-workflows.yml @@ -0,0 +1,76 @@ +name: Trigger Python Workflows +on: + workflow_dispatch: + inputs: + file: + description: 'Input file' + required: true + branch: + description: 'Branch name where the config file resides' + required: true + workflow: + description: 'Workflow to run' + required: true + default: "pod-config" + type: choice + options: + - "pod-config" + - "imaging" + - "create-vm-workloads" + - "calm-edgeai-vm-workload" + +jobs: + create-python-environment: + runs-on: self-hosted + steps: + - uses: actions/checkout@v3 + with: + ref: ${{ github.event.inputs.branch }} + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: '3.10' + # gives you either a new virtualenv, or restores an old one based on + # the
requirements*.txt -file. + - uses: syphar/restore-virtualenv@v1 + id: cache-virtualenv + - uses: syphar/restore-pip-download-cache@v1 + if: steps.cache-virtualenv.outputs.cache-hit != 'true' + # the package installation will only be executed when the + # requirements*.txt - file has changed. + - name: Install dependencies + run: | + python -m pip install --upgrade pip + cd framework + pip install -r requirements/dev.txt + if: steps.cache-virtualenv.outputs.cache-hit != 'true' + run-python-workflow: + needs: create-python-environment + runs-on: self-hosted + steps: + - uses: actions/checkout@v3 + with: + ref: ${{ github.event.inputs.branch }} + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: '3.10' + - name: Restore cache + uses: syphar/restore-virtualenv@v1 + id: cache-virtualenv + - name: Run python script + shell: bash + run: | + cd framework + python main.py --workflow ${{ github.event.inputs.workflow }} --file ${{ github.event.inputs.file }} + - name: Git add config and logs + run: | + git config --local user.email "41898282+github-actions[bot]@users.noreply.github.com" + git config --local user.name "github-actions[bot]" + git add sites/* + git commit -a -m "Workflow trigger logs" + - name: Push the config and logs to the trigger branch + uses: ad-m/github-push-action@master + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + branch: ${{ github.event.inputs.branch }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..926f609 --- /dev/null +++ b/.gitignore @@ -0,0 +1,97 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Mac OS +.DS_Store +.idea + +# Development +publish_to_marketplace.log + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*,cover +.hypothesis/ + +# Translations +*.mo +*.pot + +# Django stuff +local_settings.py + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# IPython Notebook +.ipynb_checkpoints + +# pyenv +.python-version +pyvenv.cfg + +# celery beat schedule file +celerybeat-schedule + + +# virtualenv +.venv/ +venv/ +ENV/ + +# Spyder project settings +.spyderproject + +# Rope project settings +.ropeproject + +# others +framework/script_log.log diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..bd48de5 --- /dev/null +++ b/Makefile @@ -0,0 +1,11 @@ +dev: + # Setup our python based virtualenv + # This step assumes python3 is installed on your dev machine as python + [ -f venv/bin/python ] || (python -m venv venv && \ + venv/bin/pip install --upgrade pip setuptools) + cd framework && \ + ../venv/bin/pip install --no-cache -r requirements/dev.txt + +test: dev + # In progress + pytest diff --git a/README.md b/README.md index 6cda7e9..b7a1480 100644 --- a/README.md +++ b/README.md @@ -1,27 +1,39 @@ # Zero Touch Framework -A tool used to automate end to end deployment and configuration of Nutanix Cloud Platform without human intervention, hence the name Zero Touch. 
The tool can also be extended to manage Day-1 and Day-2 operations as well. +A tool used to automate end-to-end deployment and configuration of Nutanix Cloud Platform without human intervention, + hence the name Zero Touch. The tool can also be extended to manage Day-1 and Day-2 operations. ## Usage +### Prerequisites +- Foundation Central configured such that it can access the networks on which nodes are discoverable. + - For how to set up Foundation Central and provide the API key via your DHCP server, refer to the [Foundation Central Guide](https://portal.nutanix.com/page/documents/details?targetId=Foundation-Central-v1_5:Foundation-Central-v1_5) +- A self-hosted GitHub runner in the environment to run the workflows locally. + This tool can be used in two (2) modes: -1. **GitOps** - >This tool works as GitOps, i.e using this repository to manage Infrastructure as a Code (IaaC). Each operation is defined as a workflow, and these workflows can be directly triggered from Github through various actions. - Click [here](config/README.md) to read more about triggering Github pipelines. -2. **Dev Mode** - >This tool can also be setup locally in any popular OS and trigger the workflows manually. Look at the below section for Dev Setup. +1. **Dev Mode** + > This tool can also be set up locally on any popular OS to trigger the workflows manually. See the Dev Mode Setup + section below. + +2. **GitOps** + > This tool works as GitOps, i.e. using this repository to manage Infrastructure as Code (IaC). Each operation is + defined as a workflow, and these workflows can be triggered directly from GitHub through various actions. + Click [here](config/README.md) to read more about triggering GitHub pipelines. + ## Dev Mode Setup -- Ensure Python version >= 3.10 (You can use [pyenv](https://realpython.com/intro-to-pyenv/) to manage multiple Python versions) - - If you are using pyenv, run this inside the project to use a specific version of Python. - ```sh - > pyenv local 3.10.8 - ``` -- `make dev` to create/ use python venv (virtual environment) in $TOPDIR/venv and setup dev environment. Activate it by calling source `venv/bin/activate`. Use deactivate to `deactivate` the venv. +- Ensure Python version >= 3.10 (You can use [pyenv](https://realpython.com/intro-to-pyenv/) to manage multiple Python + versions) + - If you are using pyenv, run this inside the project to use a specific version of Python. + ```sh + > pyenv local 3.10.8 + ``` +- `make dev` to create/use a Python venv (virtual environment) in $TOPDIR/venv and set up the dev environment. Activate it by + calling `source venv/bin/activate`, and use `deactivate` to exit the venv. -## Framework Usage +### Framework Usage ```sh > cd framework && python main.py --help @@ -36,28 +48,38 @@ options: --debug ``` -## Existing Workflows and Input Files - -As you see above, the framework expects two parameters, `WORKFLOW` and `FILE`. +### Existing Workflows and Input Files +As mentioned above, the framework expects two parameters, `WORKFLOW` and `FILE`. -The framework is designed to be triggered by different **_functional workflows_**. These workflows call one or many scripts behind the scenes to accomplish the task. +The framework is designed to be triggered by different **_functional workflows_**. These workflows call one or more +scripts behind the scenes to accomplish the task. The supported workflows are: + - `imaging` - This will trigger the Imaging of nodes using Foundation Central and creates Cluster/s.
+ Click [here](#prerequisites-for-imaging-workflow) to read the prerequisites for imaging. - `pod-config` - This will facilitate Pod configuration (including AZ and Cluster configurations) in parallel. -- `calm-vm-workloads` - This will use calm-dsl to create VM workloads in Self-Service from single or multiple calm-dsl file/s. -- `calm-edgeai-vm-workload` - This will use calm-dsl to create Edge-AI VM workload in Self-Service from single or multiple calm-dsl file/s. +- `calm-vm-workloads` - This will use calm-dsl to create VM workloads in Self-Service from one or more calm-dsl + files. +- `calm-edgeai-vm-workload` - This will use calm-dsl to create the Edge-AI VM workload in Self-Service from one or + more calm-dsl files. + +Along with the functional workflow, the tool also expects an input file to read the necessary configurations from. The +input file can be either a **json** or **yaml** file. -Along with the functional workflow, the tool also expects an input file to read the necessary configurations from. The input files can either be a **json/ yaml** file. -- The global configurations reside in [global.json](config/global.json). The values defined here, will be inherited in all the functional workflows and can be overwritten in the corresponding input file/s. +- The global configurations reside in [global.json](config/global.json). The values defined here will be inherited by + all the functional workflows and can be overridden in the corresponding input file/s, as illustrated in the sketch below. - For `imaging`, see the example input configuration [new-site.json](config/new-site.json). - For `pod-config`, see the example input configuration [pod-config.yml](config/pod-config.yml). -- For `calm-vm-workloads`, see the example input configuration [create_vm_workloads.json](config/create-vm-workloads.json). +- For `calm-vm-workloads`, see the example input + configuration [create-vm-workloads.json](config/create-vm-workloads.json). - For `calm-edgeai-vm-workload`, see the example input configuration [edge_ai.json](config/edge-ai.json). 
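+
+A minimal sketch of how this inheritance could work, assuming a plain top-level merge of the two files (the framework's actual merge logic may differ, and the `pc_username` key below is purely illustrative):
+
+```python
+import json
+
+def load_config(global_path: str, workflow_path: str) -> dict:
+    # Merge a workflow input file over the global defaults: top-level keys
+    # defined in the workflow file overwrite the ones from global.json.
+    with open(global_path) as f:
+        config = json.load(f)
+    with open(workflow_path) as f:
+        config.update(json.load(f))
+    return config
+
+# A hypothetical 'pc_username' set in global.json applies to every workflow
+# unless the workflow input file (here config/new-site.json) redefines it.
+merged = load_config("config/global.json", "config/new-site.json")
+print(merged.get("pc_username"))
+```
+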
-## Trigger a workflow with an input file +### Trigger a workflow with an input file ```sh > cd framework && python main.py --workflow pod-config -f config/pod-config.yml ``` + > Note: The path to the file, should be defined relative to the root of the project, not to _framework_ directory + diff --git a/calm-dsl-bps/blueprints/LAMP/.local/MYSQL_PASSWORD b/calm-dsl-bps/blueprints/LAMP/.local/MYSQL_PASSWORD new file mode 100644 index 0000000..7aa311a --- /dev/null +++ b/calm-dsl-bps/blueprints/LAMP/.local/MYSQL_PASSWORD @@ -0,0 +1 @@ +password \ No newline at end of file diff --git a/calm-dsl-bps/blueprints/LAMP/.local/centos_private_key b/calm-dsl-bps/blueprints/LAMP/.local/centos_private_key new file mode 100644 index 0000000..0f70725 --- /dev/null +++ b/calm-dsl-bps/blueprints/LAMP/.local/centos_private_key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJJwIBAAKCAgEApvuzdrY5icOY4T5g/DEurq2UCQeNgAYe933xkEidF1B4HRA3 +41AxZCytMMiQntLBeUCZ4dO1EJ9BdAVJwYvlcHoTxnjg4ekuNzj+WEv9eC9H8p7R +SNOgfZXJMf8oETbvNrB8Vwl/ulVVBTzjwvrM+o9Q0LMCmPXeHBGJ2jARks6kca6g +CH+mliCkyU2CjxWNM2lGeqMNosEtDZYeRKml7BAVpKjJq+/2rj/Rlf2nVIMxaxiy +ZJWAXXx+e+UiCuvz2S+sDU9IxPS5hZRJu3SNNWVj3JQbldehc0cq585lq/3m2Otg +NOL2AbUdL8nxH7FtmYMYA+mgBOvsrY9L9fctHpUbTmnk8kjfx5V4e6MClgh55Vza +U1Ikgz/EkPC0QEZIULtyhAy9KaRgq0hfO6BZKdYD83/dlPlCty2jTDI2VzDstMy6 +CxHS7W0KOkvIh4IA6F+DSOLFlsXF9JrWnb48v5yDr7JJPSF2rEIdLtDE/QEUGOVV +z5big7RFXXqMPVG74CdccTZKYVILFowosvrI4GqbLmb1qijOkz09YtuF2GmrvvxW +tL4Yy2/LwjFq67ZEcBreK3tuNzQ2HGFk81WVBblCtMNcKre+yoIwgnsiXbGTcK4d +eb+/NBY6qXnNxBlQNMcjWxdI5/7bJ2MvYz4f+gr++uxK2rSPeyYVMaAeIF8CAwEA +AQKCAgAScR4C8GEua+HIjqEbeTsUo6sDzEe+ae6lms1BCCQBXSvG2fGyMzshOyXt +i1kdkV04zP+xxzXqHgyWibb5nz+5ymHAu7zqdlxaUPsWOV5HFcG2c//dMTI6bH6J +/QopzI1i1t8EpdHrfF0ldPIaqKRWz9k3E24oAo+DPhShekmWkyfqPkUCa0a0sepB +KURQFStBEFFuXe+1l+wn4fp5ztq6VZRuWQ3c5WWzHO1CyX7fgJXGpDWfkjtZF9TI +TigSWLjXtcIkvirlRbnvnM7/2wF1cjszM1BQnj4Ag02jyFHGSwZh0q0PajmNOrl8 +e6sbuvbFy4qtc/HuWTgMexUCl0fuORIfIagTNRNn4qZieTiCKAIup31f+m0RovKp +rQvK8hTY8VapA3AW+fi2cVCeT/OPHBJ0/w/0F829nDmOoeZoiJs+a4w1DIK87mYC +lj43uf7xP+Qhs9RCEXzty8KbYkIrqKO+a503vmQAoaPIZZLQAtkddWsOA6CFroig +LQYybqatzPftJ74fsm1kvHGPm9Ea5SFOFH/AZKIYVgFDSHgcVHjEv2qf9YvviznP +l2WiX7nlLKR1sSInTpIobp7SotmN17A2ipwjeVsyi998Algo7N506huT3gQ/bZHL +F9R0OV7OJRxYkTjqrsJAz+FuqqZ3hv2oGo/AJLUu4/1OMJW/YQKCAQEAz2NRdsg1 +jTY8jhyTeNOMldCb6VWMReVt2/pOqhGW8cTPgz1l+CI1EWhfzqqTquv7i8WkaHZd +ti8MJ8MiMYaNiFdPX5Whcj54y8AlGM1LqU1zi7M1pxS8Gz2NcIxJ8rKf5IKLRuBf +xumUexAEum/aaT7WOB5z4vFOJxBSmGtXp+4QE59BgQmkF/sapp6ZbJqZopJqd7EW +22EuuyvEbNI6DshtPcAin0wKs2IRheSfA+uFoMllm2LyoL8M63YQvBKnwu2WrReU +p6PqDO/DRxY0yB0HdDfbE/aMZPvXFe87VFFs6KrMJ8im6FU/We6Y2sww7fsqH7g+ +YjuGCoMrhzzGWQKCAQEAzh/S8HPI7hOdoXywghN4oR+/zqC83l8OCFDHspmbeFB3 +c3/yFAAYgbiLojNubcGIkIBXGg7Ats9JDWc+LqEhMi0pxDLAUo3Tcv5byVdn+rPE +GE8QVZUOzxGo2zD2dhqk46u5aoC523HAf/H/80ZLfdUbrmaPdXY4fDghP7OORauz +gvLNHHwbpGnZvG4oFCIDgF6rQQv0IPyaMcuvNO4pQ29WQ+Fc2USvgXC8KFVbbsiC +HqFRLBezwAx+a7PYbMvWzyRM4uNjtynyh25ymbETTPET95knNZOxLX0MMjo1YDpu +CPeLRb+jfuYf/Chv+3GzjrgogZiHFYRtUggULYm1dwKCAQA7G8yhNUkGGXpC6uQh +YPJF1PJLG+XiGhUoyHvZPzAtcnHOrxY8FcvYTU5ZQgbahW3plYhZebsV7Ozi/Fvn +j7kRLglibexY+nNFZ1traT/yXi/bllVpbKB8hxukA2gkM0MdRxPZPKQvtm1qcdNX +LAAtVQqLbiqLeUlUGvPfm2CdbV3Jqe9qjsvlRd1/td52tM2rJ14PBTtFhbpjAIN4 +nKaNhhSUsAluK6c1H3NtHQ8xd9RbUmuSUS5kIG/XQIxeflawWTVD2l+1NCPGUiPv +Q+YBTzzLpAvd8bxsH9da2hen3ZlT6zJK+IaR25I12KpaWi5r9dU4uzOUnfCDWIV0 +gx7pAoIBAEchhFGmIzjFXChWNzMsNPylUDOmFIdIljcYaVGg4J3sZsxc4tU0E0Dw +xn768Ki/mLkFEQnU/XwYnEUb08GTqTj/jOJAAU570vRkApjAqQlC7/DTl/dwsOOU 
+XIkHpIWtPCm1IQ83zOuFncCJaqIxJ+wMlrjge7E1UtjLJe4tsHMqhZAy6cVirsdW +H+V+viX0dvDduot/7IdVpIB6ocPKTEMRMNvL8ZaotUgd91CT5DGe0Ahx8ukcZNNp +fFb+gTlKPp7NEBtcDRUwTJmf67xN9zMeHp3bJETA6uufs43BuJm0kDRX+J/hOjKY +h4ElEz/aRPSDaLcQQ82RHYipcQ0l8JECggEASRryF36oEh7aBEjN+t6gACAsSka1 +2qETmjC7MP1B4Cxz3gSK+ZrgDk8enjGT72zLmSjW11tbmF/n8dJu2xLYB8+obqEl +V2TvfnOij+Uk52rMExNLR9uWEwng+rFMCwRUT9yru8qmQUi6f+oe+WdW76BIinuW +/KXMdlo7h9KXBs5guMqb8RE6We0fJvWB4krIyn6MYODNDR2GN51bTXzSYxUnhhgI +REOfK0EOs1z+nusZv2WZvTu9hApnQAEK56eYS/lIROkw7JkA8rDFoxykhNfZKT5d +ohUeTmRk2X72ck9G6wp8eQXF7S07FedL5xrqWby8sQFi9e/GZK98vndEsQ== +-----END RSA PRIVATE KEY----- \ No newline at end of file diff --git a/calm-dsl-bps/blueprints/LAMP/.local/centos_public_key b/calm-dsl-bps/blueprints/LAMP/.local/centos_public_key new file mode 100644 index 0000000..d9cf209 --- /dev/null +++ b/calm-dsl-bps/blueprints/LAMP/.local/centos_public_key @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCm+7N2tjmJw5jhPmD8MS6urZQJB42ABh73ffGQSJ0XUHgdEDfjUDFkLK0wyJCe0sF5QJnh07UQn0F0BUnBi+VwehPGeODh6S43OP5YS/14L0fyntFI06B9lckx/ygRNu82sHxXCX+6VVUFPOPC+sz6j1DQswKY9d4cEYnaMBGSzqRxrqAIf6aWIKTJTYKPFY0zaUZ6ow2iwS0Nlh5EqaXsEBWkqMmr7/auP9GV/adUgzFrGLJklYBdfH575SIK6/PZL6wNT0jE9LmFlEm7dI01ZWPclBuV16FzRyrnzmWr/ebY62A04vYBtR0vyfEfsW2ZgxgD6aAE6+ytj0v19y0elRtOaeTySN/HlXh7owKWCHnlXNpTUiSDP8SQ8LRARkhQu3KEDL0ppGCrSF87oFkp1gPzf92U+UK3LaNMMjZXMOy0zLoLEdLtbQo6S8iHggDoX4NI4sWWxcX0mtadvjy/nIOvskk9IXasQh0u0MT9ARQY5VXPluKDtEVdeow9UbvgJ1xxNkphUgsWjCiy+sjgapsuZvWqKM6TPT1i24XYaau+/Fa0vhjLb8vCMWrrtkRwGt4re243NDYcYWTzVZUFuUK0w1wqt77KgjCCeyJdsZNwrh15v780Fjqpec3EGVA0xyNbF0jn/tsnYy9jPh/6Cv767EratI97JhUxoB4gXw== no-reply@acme.com \ No newline at end of file diff --git a/calm-dsl-bps/blueprints/LAMP/LAMP.py b/calm-dsl-bps/blueprints/LAMP/LAMP.py new file mode 100644 index 0000000..5c61b47 --- /dev/null +++ b/calm-dsl-bps/blueprints/LAMP/LAMP.py @@ -0,0 +1,414 @@ +import os # no_qa +from pathlib import Path +from calm.dsl.builtins import * # no_qa +from helpers.general_utils import get_json_file_contents + +# Secret Variables +CENTOS_KEY = read_local_file("centos_private_key") +MYSQL_PASSWORD = read_local_file( + "MYSQL_PASSWORD" +) + +# Credentials +CENTOS_CRED = basic_cred( + "centos", + CENTOS_KEY, + name="CENTOS", + type="KEY", + default=True, + editables={"username": True, "secret": True}, +) + + +# Downloadable images for AHV +AHV_CENTOS_78 = vm_disk_package( + name="AHV_CENTOS_78", config_file="specs/ahv_centos.yaml" +) + +# Subnet, Cluster reference +# Find path to the project root +# we are in root/blueprints/LAMP/ +project_root = Path(__file__).parent.parent.parent.parent + +# todo Convert this to Run time variables and pass it from script +spec = get_json_file_contents(f"{project_root}/config/create-vm-workloads.json") +ACCOUNT_NAME = spec["account_name"] +bp_spec = spec["bp_list"] + +for bp in bp_spec: + if bp["name"] == "LAMP-dsl": + subnet_name = bp["subnet"] + cluster_name = bp["cluster"] + else: + raise Exception("Cluster and Subnet not specified") + + +class MYSQLService(Service): + @action + def PackageInstall(name="Package Install"): + + CalmTask.Exec.ssh( + name="Install MySQL", + filename=os.path.join( + "scripts", "PackageInstallMySQL.sh" + ), + cred=ref(CENTOS_CRED), + target=ref(MYSQLService), + ) + + +class MYSQLVMResources(AhvVmResources): + + memory = 4 + vCPUs = 2 + cores_per_vCPU = 2 + disks = [ + AhvVmDisk.Disk.Scsi.cloneFromVMDiskPackage(AHV_CENTOS_78, bootable=True) + ] + + nics = [AhvVmNic.NormalNic.ingress(subnet_name, cluster=cluster_name)] + + guest_customization = 
AhvVmGC.CloudInit( + filename=os.path.join("specs", "basic_linux_vm_cloudinit.yaml") + ) + + +class MYSQLVm(AhvVm): + + name = "MYSQL-VM" + resources = MYSQLVMResources + cluster = Ref.Cluster(cluster_name) + + +class AHVMysqlSubstrate(Substrate): + + os_type = "Linux" + provider_type = "AHV_VM" + account = Ref.Account(ACCOUNT_NAME) + provider_spec = MYSQLVm + provider_spec_editables = read_spec( + os.path.join("specs", "basic_linux_vm_editables.yaml") + ) + readiness_probe = readiness_probe( + connection_type="SSH", + disabled=False, + retries="5", + connection_port=22, + address="@@{platform.status.resources.nic_list[0].ip_endpoint_list[0].ip}@@", + delay_secs="300", + credential=ref(CENTOS_CRED) + ) + + +class ApacheVmResources(AhvVmResources): + + memory = 4 + vCPUs = 2 + cores_per_vCPU = 2 + disks = [AhvVmDisk.Disk.Scsi.cloneFromVMDiskPackage(AHV_CENTOS_78, bootable=True)] + + nics = [AhvVmNic.NormalNic.ingress(subnet_name, cluster=cluster_name)] + + guest_customization = AhvVmGC.CloudInit( + filename=os.path.join("specs", "basic_linux_vm_cloudinit.yaml") + ) + + +class ApacheVm(AhvVm): + + name = "APACHE_PHP-VM-@@{calm_array_index}@@" + resources = ApacheVmResources + cluster = Ref.Cluster(cluster_name) + + +class AhvApacheSubstrate(Substrate): + + os_type = "Linux" + provider_type = "AHV_VM" + provider_spec = ApacheVm + account = Ref.Account(ACCOUNT_NAME) + provider_spec_editables = read_spec( + os.path.join("specs", "basic_linux_vm_editables.yaml") + ) + readiness_probe = readiness_probe( + connection_type="SSH", + disabled=False, + retries="5", + connection_port=22, + address="@@{platform.status.resources.nic_list[0].ip_endpoint_list[0].ip}@@", + delay_secs="300", + credential=ref(CENTOS_CRED), + editables_list=[], + ) + + +class HAPROXYVmResources(AhvVmResources): + + memory = 4 + vCPUs = 2 + cores_per_vCPU = 1 + disks = [AhvVmDisk.Disk.Scsi.cloneFromVMDiskPackage(AHV_CENTOS_78, bootable=True)] + + nics = [AhvVmNic.NormalNic.ingress(subnet_name, cluster=cluster_name)] + + guest_customization = AhvVmGC.CloudInit( + filename=os.path.join("specs", "basic_linux_vm_cloudinit.yaml") + ) + + +class HAPROXYVm(AhvVm): + + name = "HAPROXY-VM" + resources = HAPROXYVmResources + cluster = Ref.Cluster(cluster_name) + + +class AhvHAPROXYSubstrate(Substrate): + + os_type = "Linux" + provider_type = "AHV_VM" + provider_spec = HAPROXYVm + account = Ref.Account(ACCOUNT_NAME) + provider_spec_editables = read_spec( + os.path.join("specs", "basic_linux_vm_editables.yaml") + ) + readiness_probe = readiness_probe( + connection_type="SSH", + disabled=False, + retries="5", + connection_port=22, + address="@@{platform.status.resources.nic_list[0].ip_endpoint_list[0].ip}@@", + delay_secs="300", + credential=ref(CENTOS_CRED), + editables_list=[], + ) + + +class APACHEService(Service): + + dependencies = [ref(MYSQLService)] + + @action + def PackageInstall(name="Package Install"): + + CalmTask.Exec.ssh( + name="Install Apache", + filename=os.path.join( + "scripts", + "PackageInstallApache.sh", + ), + cred=ref(CENTOS_CRED), + target=ref(APACHEService), + ) + + +class MYSQLPackage(Package): + + services = [ref(MYSQLService)] + + @action + def __install__(): + + MYSQLService.PackageInstall(name="Package Install") + + +class HAPROXYService(Service): + + dependencies = [ref(APACHEService)] + + @action + def PackageInstall(name="Package Install"): + + CalmTask.Exec.ssh( + name="Install HAProxy", + filename=os.path.join( + "scripts", + "PackageInstallHAProxy.sh", + ), + cred=ref(CENTOS_CRED), + 
target=ref(HAPROXYService), + ) + + +class APACHEPackage(Package): + + services = [ref(APACHEService)] + + @action + def __install__(): + + APACHEService.PackageInstall(name="Package Install") + + +class AhvMYSQLDeployment(Deployment): + + min_replicas = "1" + max_replicas = "1" + default_replicas = "1" + + packages = [ref(MYSQLPackage)] + substrate = ref(AHVMysqlSubstrate) + + +class HAPROXYPackage(Package): + + services = [ref(HAPROXYService)] + + @action + def __install__(): + + HAPROXYService.PackageInstall(name="Package Install") + + +class AhvApacheDeployment(Deployment): + + min_replicas = "2" + max_replicas = "4" + default_replicas = "2" + + packages = [ref(APACHEPackage)] + substrate = ref(AhvApacheSubstrate) + editables = { + "min_replicas": False, + "default_replicas": False, + "max_replicas": False, + } + + +class AhvHAProxyDeployment(Deployment): + + min_replicas = "1" + max_replicas = "1" + default_replicas = "1" + + packages = [ref(HAPROXYPackage)] + substrate = ref(AhvHAPROXYSubstrate) + + +class Nutanix(Profile): + + deployments = [ + AhvMYSQLDeployment, + AhvApacheDeployment, + AhvHAProxyDeployment, + ] + + MYSQL_PASSWORD = CalmVariable.Simple.Secret( + MYSQL_PASSWORD, + label="", + is_mandatory=False, + is_hidden=False, + runtime=False, + description="", + ) + + @action + def ScaleOut(): + """This action will scale out App Server slaves by given scale out count.""" + + COUNT = CalmVariable.Simple( + "1", + label="", + is_mandatory=False, + is_hidden=False, + runtime=True, + description="", + ) + CalmTask.Scaling.scale_out( + "@@{COUNT}@@", name="Scale Out App", target=ref(AhvApacheDeployment) + ) + CalmTask.Exec.ssh( + name="Configure haproxy", + filename=os.path.join( + "scripts", "ScaleOutApacheConfigurehaproxy.sh" + ), + cred=ref(CENTOS_CRED), + target=ref(HAPROXYService), + ) + + @action + def ScaleIn(): + """This action will scale in App Server slaves by given scale in count.""" + + COUNT = CalmVariable.Simple( + "1", + label="", + is_mandatory=False, + is_hidden=False, + runtime=True, + description="", + ) + CalmTask.Scaling.scale_in( + "@@{COUNT}@@", name="Scale In App", target=ref(AhvApacheDeployment) + ) + CalmTask.Exec.ssh( + name="Configure haproxy", + filename=os.path.join( + "scripts", "ScaleInApacheConfigurehaproxy.sh" + ), + cred=ref(CENTOS_CRED), + target=ref(HAPROXYService), + ) + + @action + def DBBackup(): + """This action will take mysql backup.""" + + BACKUP_FILE_PATH = CalmVariable.Simple( + "~/db_backup", + label="", + is_mandatory=False, + is_hidden=False, + runtime=True, + description="", + ) + CalmTask.Exec.ssh( + name="Do DB Backup", + filename=os.path.join( + "scripts", "MySqlDbBackup.sh" + ), + target=ref(MYSQLService), + ) + + @action + def DBRestore(): + """This action will restore mysql db from specified file.""" + + RESTORE_FILE_PATH = CalmVariable.Simple( + "~/db_backup/db_dump.sql.gz", + label="", + is_mandatory=False, + is_hidden=False, + runtime=True, + description="", + ) + CalmTask.Exec.ssh( + name="Do DB Restore", + filename=os.path.join( + "scripts", "MySqlDbRestore.sh" + ), + target=ref(MYSQLService), + ) + + +class LAMP(Blueprint): + """* [Lamp](http://@@{HAPROXY.address}@@)""" + + services = [MYSQLService, APACHEService, HAPROXYService] + packages = [MYSQLPackage, APACHEPackage, HAPROXYPackage, AHV_CENTOS_78] + substrates = [AHVMysqlSubstrate, AhvApacheSubstrate, AhvHAPROXYSubstrate] + profiles = [Nutanix] + credentials = [CENTOS_CRED] + + +class BpMetadata(Metadata): + + categories = {"AppFamily": "DevOps"} + + +def main(): + 
print(LAMP.json_dumps(pprint=True)) + + +if __name__ == "__main__": + main() diff --git a/calm-dsl-bps/blueprints/LAMP/scripts/MySqlDbBackup.sh b/calm-dsl-bps/blueprints/LAMP/scripts/MySqlDbBackup.sh new file mode 100644 index 0000000..9ab17f0 --- /dev/null +++ b/calm-dsl-bps/blueprints/LAMP/scripts/MySqlDbBackup.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +### Setup variables +mysql_password="@@{MYSQL_PASSWORD}@@" +date_part=`date +%F` +mkdir -p @@{BACKUP_FILE_PATH}@@ +sudo mysqldump -u root -p${mysql_password} --all-databases | sudo gzip -9 > @@{BACKUP_FILE_PATH}@@/db_dump.sql.gz \ No newline at end of file diff --git a/calm-dsl-bps/blueprints/LAMP/scripts/MySqlDbRestore.sh b/calm-dsl-bps/blueprints/LAMP/scripts/MySqlDbRestore.sh new file mode 100644 index 0000000..f2ee4b1 --- /dev/null +++ b/calm-dsl-bps/blueprints/LAMP/scripts/MySqlDbRestore.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +### Setup variables +mysql_password="@@{MYSQL_PASSWORD}@@" +db_file=@@{RESTORE_FILE_PATH}@@ + +sudo gunzip < $db_file | sudo mysql -u root -p${mysql_password} \ No newline at end of file diff --git a/calm-dsl-bps/blueprints/LAMP/scripts/PackageInstallApache.sh b/calm-dsl-bps/blueprints/LAMP/scripts/PackageInstallApache.sh new file mode 100644 index 0000000..9528727 --- /dev/null +++ b/calm-dsl-bps/blueprints/LAMP/scripts/PackageInstallApache.sh @@ -0,0 +1,32 @@ +#!/bin/bash +set -ex + +sudo yum update -y +sudo yum -y install epel-release + +## Disable SELinux +sudo setenforce 0 +sudo sed -i 's/permissive/disabled/' /etc/sysconfig/selinux + +# Disable firewall +sudo systemctl stop firewalld || true +sudo systemctl disable firewalld || true + +sudo rpm -Uvh https://mirror.webtatic.com/yum/el7/webtatic-release.rpm +sudo yum update -y + +## -*- Install httpd and php +sudo yum install -y httpd php56w php56w-mysql + +## Configure php module in apache +echo " + DirectoryIndex index.php index.html index.cgi index.pl index.php index.xhtml index.htm +" | sudo tee /etc/httpd/conf.modules.d/dir.conf + +echo "" | sudo tee /var/www/html/info.php + +## Restart apache service +sudo systemctl restart httpd +sudo systemctl enable httpd \ No newline at end of file diff --git a/calm-dsl-bps/blueprints/LAMP/scripts/PackageInstallHAProxy.sh b/calm-dsl-bps/blueprints/LAMP/scripts/PackageInstallHAProxy.sh new file mode 100644 index 0000000..9498ea2 --- /dev/null +++ b/calm-dsl-bps/blueprints/LAMP/scripts/PackageInstallHAProxy.sh @@ -0,0 +1,72 @@ +#!/bin/bash +set -ex + +sudo yum update -y +sudo yum install -y haproxy + +## Disable SELinux +sudo setenforce 0 +sudo sed -i 's/permissive/disabled/' /etc/sysconfig/selinux + +# Disable firewall +sudo systemctl stop firewalld || true +sudo systemctl disable firewalld || true + +## Variable Initialization +APACHE_IP_LIST="@@{APACHEService.address}@@" + +## Install HAProxy Packages +port=80 +sudo yum update -y +sudo yum install -y haproxy + +## Generate HAProxy Conf +echo "global + log 127.0.0.1 local0 + log 127.0.0.1 local1 notice + maxconn 4096 + quiet + user haproxy + group haproxy +defaults + log global + mode http + retries 3 + timeout client 50s + timeout connect 5s + timeout server 50s + option dontlognull + option httplog + option redispatch + balance roundrobin +# Set up application listeners here. 
+listen stats 0.0.0.0:8080 + mode http + log global + stats enable + stats hide-version + stats refresh 30s + stats show-node + stats uri /stats +listen admin + bind 127.0.0.1:22002 + mode http + stats uri / +frontend http + maxconn 2000 + bind 0.0.0.0:80 + default_backend servers-http +backend servers-http" | sudo tee /etc/haproxy/haproxy.cfg +sudo sed -i 's/server host-/#server host-/g' /etc/haproxy/haproxy.cfg + +## Configure Apache server ip list in HAProxy conf +hosts=$(echo "${APACHE_IP_LIST}" | sed 's/^,//' | sed 's/,$//' | tr "," "\n") +for host in $hosts +do + echo " server host-${host} ${host}:${port} weight 1 maxconn 100 check" | sudo tee -a /etc/haproxy/haproxy.cfg +done + +## Enable and Restart haproxy service +sudo systemctl daemon-reload +sudo systemctl enable haproxy +sudo systemctl restart haproxy diff --git a/calm-dsl-bps/blueprints/LAMP/scripts/PackageInstallMySQL.sh b/calm-dsl-bps/blueprints/LAMP/scripts/PackageInstallMySQL.sh new file mode 100644 index 0000000..4906b35 --- /dev/null +++ b/calm-dsl-bps/blueprints/LAMP/scripts/PackageInstallMySQL.sh @@ -0,0 +1,39 @@ +#!/bin/bash +set -ex + +sudo yum install -y "http://repo.mysql.com/mysql-community-release-el7-5.noarch.rpm" + +## Disable SELinux +sudo setenforce 0 +sudo sed -i 's/permissive/disabled/' /etc/sysconfig/selinux + +# Disable firewall +sudo systemctl stop firewalld || true +sudo systemctl disable firewalld || true + +sudo yum update -y +sudo yum install -y mysql-community-server.x86_64 + +sudo /bin/systemctl start mysqld +sudo /bin/systemctl enable mysqld + + +## Enable and start MySQL Services +sudo systemctl enable mysqld +sudo systemctl start mysqld + +## Fix to obtain temp password and set it to blank +password=$(sudo grep -oP 'temporary password(.*): \K(\S+)' /var/log/mysqld.log) +sudo mysqladmin --user=root --password="$password" password aaBB**cc1122 +sudo mysql --user=root --password=aaBB**cc1122 -e "UNINSTALL COMPONENT 'file://component_validate_password'" +sudo mysqladmin --user=root --password="aaBB**cc1122" password "" + +## -*- Mysql secure installation +mysql -u root<<-EOF +ALTER USER 'root'@'localhost' IDENTIFIED WITH caching_sha2_password BY '@@{MYSQL_PASSWORD}@@'; +DELETE FROM mysql.user WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1'); +DELETE FROM mysql.user WHERE User=''; +DELETE FROM mysql.db WHERE Db='test' OR Db='test\_%'; +FLUSH PRIVILEGES; +EOF + diff --git a/calm-dsl-bps/blueprints/LAMP/scripts/ScaleInApacheConfigurehaproxy.sh b/calm-dsl-bps/blueprints/LAMP/scripts/ScaleInApacheConfigurehaproxy.sh new file mode 100644 index 0000000..5361e00 --- /dev/null +++ b/calm-dsl-bps/blueprints/LAMP/scripts/ScaleInApacheConfigurehaproxy.sh @@ -0,0 +1,59 @@ +#!/bin/bash +set -ex + +sudo setenforce 0 +sudo sed -i 's/permissive/disabled/' /etc/sysconfig/selinux + +port=80 + + +echo "global + log 127.0.0.1 local0 + log 127.0.0.1 local1 notice + maxconn 4096 + quiet + user haproxy + group haproxy +defaults + log global + mode http + retries 3 + timeout client 50s + timeout connect 5s + timeout server 50s + option dontlognull + option httplog + option redispatch + balance roundrobin +# Set up application listeners here. 
+listen stats 0.0.0.0:8080 + mode http + log global + stats enable + stats hide-version + stats refresh 30s + stats show-node + stats uri /stats +listen admin + bind 127.0.0.1:22002 + mode http + stats uri / +frontend http + maxconn 2000 + bind 0.0.0.0:80 + default_backend servers-http +backend servers-http" | sudo tee /etc/haproxy/haproxy.cfg + +sudo sed -i 's/server host-/#server host-/g' /etc/haproxy/haproxy.cfg + +hosts=$(echo "@@{APACHEService.address}@@" | sed 's/^,//' | sed 's/,$//' | tr "," "\n") + + +for host in $hosts +do + echo " server host-${host} ${host}:${port} weight 1 maxconn 100 check" | sudo tee -a /etc/haproxy/haproxy.cfg +done + +sudo systemctl daemon-reload +sudo systemctl enable haproxy +sudo systemctl restart haproxy diff --git a/calm-dsl-bps/blueprints/LAMP/scripts/ScaleOutApacheConfigurehaproxy.sh b/calm-dsl-bps/blueprints/LAMP/scripts/ScaleOutApacheConfigurehaproxy.sh new file mode 100644 index 0000000..5361e00 --- /dev/null +++ b/calm-dsl-bps/blueprints/LAMP/scripts/ScaleOutApacheConfigurehaproxy.sh @@ -0,0 +1,59 @@ +#!/bin/bash +set -ex + +sudo setenforce 0 +sudo sed -i 's/permissive/disabled/' /etc/sysconfig/selinux + +port=80 + + +echo "global + log 127.0.0.1 local0 + log 127.0.0.1 local1 notice + maxconn 4096 + quiet + user haproxy + group haproxy +defaults + log global + mode http + retries 3 + timeout client 50s + timeout connect 5s + timeout server 50s + option dontlognull + option httplog + option redispatch + balance roundrobin +# Set up application listeners here. +listen stats 0.0.0.0:8080 + mode http + log global + stats enable + stats hide-version + stats refresh 30s + stats show-node + stats uri /stats +listen admin + bind 127.0.0.1:22002 + mode http + stats uri / +frontend http + maxconn 2000 + bind 0.0.0.0:80 + default_backend servers-http +backend servers-http" | sudo tee /etc/haproxy/haproxy.cfg + +sudo sed -i 's/server host-/#server host-/g' /etc/haproxy/haproxy.cfg + +hosts=$(echo "@@{APACHEService.address}@@" | sed 's/^,//' | sed 's/,$//' | tr "," "\n") + + +for host in $hosts +do + echo " server host-${host} ${host}:${port} weight 1 maxconn 100 check" | sudo tee -a /etc/haproxy/haproxy.cfg +done + +sudo systemctl daemon-reload +sudo systemctl enable haproxy +sudo systemctl restart haproxy diff --git a/calm-dsl-bps/blueprints/LAMP/specs/ahv_centos.yaml b/calm-dsl-bps/blueprints/LAMP/specs/ahv_centos.yaml new file mode 100644 index 0000000..6cf93c2 --- /dev/null +++ b/calm-dsl-bps/blueprints/LAMP/specs/ahv_centos.yaml @@ -0,0 +1,9 @@ +image: + name: AHV_CENTOS_78 + type: DISK_IMAGE + source: http://download.nutanix.com/Calm/CentOS-7-x86_64-2003.qcow2 + architecture: X86_64 + +product: + name: AHV_CENTOS_78 + version: 1.0 \ No newline at end of file diff --git a/calm-dsl-bps/blueprints/LAMP/specs/basic_linux_vm_cloudinit.yaml b/calm-dsl-bps/blueprints/LAMP/specs/basic_linux_vm_cloudinit.yaml new file mode 100644 index 0000000..ca82f26 --- /dev/null +++ b/calm-dsl-bps/blueprints/LAMP/specs/basic_linux_vm_cloudinit.yaml @@ -0,0 +1,6 @@ +#cloud-config +users: + - name: @@{CENTOS.username}@@ + ssh-authorized-keys: + - @@{CENTOS.public_key}@@ + sudo: ['ALL=(ALL) NOPASSWD:ALL'] diff --git a/calm-dsl-bps/blueprints/LAMP/specs/basic_linux_vm_editables.yaml b/calm-dsl-bps/blueprints/LAMP/specs/basic_linux_vm_editables.yaml new file mode 100644 index 0000000..0a7db65 --- /dev/null +++ b/calm-dsl-bps/blueprints/LAMP/specs/basic_linux_vm_editables.yaml @@ -0,0 +1,3 @@ +resources: + nic_list: {} + serial_port_list: {} \ No newline at end of file diff --git 
a/calm-dsl-bps/blueprints/edge_ai/.local/edge-key b/calm-dsl-bps/blueprints/edge_ai/.local/edge-key new file mode 100644 index 0000000..4eabcaf --- /dev/null +++ b/calm-dsl-bps/blueprints/edge_ai/.local/edge-key @@ -0,0 +1,49 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAACFwAAAAdzc2gtcn +NhAAAAAwEAAQAAAgEAqJxytnZwGjwQMrqsk4yFp3/p7Kggdgl5neQ9H7pxBVmPZz7XYQqV +z3vxEUCuuhUumLv7oiGADYOzCZ2feTpt0IuvpPgS+MVC5M3Y9cp7qWWe8heUL7r/oKKvNW +xnGa8QBfyxaYOciHzG3PkRg4NX/n3joVyLUT7o0oyYz47J68e6MzcIVYk0ZGMiZ+MUhUhb +L/hMGDeSzYGV9NVwae3AiKvVsZkDR9Ii02eg06Q9hISv06F5TWaSBLjpZ86AAE42/qStLJ +pG+czH0vmxeRzqFB64AEKFPY5WWKJe5oQsnaBiAqtErNgqQOZC/oVGUswCmDok5ZtacRFL +iunFy/prhxMjqRJxFjEevISOkHLTkZOa5ggp8IryFJdSHncctApGfhXhvGGAVheXPraBeL +vDlEJ/n2++poeTCLizGblfOPKOK8maj5nimC+5iiNIwaFFSMXp8Ocn2nJZens7NWMZjaJN +bWTHHOoDWy7R4P8irjB9stpV7BeB+wv7vWHHM23Vl9mEdpf0qQSbWPjqsg0Bu30QLPKcuH +4PIz0ZNI0CTkJYwivdjWie0IoEyHBTrgnnxd6edDUsJtmsl6SFwkLiILorXugfi2nsV21V +UdBg+f3Fq8cD3oFxKCTfSSY3MVhflWbrXGhLYEhlpde9QcyvqJz9LQURvSFlkBDBZu7zsk +kAAAdQjp98AI6ffAAAAAAHc3NoLXJzYQAAAgEAqJxytnZwGjwQMrqsk4yFp3/p7Kggdgl5 +neQ9H7pxBVmPZz7XYQqVz3vxEUCuuhUumLv7oiGADYOzCZ2feTpt0IuvpPgS+MVC5M3Y9c +p7qWWe8heUL7r/oKKvNWxnGa8QBfyxaYOciHzG3PkRg4NX/n3joVyLUT7o0oyYz47J68e6 +MzcIVYk0ZGMiZ+MUhUhbL/hMGDeSzYGV9NVwae3AiKvVsZkDR9Ii02eg06Q9hISv06F5TW +aSBLjpZ86AAE42/qStLJpG+czH0vmxeRzqFB64AEKFPY5WWKJe5oQsnaBiAqtErNgqQOZC +/oVGUswCmDok5ZtacRFLiunFy/prhxMjqRJxFjEevISOkHLTkZOa5ggp8IryFJdSHncctA +pGfhXhvGGAVheXPraBeLvDlEJ/n2++poeTCLizGblfOPKOK8maj5nimC+5iiNIwaFFSMXp +8Ocn2nJZens7NWMZjaJNbWTHHOoDWy7R4P8irjB9stpV7BeB+wv7vWHHM23Vl9mEdpf0qQ +SbWPjqsg0Bu30QLPKcuH4PIz0ZNI0CTkJYwivdjWie0IoEyHBTrgnnxd6edDUsJtmsl6SF +wkLiILorXugfi2nsV21VUdBg+f3Fq8cD3oFxKCTfSSY3MVhflWbrXGhLYEhlpde9QcyvqJ +z9LQURvSFlkBDBZu7zskkAAAADAQABAAACADl8XX/oEYBgSgWBr6T+m/LnoZGWILsdr+Et +F8nhfvcb+dBxlGz5duouj5oaO3gmEKTyMnCsi0QQStJUczKZwScA+Pl9uromRxBLPlVOm6 +q9gtt3PoanctAJp0LHmr8e+I+R/8rBnHTMVeCw6k+0UCPsN9ej/bvNfwOJ+++H9z/z8H/V +m3nONZakVe6ldTGb4bsRJKB9EggOzlmTC6iR7rqqG0lZqcyq0ETTZ/nAXVj5BBlbTEgfWF +lT0gAaX3m7MPTQoFo8n0Y7k0VS7DcmrIf/kSxFgbfi931wnnppRezX6ZKPYfKsIHvQBbdR +KRDBjYDpEftWZBd4lhDecxmAX3g38Vhusmhtbkkzr8PmpZs6KCKXcc1odMlyIFgzG0EF6F +UkBbgEinUGcM7lHbv73pWQKHOVjWrX32hLB5yqTQ7g+G3o814wx8wgptuHl7j25FrgbKtF +OLzL1s914CT0MOx856fPQaOHyPHDjVdopMkp8gaW2CPQk860frLi6knf4zjLaVV2P18Fw3 +oU7M3cWoRVagRqtjJuJWqijF4JJYiZbfv0TuZGXegBM30wiTDNTTEGzjyRR0DjFDNiu5F7 +8iQMo5giRPo5jlJ+NBaJOnGaQ20bTCusPggmbgXnTQHeGyngQmDRa5tE2TDnssW8MWypFV +kvrNVnL7tqTOhJLKKBAAABAANLmo9kVZ7vu1Q6Kxm7c73X3ZxIX5fVx8Ce1wAF3RlqiofH +BBobfUnQKup2ijpyDBw8KvqOqSPn6kLYM94lvlOJbQBlp1qXT6+dQJZsZZK/F7A7PsE7Z6 +NaPff+MlkfZO7cpnHHIwhNSujUN/ktvRIZk7ZBMbDUZe5IhTXTEj90QRwZE5DfHwo8CtQF +oZbf4wPQakxhFxHyBJHG+YD9E8oORfr66WArIqkmAldHJVsd/Uy7+hGhuqwMMm93yjlcfn +1jcRP+J7zS7/9kSWUkbL8SyefA7F7WZOtqQ6HNI4y//B8kJ9xnLqLUey043ylwKlWuY2kY +GYX6dZR1pXcmakAAAAEBAOA+GMzRIKGaDYrtK7XUop+05aJqp0dSlVLq7E9iFJi+o18T8A +K8KaAOPt9CvbCtQL7VGDMHn0wxmpi4r+sCr0HRLjK+Hs12OWHzP+SQPjynRILhFT6ij+eV +aPtu5+uX6wfv1rR3+4ZvBONo6tpzdi6uIw7eJbToCUvIBogm19D0Cj7HT/6qZ0FnZbn7CO +eP1Sr2qJ9U2kYMZl+fPblUF7FaWyS9uyZc7sSJyP9Vx++VbF5m9ML2vircQMnZYNXOIbrY +TKhO76jyDzQW/TrjqohhqWExVhCajL3eT3n6CYMhpPMJ9PDGzxl79ZPwdh/qw4hGHQuV4z +KYxNEogugFuRUAAAEBAMB9b6Qu3RcAzb/gAB3E5IGnhzsTdsrLA0RKevvJDrPgFfd7QpUu +wPEwlIiMuAAPEaB4KlGxbr6b0HIaxaAN+UKqTiP/1JD15G+EKCqEz9n4R3veGmwSgznKcl +PmBqhK6GEfqmhXWQJHgpr6qdDFAzoNVTcuJLFW82WshrYYd1w0OHhXxHAsCoaWqtTmpXd5 +aGJAFXT7F3SBrLvZdv4rNtpthYYJnE3CITVyN5TZOG3A0PoJ0Es116dUzWE9JsVNjqOURu 
+Khz72UfuQzjVnPINeuiMhW/XuiZ0onDiSkzp1F6efe003yOklZz8jvvyEvHcnOxpc/2VY5 +AwtwtwuMOWUAAAAZY2hyaXMuZ2xvdmVyQEMwMlpRNUZGTFZETAEC +-----END OPENSSH PRIVATE KEY----- \ No newline at end of file diff --git a/calm-dsl-bps/blueprints/edge_ai/.local/edge-key.pub b/calm-dsl-bps/blueprints/edge_ai/.local/edge-key.pub new file mode 100644 index 0000000..68acdd1 --- /dev/null +++ b/calm-dsl-bps/blueprints/edge_ai/.local/edge-key.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQConHK2dnAaPBAyuqyTjIWnf+nsqCB2CXmd5D0funEFWY9nPtdhCpXPe/ERQK66FS6Yu/uiIYANg7MJnZ95Om3Qi6+k+BL4xULkzdj1ynupZZ7yF5Qvuv+goq81bGcZrxAF/LFpg5yIfMbc+RGDg1f+feOhXItRPujSjJjPjsnrx7ozNwhViTRkYyJn4xSFSFsv+EwYN5LNgZX01XBp7cCIq9WxmQNH0iLTZ6DTpD2EhK/ToXlNZpIEuOlnzoAATjb+pK0smkb5zMfS+bF5HOoUHrgAQoU9jlZYol7mhCydoGICq0Ss2CpA5kL+hUZSzAKYOiTlm1pxEUuK6cXL+muHEyOpEnEWMR68hI6QctORk5rmCCnwivIUl1Iedxy0CkZ+FeG8YYBWF5c+toF4u8OUQn+fb76mh5MIuLMZuV848o4ryZqPmeKYL7mKI0jBoUVIxenw5yfacll6ezs1YxmNok1tZMcc6gNbLtHg/yKuMH2y2lXsF4H7C/u9YcczbdWX2YR2l/SpBJtY+OqyDQG7fRAs8py4fg8jPRk0jQJOQljCK92NaJ7QigTIcFOuCefF3p50NSwm2ayXpIXCQuIguite6B+LaexXbVVR0GD5/cWrxwPegXEoJN9JJjcxWF+VZutcaEtgSGWl171BzK+onP0tBRG9IWWQEMFm7vOySQ== chris.glover@C02ZQ5FFLVDL \ No newline at end of file diff --git a/calm-dsl-bps/blueprints/edge_ai/common_task_library/day-two/ai_inference.sh b/calm-dsl-bps/blueprints/edge_ai/common_task_library/day-two/ai_inference.sh new file mode 100644 index 0000000..c312ddb --- /dev/null +++ b/calm-dsl-bps/blueprints/edge_ai/common_task_library/day-two/ai_inference.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +if [ '@@{calm_array_index}@@' != '0' ]; then + echo "AI Inference must be run from the first VM in the replica set" + exit 0 +fi + +pip3 install -q -r @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/code/torchserve/requirements.txt +export PATH=$PATH:/home/ubuntu/.local/bin + +MODEL_NAME="@@{AI_MODEL_NAME}@@" +echo $MODEL_NAME +TRAINING_OUTPUT_FOLDER="@@{AI_TRAINING_OUTPUT_FOLDER}@@" +echo $TRAINING_OUTPUT_FOLDER +TRAINING_OUTPUT_FILE="@@{AI_TRAINING_OUTPUT_FILE}@@" +echo $TRAINING_OUTPUT_FILE + +if [ -z "$MODEL_NAME" ] && [ -z "$TRAINING_OUTPUT_FOLDER" ] && [ -z "$TRAINING_OUTPUT_FILE" ]; +then + echo "bash @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/code/torchserve/run.sh -n @@{AI_MODEL_NAME_DEFAULT}@@ -d @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/data -g 1 @@{EXTRA_PARAMS}@@" + bash @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/code/torchserve/run.sh -n @@{AI_MODEL_NAME_DEFAULT}@@ -d @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/data -g 1 @@{EXTRA_PARAMS}@@ +elif [ -n "$MODEL_NAME" ] && [ -n "$TRAINING_OUTPUT_FOLDER" ] && [ -n "$TRAINING_OUTPUT_FILE" ]; +then + echo "@@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/code/torchserve/run.sh -n @@{AI_MODEL_NAME}@@ -d @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/data -m @@{NFS_MOUNT_POINT}@@/@@{AI_TRAINING_OUTPUT_FOLDER}@@/@@{AI_TRAINING_OUTPUT_FILE}@@ -g 1 @@{EXTRA_PARAMS}@@" + bash @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/code/torchserve/run.sh -n @@{AI_MODEL_NAME}@@ -d @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/data -m @@{NFS_MOUNT_POINT}@@/@@{AI_TRAINING_OUTPUT_FOLDER}@@/@@{AI_TRAINING_OUTPUT_FILE}@@ -g 1 @@{EXTRA_PARAMS}@@ +elif [ -z "$MODEL_NAME" 
] && [ -z "$TRAINING_OUTPUT_FOLDER" ] && [ -n "$TRAINING_OUTPUT_FILE" ]; +then + echo "@@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/code/torchserve/run.sh -n @@{AI_MODEL_NAME_DEFAULT}@@ -d @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/data -m @@{NFS_MOUNT_POINT}@@/@@{AI_TRAINING_OUTPUT_FOLDER_DEFAULT}@@/@@{AI_TRAINING_OUTPUT_FILE}@@ -g 1 @@{EXTRA_PARAMS}@@" + bash @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/code/torchserve/run.sh -n @@{AI_MODEL_NAME_DEFAULT}@@ -d @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/data -m @@{NFS_MOUNT_POINT}@@/@@{AI_TRAINING_OUTPUT_FOLDER_DEFAULT}@@/@@{AI_TRAINING_OUTPUT_FILE}@@ -g 1 @@{EXTRA_PARAMS}@@ +elif [ -z "$MODEL_NAME" ] && [ -n "$TRAINING_OUTPUT_FOLDER" ] && [ -z "$TRAINING_OUTPUT_FILE" ]; +then + echo "@@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/code/torchserve/run.sh -n @@{AI_MODEL_NAME_DEFAULT}@@ -d @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/data -m @@{NFS_MOUNT_POINT}@@/@@{AI_TRAINING_OUTPUT_FOLDER}@@/@@{AI_TRAINING_OUTPUT_FILE_DEFAULT}@@ -g 1 @@{EXTRA_PARAMS}@@" + bash @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/code/torchserve/run.sh -n @@{AI_MODEL_NAME_DEFAULT}@@ -d @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/data -m @@{NFS_MOUNT_POINT}@@/@@{AI_TRAINING_OUTPUT_FOLDER}@@/@@{AI_TRAINING_OUTPUT_FILE_DEFAULT}@@ -g 1 @@{EXTRA_PARAMS}@@ +elif [ -n "$MODEL_NAME" ] && [ -z "$TRAINING_OUTPUT_FOLDER" ] && [ -z "$TRAINING_OUTPUT_FILE" ]; +then + echo "@@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/code/torchserve/run.sh -n @@{AI_MODEL_NAME}@@ -d @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/data -g 1 @@{EXTRA_PARAMS}@@" + bash @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/code/torchserve/run.sh -n @@{AI_MODEL_NAME}@@ -d @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/data -g 1 @@{EXTRA_PARAMS}@@ +fi \ No newline at end of file diff --git a/calm-dsl-bps/blueprints/edge_ai/common_task_library/day-two/ai_inference_start.sh b/calm-dsl-bps/blueprints/edge_ai/common_task_library/day-two/ai_inference_start.sh new file mode 100644 index 0000000..d74b439 --- /dev/null +++ b/calm-dsl-bps/blueprints/edge_ai/common_task_library/day-two/ai_inference_start.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +if [ '@@{calm_array_index}@@' != '0' ]; then + echo "AI Inference must be run from the first VM in the replica set" + exit 0 +fi + +pip3 install -q -r @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/code/torchserve/requirements.txt +export PATH=$PATH:/home/ubuntu/.local/bin + +MODEL_NAME="@@{AI_MODEL_NAME}@@" +echo $MODEL_NAME +TRAINING_OUTPUT_FOLDER="@@{AI_TRAINING_OUTPUT_FOLDER}@@" +echo $TRAINING_OUTPUT_FOLDER +TRAINING_OUTPUT_FILE="@@{AI_TRAINING_OUTPUT_FILE}@@" +echo $TRAINING_OUTPUT_FILE + +if [ -z "$MODEL_NAME" ] && [ -z "$TRAINING_OUTPUT_FOLDER" ] && [ -z "$TRAINING_OUTPUT_FILE" ]; +then + echo "bash @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/code/torchserve/run.sh -n @@{AI_MODEL_NAME_DEFAULT}@@ -d @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/data -g 1 -k @@{EXTRA_PARAMS}@@" + 
nohup @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/code/torchserve/run.sh -n @@{AI_MODEL_NAME_DEFAULT}@@ -d @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/data -g 1 -k @@{EXTRA_PARAMS}@@ +elif [ -n "$MODEL_NAME" ] && [ -n "$TRAINING_OUTPUT_FOLDER" ] && [ -n "$TRAINING_OUTPUT_FILE" ]; +then + echo "@@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/code/torchserve/run.sh -n @@{AI_MODEL_NAME}@@ -d @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/data -m @@{NFS_MOUNT_POINT}@@/@@{AI_TRAINING_OUTPUT_FOLDER}@@/@@{AI_TRAINING_OUTPUT_FILE}@@ -g 1 -k @@{EXTRA_PARAMS}@@" + nohup @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/code/torchserve/run.sh -n @@{AI_MODEL_NAME}@@ -d @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/data -m @@{NFS_MOUNT_POINT}@@/@@{AI_TRAINING_OUTPUT_FOLDER}@@/@@{AI_TRAINING_OUTPUT_FILE}@@ -g 1 -k @@{EXTRA_PARAMS}@@ +elif [ -z "$MODEL_NAME" ] && [ -z "$TRAINING_OUTPUT_FOLDER" ] && [ -n "$TRAINING_OUTPUT_FILE" ]; +then + echo "@@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/code/torchserve/run.sh -n @@{AI_MODEL_NAME_DEFAULT}@@ -d @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/data -m @@{NFS_MOUNT_POINT}@@/@@{AI_TRAINING_OUTPUT_FOLDER_DEFAULT}@@/@@{AI_TRAINING_OUTPUT_FILE}@@ -g 1 -k @@{EXTRA_PARAMS}@@" + nohup @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/code/torchserve/run.sh -n @@{AI_MODEL_NAME_DEFAULT}@@ -d @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/data -m @@{NFS_MOUNT_POINT}@@/@@{AI_TRAINING_OUTPUT_FOLDER_DEFAULT}@@/@@{AI_TRAINING_OUTPUT_FILE}@@ -g 1 -k @@{EXTRA_PARAMS}@@ +elif [ -z "$MODEL_NAME" ] && [ -n "$TRAINING_OUTPUT_FOLDER" ] && [ -z "$TRAINING_OUTPUT_FILE" ]; +then + echo "@@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/code/torchserve/run.sh -n @@{AI_MODEL_NAME_DEFAULT}@@ -d @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/data -m @@{NFS_MOUNT_POINT}@@/@@{AI_TRAINING_OUTPUT_FOLDER}@@/@@{AI_TRAINING_OUTPUT_FILE_DEFAULT}@@ -g 1 -k @@{EXTRA_PARAMS}@@" + nohup @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/code/torchserve/run.sh -n @@{AI_MODEL_NAME_DEFAULT}@@ -d @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/data -m @@{NFS_MOUNT_POINT}@@/@@{AI_TRAINING_OUTPUT_FOLDER}@@/@@{AI_TRAINING_OUTPUT_FILE_DEFAULT}@@ -g 1 -k @@{EXTRA_PARAMS}@@ +elif [ -n "$MODEL_NAME" ] && [ -z "$TRAINING_OUTPUT_FOLDER" ] && [ -z "$TRAINING_OUTPUT_FILE" ]; +then + echo "@@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/code/torchserve/run.sh -n @@{AI_MODEL_NAME}@@ -d @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/data -g 1 -k @@{EXTRA_PARAMS}@@" + nohup @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/code/torchserve/run.sh -n @@{AI_MODEL_NAME}@@ -d @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/data -g 1 -k @@{EXTRA_PARAMS}@@ +fi + +#bash @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/inference/code/torchserve/run.sh -n resnet50 -m 
@@{NFS_MOUNT_POINT}@@/@@{AI_TRAINING_OUTPUT_FOLDER}@@/@@{AI_TRAINING_OUTPUT_FILE}@@ -g 1 -k \ No newline at end of file diff --git a/calm-dsl-bps/blueprints/edge_ai/common_task_library/day-two/ai_inference_stop.sh b/calm-dsl-bps/blueprints/edge_ai/common_task_library/day-two/ai_inference_stop.sh new file mode 100644 index 0000000..891ae6f --- /dev/null +++ b/calm-dsl-bps/blueprints/edge_ai/common_task_library/day-two/ai_inference_stop.sh @@ -0,0 +1,2 @@ +export PATH=$PATH:/home/ubuntu/.local/bin +torchserve --stop \ No newline at end of file diff --git a/calm-dsl-bps/blueprints/edge_ai/common_task_library/day-two/ai_training.sh b/calm-dsl-bps/blueprints/edge_ai/common_task_library/day-two/ai_training.sh new file mode 100644 index 0000000..030e4f5 --- /dev/null +++ b/calm-dsl-bps/blueprints/edge_ai/common_task_library/day-two/ai_training.sh @@ -0,0 +1,10 @@ +pip3 install -q -r @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/training/code/requirements.txt + +if [ '@@{calm_array_index}@@' != '0' ]; +then exit 0 +fi + +#tar -xvf $WORK_DIR/training/data/training_images.tar -C @@{NFS_MOUNT_POINT}@@/dataset +sudo mkdir @@{NFS_MOUNT_POINT}@@/@@{AI_TRAINING_OUTPUT_FOLDER}@@ && sudo chmod 777 @@{NFS_MOUNT_POINT}@@/@@{AI_TRAINING_OUTPUT_FOLDER}@@ +echo @@{calm_array_address}@@ +bash @@{NFS_MOUNT_POINT}@@/@@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/training/code/run.sh -n @@{calm_int(WORKER)}@@ -h @@{calm_array_address}@@ -m @@{address}@@ -c "python3 training.py --data-folder @@{NFS_MOUNT_POINT}@@/dataset --output-folder @@{NFS_MOUNT_POINT}@@/@@{AI_TRAINING_OUTPUT_FOLDER}@@ --output-model-file @@{AI_TRAINING_OUTPUT_FILE}@@ @@{EXTRA_PARAMS}@@" \ No newline at end of file diff --git a/calm-dsl-bps/blueprints/edge_ai/common_task_library/day-two/nai_dl_bench_data.sh b/calm-dsl-bps/blueprints/edge_ai/common_task_library/day-two/nai_dl_bench_data.sh new file mode 100644 index 0000000..a67af2e --- /dev/null +++ b/calm-dsl-bps/blueprints/edge_ai/common_task_library/day-two/nai_dl_bench_data.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +if [ '@@{calm_array_index}@@' != '0' ]; +then exit 0 +fi + +cd @@{NFS_MOUNT_POINT}@@ + +if [ ! 
-d @@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@ ]; +then + echo "Getting the NAI DL BENCH DATA Version @@{NAI_DL_BENCH_VERSION}@@" + curl -O -L "https://github.com/nutanix/nai-dl-bench/archive/v@@{NAI_DL_BENCH_VERSION}@@.tar.gz" > "v@@{NAI_DL_BENCH_VERSION}@@.tar.gz" + echo "Unzipping the NAI DL BENCH DATA Version @@{NAI_DL_BENCH_VERSION}@@" + tar -xvf v@@{NAI_DL_BENCH_VERSION}@@.tar.gz @@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@ + mkdir -p dataset + echo "Unzipping the training dataset to the dataset folder" + tar -xvf @@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@/training/data/training_images.tar -C dataset +elif [ -d @@{NFS_WORKING_DIRECTORY}@@@@{NAI_DL_BENCH_VERSION}@@ ]; +then + echo "NAI DL BENCH DATA Version @@{NAI_DL_BENCH_VERSION}@@ is already present on the NFS share path @@{NFS_PATH}@@" + exit 0 +fi \ No newline at end of file diff --git a/calm-dsl-bps/blueprints/edge_ai/common_task_library/install/hostname.sh b/calm-dsl-bps/blueprints/edge_ai/common_task_library/install/hostname.sh new file mode 100644 index 0000000..5b86d4f --- /dev/null +++ b/calm-dsl-bps/blueprints/edge_ai/common_task_library/install/hostname.sh @@ -0,0 +1 @@ +echo "@@{calm_array_address[0]}@@ @@{calm_array_name[0]}@@" | sudo tee -a /etc/hosts \ No newline at end of file diff --git a/calm-dsl-bps/blueprints/edge_ai/common_task_library/install/ngt/install_ngt.sh b/calm-dsl-bps/blueprints/edge_ai/common_task_library/install/ngt/install_ngt.sh new file mode 100644 index 0000000..7f9698d --- /dev/null +++ b/calm-dsl-bps/blueprints/edge_ai/common_task_library/install/ngt/install_ngt.sh @@ -0,0 +1,9 @@ +# region headers +# task_name: InstallNgt +# description: mounts the ngt iso (assuming /dev/sr0) and installs +# Nutanix Guest Tools. +# output vars: none +# dependencies: none +# endregion +sudo mount /dev/sr0 /media +sudo python3 /media/installer/linux/install_ngt.py diff --git a/calm-dsl-bps/blueprints/edge_ai/common_task_library/install/setup.sh b/calm-dsl-bps/blueprints/edge_ai/common_task_library/install/setup.sh new file mode 100644 index 0000000..d265220 --- /dev/null +++ b/calm-dsl-bps/blueprints/edge_ai/common_task_library/install/setup.sh @@ -0,0 +1,15 @@ +sudo sed -i "/#\$nrconf{restart} = 'i';/s/.*/\$nrconf{restart} = 'a';/" /etc/needrestart/needrestart.conf +sudo apt-get -q update +sudo apt -q install openjdk-17-jdk python3-pip -y +curl -fSsl -O https://us.download.nvidia.com/tesla/@@{NVIDIA_DRIVER_VERSION}@@/NVIDIA-Linux-x86_64-@@{NVIDIA_DRIVER_VERSION}@@.run +sudo sh NVIDIA-Linux-x86_64-@@{NVIDIA_DRIVER_VERSION}@@.run -s +sudo apt -q install openmpi-bin -y +sudo apt -q install nfs-common -y +sudo mkdir -p @@{NFS_MOUNT_POINT}@@ +sudo echo "@@{NFS_PATH}@@ @@{NFS_MOUNT_POINT}@@ nfs nconnect=3 0 1" | sudo tee -a /etc/fstab +sudo mount -av +sudo mount @@{NFS_MOUNT_POINT}@@ +#export WORK_DIR=ai +#sudo mkdir $WORK_DIR +#curl -O -L "https://github.com/nutanix/nai-dl-bench/archive/v0.2.0.tar.gz" > "$WORK_DIR/v0.2.0.tar.gz" +#tar -xvf $WORK_DIR/v0.2.0.tar.gz -C $WORK_DIR --strip-components=1 \ No newline at end of file diff --git a/calm-dsl-bps/blueprints/edge_ai/common_task_library/install/ssh_key_copy.sh b/calm-dsl-bps/blueprints/edge_ai/common_task_library/install/ssh_key_copy.sh new file mode 100644 index 0000000..64e0a0e --- /dev/null +++ b/calm-dsl-bps/blueprints/edge_ai/common_task_library/install/ssh_key_copy.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +echo 'set -o vi' >> ~/.bashrc +echo 'alias vi=vim' >> ~/.bashrc +echo 'StrictHostKeyChecking no' > ~/.ssh/config +chmod 600 
~/.ssh/config + +echo "Creating local private and public SSH Keys" +echo "@@{cred_os.secret}@@" > ~/.ssh/id_rsa +echo "@@{os_cred_public_key}@@" > ~/.ssh/id_rsa.pub +chmod 600 ~/.ssh/id_rsa \ No newline at end of file diff --git a/calm-dsl-bps/blueprints/edge_ai/common_task_library/install/test_calm_array.sh b/calm-dsl-bps/blueprints/edge_ai/common_task_library/install/test_calm_array.sh new file mode 100644 index 0000000..1270439 --- /dev/null +++ b/calm-dsl-bps/blueprints/edge_ai/common_task_library/install/test_calm_array.sh @@ -0,0 +1,16 @@ +if [ '@@{calm_array_index}@@' != '0' ]; +then exit 0 +fi + +echo "StrictHostKeyChecking=no" >| ~/.ssh/config +ssh-keygen -t rsa -b 4096 -f ~/.ssh/edge_rsa -q -N "" + +master_node='@@{address}@@' + +for i in $(echo '@@{calm_array_address}@@' | tr ',' '\n') +do +if [ $i != "$master_node" ]; +then +ssh-copy-id -i /home/ubuntu/.ssh/edge_rsa ubuntu@$i +fi +done \ No newline at end of file diff --git a/calm-dsl-bps/blueprints/edge_ai/common_task_library/install/validate_driver.sh b/calm-dsl-bps/blueprints/edge_ai/common_task_library/install/validate_driver.sh new file mode 100644 index 0000000..76563ed --- /dev/null +++ b/calm-dsl-bps/blueprints/edge_ai/common_task_library/install/validate_driver.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +if [ -f /proc/driver/nvidia/version ]; +then + cat /proc/driver/nvidia/version + export PATH="$HOME/.local/bin:$PATH" + exit 0 +else + echo "The NVIDIA driver is not present on the system. Check the output for any errors" + exit 1 +fi \ No newline at end of file diff --git a/calm-dsl-bps/blueprints/edge_ai/linux/Makefile b/calm-dsl-bps/blueprints/edge_ai/linux/Makefile new file mode 100644 index 0000000..7c8de5e --- /dev/null +++ b/calm-dsl-bps/blueprints/edge_ai/linux/Makefile @@ -0,0 +1,147 @@ +CALM_ENVIRONMENT ?= +export CALM_ENVIRONMENT + +BASE64_FLAG := +ifeq ($(OS),Windows_NT) + BASE64_FLAG = -d +else + UNAME_S := $(shell uname -s) + ifeq ($(UNAME_S),Linux) + BASE64_FLAG = -d + endif + ifeq ($(UNAME_S),Darwin) + BASE64_FLAG = -D + endif +endif + +PC_IP_ADDRESS := $$(cat ../../.local/${CALM_ENVIRONMENT}/pc_ip_address) +PC_PORT := 9440 +PC_USER := $$(cat ../../.local/${CALM_ENVIRONMENT}/prism_central_user) +PC_PASS := $$(cat ../../.local/${CALM_ENVIRONMENT}/prism_central_password | base64 $(BASE64_FLAG)) + +DSL_INIT_PARAMS := --ip ${PC_IP_ADDRESS} --port ${PC_PORT} --username ${PC_USER} --password ${PC_PASS} + +GIT_BRANCH_NAME := $(shell git rev-parse --abbrev-ref HEAD | head -c15) +GIT_COMMIT_ID := $(shell git rev-parse --short HEAD) +GIT_COMMIT_ID_PREVIOUS := $(shell git rev-list --parents -n 1 $$(git rev-parse --short HEAD) | awk -F'[ ]' '{print $$2}' | cut -c -7) + +test-commit-id: + @echo ${GIT_COMMIT_ID_PREVIOUS} + +# Blueprint Git Tag removes dot notation because dots are not allowed in Blueprint naming. +BP_GIT_TAG := $(shell git rev-list --tags --max-count=1 | xargs -I {} git describe --tags {} | tr -d '.') + +# Marketplace Git Tag keeps dot notation and removes the 'v' character to stay in line with existing semantic versioning guidelines.
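# For illustration only (not used by any target): with the latest tag at v1.2.3, the blueprint suffix above becomes "v123" (dots stripped, since blueprint names cannot contain them) and the marketplace version below becomes "1.2.3" (leading 'v' stripped).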
+MP_GIT_TAG := $(shell git rev-list --tags --max-count=1 | xargs -I {} git describe --tags {} | tr -d 'v') +MP_GIT_TAG_PREVIOUS := $(shell tags=$$(git rev-list --tags --max-count=2 | awk -F'[ ]' '{print $$1}{print $$2}') && tag=$$(echo $$tags | awk -F'[ ]' '{print$$2}' | xargs -I {} git describe --tags {} | awk -F'[-]' '{print$$1}{print$$2}' | tr ' ' '-') && echo $$tag | tr ' ' '-' | tr -d 'v') + +test-mp-tag: + @echo ${MP_GIT_TAG_PREVIOUS} + +BLUEPRINT_SUFFIX_NAME := +ifeq ($(GIT_BRANCH_NAME),main) + ifeq ($(strip $(BP_GIT_TAG)),) + BLUEPRINT_SUFFIX_NAME = ${CALM_ENVIRONMENT}-${GIT_BRANCH_NAME}-${GIT_COMMIT_ID} + endif + ifneq ($(strip $(BP_GIT_TAG)),) + BLUEPRINT_SUFFIX_NAME = $(BP_GIT_TAG) + endif +else + BLUEPRINT_SUFFIX_NAME := ${CALM_ENVIRONMENT}-${GIT_BRANCH_NAME}-${GIT_COMMIT_ID} +endif + +DSL_FILENAME := "blueprint-lin.py" +BLUEPRINT_NAME := "lin-${BLUEPRINT_SUFFIX_NAME}" +MARKETPLACE_ITEM_NAME := "Ubuntu-Edge-AI" +MARKETPLACE_ITEM_NAME_TEST := "Ubuntu-Edge-AI-Test" +MARKETPLACE_PRJ_LIST ?= phx_CALM_BP_DEV +MARKETPLACE_CATEGORY := "DevOps" +GLOBAL_MARKETPLACE_ICON_NAME := "ubuntu" +CALM_AHV_PROFILE_NAME := Default +CALM_AHV_PROFILE_NAME_ABBR := $(shell echo ${CALM_AHV_PROFILE_NAME} | head -c3 | tr '[:upper:]' '[:lower:]') +AHV_TEST_PARAMSFILE := "tests/ahv-test-params.py" +AHV_APP_TEST_NAME := $(shell echo ${BLUEPRINT_NAME}-${CALM_AHV_PROFILE_NAME_ABBR} | tr '[:upper:]' '[:lower:]') + +ifeq ($(CALM_ENVIRONMENT),phx) + CALM_ENV_AHV_NAME = phx_AHV_phx- +endif +ifeq ($(CALM_ENVIRONMENT),ams) + CALM_ENV_AHV_NAME = ams_AHV_ams- +endif + +print-make-envs: + @echo $(OS) + @echo BASE64_FLAG=$(BASE64_FLAG) + @echo CALM_ENVIRONMENT=$(CALM_ENVIRONMENT) + +compile-bp: + calm compile bp -f ${DSL_FILENAME} + +compile-bp-json: + calm compile bp -f ${DSL_FILENAME} -o json > bp.json + +create-bp: + calm compile bp -f ${DSL_FILENAME} + calm create bp -f ${DSL_FILENAME} --name ${BLUEPRINT_NAME} --force + +launch-ahv-test-bp: + @echo CALM_ENV_AHV_NAME=${CALM_ENV_AHV_NAME} + calm launch bp ${BLUEPRINT_NAME} --app_name ${AHV_APP_TEST_NAME} -ws --environment ${CALM_ENV_AHV_NAME} --profile_name ${CALM_AHV_PROFILE_NAME} \ + --launch_params ${AHV_TEST_PARAMSFILE} + calm get apps --name ${AHV_APP_TEST_NAME} + +launch-ahv-test-mp: + calm launch marketplace item --app-name ${AHV_APP_TEST_NAME} --environment ${CALM_ENV_AHV_NAME} --profile-name ${CALM_AHV_PROFILE_NAME} \ + --launch_params ${AHV_TEST_PARAMSFILE} ${MARKETPLACE_ITEM_NAME_TEST} + calm get apps --name ${AHV_APP_TEST_NAME} + +### PUBLISHING NOTE ### +## The Python script used below sanitizes bp platform data during publishing. Needed in cases where the bp was first created - +## on a project that has the same account that will be used when launched from the marketplace, e.g., bp created on project X, account X with project environment Y and will be +## launched from mp on project X, account X with project environment Z, the bp values, with project environment Y values baked-in, will overrride project environment Z values. +## In summary, without using the script for publishing, Calm Project Environment values will not be used if this is the case and values in the bp will - +## override the environment selected at mp launch. +publish-new-version-bp: + # Publish a new version of an existing blueprint to marketplace manager. This should only be executed from main stable branch. 
+ # calm publish bp -v ${MP_GIT_TAG} --existing_markeplace_bp --with_secrets -n ${MARKETPLACE_ITEM_NAME} -i ${GLOBAL_MARKETPLACE_ICON_NAME} -d "$$(cat mp_meta/bp-description.md | sed /References:/Q)" --project ${MARKETPLACE_PRJ_LIST} --publish_to_marketplace ${BLUEPRINT_NAME} + # sanitize blueprint platform data prior to publishing so that environment configs can be populated at launch time. + python ../../admin_tools/publish_bp_to_mp.py ${DSL_INIT_PARAMS} -v ${MP_GIT_TAG} --existing_markeplace_bp --with_secrets -n ${MARKETPLACE_ITEM_NAME} -i ${GLOBAL_MARKETPLACE_ICON_NAME} -d "$$(cat mp_meta/bp-description.md | sed /References:/q)" --project ${MARKETPLACE_PRJ_LIST} --publish_to_marketplace --blueprint_name ${BLUEPRINT_NAME} --os LINUX --cat ${MARKETPLACE_CATEGORY} + calm get marketplace bps -n ${MARKETPLACE_ITEM_NAME} -a PUBLISHED +publish-new-version-test-bp: + # Publish a new version of an existing test blueprint to marketplace manager. This should only be executed from development branches for testing. + # calm publish bp -v ${MP_GIT_TAG} --existing_markeplace_bp --with_secrets -n ${MARKETPLACE_ITEM_NAME_TEST} -i ${GLOBAL_MARKETPLACE_ICON_NAME} -d "$$(cat mp_meta/bp-description.md | sed /References:/Q)" --project ${MARKETPLACE_PRJ_LIST} --publish_to_marketplace ${BLUEPRINT_NAME} + # sanitize blueprint platform data prior to publishing so that environment configs can be populated at launch time. + python ../../admin_tools/publish_bp_to_mp.py ${DSL_INIT_PARAMS} -v ${GIT_COMMIT_ID} --existing_markeplace_bp --with_secrets -n ${MARKETPLACE_ITEM_NAME_TEST} -i ${GLOBAL_MARKETPLACE_ICON_NAME} -d "$$(cat mp_meta/bp-description.md | sed /References:/q)" --project ${MARKETPLACE_PRJ_LIST} --publish_to_marketplace --blueprint_name ${BLUEPRINT_NAME} --os LINUX --cat ${MARKETPLACE_CATEGORY} + calm get marketplace bps -n ${MARKETPLACE_ITEM_NAME_TEST} -a PUBLISHED + +unpublish-mp-current-tag: + # Unpublish blueprints from marketplace store + calm unpublish marketplace bp -v ${MP_GIT_TAG} -s LOCAL ${MARKETPLACE_ITEM_NAME} + +delete-mp-current-tag: + # Delete approved marketplace item from store + calm delete marketplace bp -v ${MP_GIT_TAG} -s LOCAL ${MARKETPLACE_ITEM_NAME} + +unpublish-mp-previous-tag: + # Unpublish blueprints from marketplace store + calm unpublish marketplace bp -v ${MP_GIT_TAG_PREVIOUS} -s LOCAL ${MARKETPLACE_ITEM_NAME} + +delete-mp-previous-tag: + # Delete approved marketplace item from store + calm delete marketplace bp -v ${MP_GIT_TAG_PREVIOUS} -s LOCAL ${MARKETPLACE_ITEM_NAME} + +unpublish-test-mp-current-commitid: + # Unpublish blueprints from marketplace store + calm unpublish marketplace bp -v ${GIT_COMMIT_ID} -s LOCAL ${MARKETPLACE_ITEM_NAME_TEST} + +delete-test-mp-current-commitid: + # Delete approved marketplace item from store + calm delete marketplace bp -v ${GIT_COMMIT_ID} -s LOCAL ${MARKETPLACE_ITEM_NAME_TEST} + +unpublish-test-mp-previous-commitid: + # Unpublish blueprints from marketplace store + calm unpublish marketplace bp -v ${GIT_COMMIT_ID_PREVIOUS} -s LOCAL ${MARKETPLACE_ITEM_NAME_TEST} + +delete-test-mp-previous-commitid: + # Delete approved marketplace item from store + calm delete marketplace bp -v ${GIT_COMMIT_ID_PREVIOUS} -s LOCAL ${MARKETPLACE_ITEM_NAME_TEST} \ No newline at end of file diff --git a/calm-dsl-bps/blueprints/edge_ai/linux/blueprint-lin.py b/calm-dsl-bps/blueprints/edge_ai/linux/blueprint-lin.py new file mode 100644 index 0000000..72e8319 --- /dev/null +++ b/calm-dsl-bps/blueprints/edge_ai/linux/blueprint-lin.py @@ -0,0 +1,266 @@ +""" 
+Multi-VM Blueprint for Linux on AHV Utilizing AI datasets +""" +import os +from pathlib import Path +import yaml, json +from calm.dsl.builtins import * +from calm.dsl.providers import get_provider +from helpers.general_utils import get_json_file_contents + +project_root = Path(__file__).parent.parent.parent.parent.parent +json_input = get_json_file_contents(f"{project_root}/config/edge-ai.json") +ACCOUNT_NAME = json_input["account_name"] +bp_input = json_input["bp_list"] + +for bp in bp_input: + if bp["name"] == "edge-ai-test": + #print('bp name is {}'.format(bp["name"])) + subnet_name = bp["subnet"] + #print('subnet is {}'.format(subnet_name)) + cluster_name = bp["cluster"] + #print('cluster is {}'.format(cluster_name)) + image_name = bp["image"] + #print('image is {}'.format(image_name)) + AHVProvider = get_provider("AHV_VM") + ApiObj = AHVProvider.get_api_obj() + acct_ref = Ref.Account(ACCOUNT_NAME) + acct_data = acct_ref.compile() + account_uuid = acct_data["uuid"] + res_subnets = ApiObj.subnets(account_uuid=account_uuid) + #print('subnet data is {}'.format(res_subnets)) + net_name_uuid_list = [] + for entity in res_subnets.get("entities", []): + if entity['status']['cluster_reference']['name'] == cluster_name and entity['status']['name'] == subnet_name: + x = {"name": entity['status']['name'], "uuid": entity['metadata']['uuid']} + net_name_uuid_list.append(x) + #print('net list is {}'.format(net_name_uuid_list)) + res_images = ApiObj.images(account_uuid=account_uuid) + image_name_uuid_list = [] + for entity in res_images.get("entities", []): + if entity['status']['name'] == image_name: + x = {"name": entity['status']['name'], "uuid": entity['metadata']['uuid']} + image_name_uuid_list.append(x) + #print('image list is {}'.format(image_name_uuid_list)) + else: + raise Exception("Cluster, Subnet or Image not specified") + +bp_root_folder = Path(__file__).parent.parent + +TSHIRT_SPEC_PATH = (f"{bp_root_folder}/tshirt-specs/tshirt_specs.yaml") +TSHIRT_SPECS = yaml.safe_load(read_file(TSHIRT_SPEC_PATH, depth=3)) +COMMON_TASK_LIBRARY = f"{bp_root_folder}/common_task_library" +INSTALL_SCRIPTS_DIRECTORY = f"{COMMON_TASK_LIBRARY}/install" +DAY2_SCRIPTS_DIRECTORY = f"{COMMON_TASK_LIBRARY}/day-two" + +# Secret Variables +if file_exists(f"{bp_root_folder}/.local/edge-key"): + BP_CRED_cred_os_KEY = read_local_file(f"{bp_root_folder}/.local/edge-key") + #print(BP_CRED_cred_os_KEY) +else: + BP_CRED_cred_os_KEY = "nutanix" +if file_exists(f"{bp_root_folder}/.local/edge-key.pub"): + BP_CRED_cred_os_public_KEY = read_local_file(f"{bp_root_folder}/.local/edge-key.pub") + #print(BP_CRED_cred_os_public_KEY) +else: + BP_CRED_cred_os_public_KEY = "nutanix" + +# Credentials +BP_CRED_cred_os = basic_cred("ubuntu",BP_CRED_cred_os_KEY,name="cred_os",type="KEY",default=True) + +class VM_Provision(Service): + @action + def NGTTools_Tasks(): + CalmTask.Exec.ssh(name="install_NGT",filename=INSTALL_SCRIPTS_DIRECTORY + "/ngt/install_ngt.sh",target=ref(VM_Provision),) + + @action + def Configure_VM(): + CalmTask.Exec.ssh(name="ssh_key_copy",filename=INSTALL_SCRIPTS_DIRECTORY + "/ssh_key_copy.sh",target=ref(VM_Provision),) + CalmTask.Exec.ssh(name="setup",filename=INSTALL_SCRIPTS_DIRECTORY + "/setup.sh",target=ref(VM_Provision),) + CalmTask.Exec.ssh(name="validate driver",filename=INSTALL_SCRIPTS_DIRECTORY + "/validate_driver.sh",target=ref(VM_Provision),) + +class AHVVM_Small(Substrate): + os_type = "Linux" + provider_type = "AHV_VM" + provider_spec = read_ahv_spec("specs/ahv-provider-spec.yaml") + 
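# Note: specs/ahv-provider-spec.yaml is only a template - the fields it marks as "replaced-in-bp" (memory, sockets, vCPUs per socket, and the subnet and image references) are overwritten further down in this class from tshirt_specs.yaml and from the subnet/image UUID lookups performed against the account above.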
provider_spec_editables = read_spec(os.path.join("specs", "create_spec_editables.yaml")) + readiness_probe = readiness_probe(connection_type="SSH",disabled=False,retries="5",connection_port=22,address="@@{platform.status.resources.nic_list[0].ip_endpoint_list[0].ip}@@",delay_secs="30",credential=ref(BP_CRED_cred_os),) + # update CPU, Memory based on environment specific configs =============================================vvvvvv + provider_spec.spec["resources"]["num_sockets"] = TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["small"]["num_sockets"] + provider_spec.spec["resources"]["num_vcpus_per_socket"] = TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["small"]["num_vcpus_per_socket"] + provider_spec.spec["resources"]["memory_size_mib"] = TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["small"]["memory_size_mib"] + # update nic ===========================================================================================vvvvvv + provider_spec.spec["resources"]["nic_list"][0]["subnet_reference"]["name"] = str(net_name_uuid_list[0]['name']) + provider_spec.spec["resources"]["nic_list"][0]["subnet_reference"]["uuid"] = str(net_name_uuid_list[0]['uuid']) + # update image ==========================================================================================vvvvvv + provider_spec.spec["resources"]["disk_list"][0]["data_source_reference"]["name"] = str(image_name_uuid_list[0]['name']) + provider_spec.spec["resources"]["disk_list"][0]["data_source_reference"]["uuid"] = str(image_name_uuid_list[0]['uuid']) + +class AHVVM_Medium(AHVVM_Small): + provider_spec = read_ahv_spec("specs/ahv-provider-spec.yaml") + provider_spec_editables = read_spec(os.path.join("specs", "create_spec_editables.yaml")) + readiness_probe = readiness_probe(connection_type="SSH",disabled=False,retries="5",connection_port=22,address="@@{platform.status.resources.nic_list[0].ip_endpoint_list[0].ip}@@",delay_secs="30",credential=ref(BP_CRED_cred_os),) + # update CPU, Memory based on environment specific configs =============================================vvvvvv + provider_spec.spec["resources"]["num_sockets"] = TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["medium"]["num_sockets"] + provider_spec.spec["resources"]["num_vcpus_per_socket"] = TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["medium"]["num_vcpus_per_socket"] + provider_spec.spec["resources"]["memory_size_mib"] = TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["medium"]["memory_size_mib"] + # update nic ===========================================================================================vvvvvv + provider_spec.spec["resources"]["nic_list"][0]["subnet_reference"]["name"] = str(net_name_uuid_list[0]['name']) + provider_spec.spec["resources"]["nic_list"][0]["subnet_reference"]["uuid"] = str(net_name_uuid_list[0]['uuid']) + # update image ==========================================================================================vvvvvv + provider_spec.spec["resources"]["disk_list"][0]["data_source_reference"]["name"] = str(image_name_uuid_list[0]['name']) + provider_spec.spec["resources"]["disk_list"][0]["data_source_reference"]["uuid"] = str(image_name_uuid_list[0]['uuid']) + +class AHVVM_Large(AHVVM_Small): + provider_spec = read_ahv_spec("specs/ahv-provider-spec.yaml") + provider_spec_editables = read_spec(os.path.join("specs", "create_spec_editables.yaml")) + readiness_probe = 
readiness_probe(connection_type="SSH",disabled=False,retries="5",connection_port=22,address="@@{platform.status.resources.nic_list[0].ip_endpoint_list[0].ip}@@",delay_secs="30",credential=ref(BP_CRED_cred_os),) + # update CPU, Memory based on environment specific configs =============================================vvvvvv + provider_spec.spec["resources"]["num_sockets"] = TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["large"]["num_sockets"] + provider_spec.spec["resources"]["num_vcpus_per_socket"] = TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["large"]["num_vcpus_per_socket"] + provider_spec.spec["resources"]["memory_size_mib"] = TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["large"]["memory_size_mib"] + # update nic ===========================================================================================vvvvvv + provider_spec.spec["resources"]["nic_list"][0]["subnet_reference"]["name"] = str(net_name_uuid_list[0]['name']) + provider_spec.spec["resources"]["nic_list"][0]["subnet_reference"]["uuid"] = str(net_name_uuid_list[0]['uuid']) + # update image ==========================================================================================vvvvvv + provider_spec.spec["resources"]["disk_list"][0]["data_source_reference"]["name"] = str(image_name_uuid_list[0]['name']) + provider_spec.spec["resources"]["disk_list"][0]["data_source_reference"]["uuid"] = str(image_name_uuid_list[0]['uuid']) + +class AHV_Package_Sml(Package): + services = [ref(VM_Provision)] + + @action + def __install__(): + VM_Provision.NGTTools_Tasks(name="Install NGT") + VM_Provision.Configure_VM(name="Configure VM") + +class AHV_Package_Med(AHV_Package_Sml): + services = [ref(VM_Provision)] + +class AHV_Package_Lrg(AHV_Package_Sml): + services = [ref(VM_Provision)] + +class AHV_Deployment_Sml(Deployment): + min_replicas = "1" + max_replicas = "100" + default_replicas = "@@{WORKER}@@" + packages = [ref(AHV_Package_Sml)] + substrate = ref(AHVVM_Small) + +class AHV_Deployment_Medium(Deployment): + min_replicas = "1" + max_replicas = "100" + default_replicas = "@@{WORKER}@@" + packages = [ref(AHV_Package_Med)] + substrate = ref(AHVVM_Medium) + +class AHV_Deployment_Large(Deployment): + min_replicas = "1" + max_replicas = "100" + default_replicas = "@@{WORKER}@@" + packages = [ref(AHV_Package_Lrg)] + substrate = ref(AHVVM_Large) + +class Common(Profile): + os_cred_public_key = CalmVariable.Simple.Secret(BP_CRED_cred_os_public_KEY,label="OS Cred Public Key",is_hidden=True,description="SSH public key for OS CRED user.") + NFS_PATH = CalmVariable.Simple("",label="NFS Share Path",regex="^(?:[0-9]{1,3}\.){3}[0-9]{1,3}:(\/[a-zA-Z0-9_-]+)+$",validate_regex=True,is_mandatory=True,is_hidden=False,runtime=True,description="Enter the path to your IP NFS share. 
For example 10.10.10.10:/sharename") + NFS_MOUNT_POINT = CalmVariable.Simple("/mnt/data",label="NFS Mount Point",is_mandatory=False,is_hidden=True,runtime=False,description="Local NFS Mount Point") + WORKER = CalmVariable.Simple("1",label="",is_mandatory=False,is_hidden=False,runtime=True,description="") + NVIDIA_DRIVER_VERSION = CalmVariable.WithOptions.Predefined.string(["515.86.01"],label="Please select the NVidia driver version to be used.",default="515.86.01",is_mandatory=True,is_hidden=False,runtime=True,description="",) + NFS_WORKING_DIRECTORY = CalmVariable.WithOptions(["nai-dl-bench-"],label="AI Training Working Directory",default="nai-dl-bench-",is_mandatory=True,is_hidden=False,runtime=True,description="",) + + @action + def NaiDlBench_Data_Setup(name="NAI DL Bench Data Setup"): + CalmTask.Exec.ssh(name="NaiDlBench_Data_Setup",filename=DAY2_SCRIPTS_DIRECTORY + "/nai_dl_bench_data.sh",target=ref(VM_Provision),) + NAI_DL_BENCH_VERSION = CalmVariable.WithOptions.Predefined.string(["0.2.3"],label="Please select the version to be used.",default="0.2.3",is_mandatory=True,is_hidden=False,runtime=True,description="",) + + @action + def AITraining(name="AI Training"): + CalmTask.Exec.ssh(name="AI Training",filename=DAY2_SCRIPTS_DIRECTORY + "/ai_training.sh",target=ref(VM_Provision),) + NAI_DL_BENCH_VERSION = CalmVariable.WithOptions.Predefined.string(["0.2.3"],label="Please select the version to be used.",default="0.2.3",is_mandatory=True,is_hidden=False,runtime=True,description="",) + AI_TRAINING_OUTPUT_FOLDER = CalmVariable.Simple("training-output",label="AI Training Output Folder",is_mandatory=True,is_hidden=False,runtime=True,description="",) + AI_TRAINING_OUTPUT_FILE = CalmVariable.Simple("resnet.pth",label="AI Training Output File",is_mandatory=True,is_hidden=False,runtime=True,description="",) + EXTRA_PARAMS = CalmVariable.Simple("",label="AI Training Optional Parameters",is_mandatory=False,is_hidden=False,runtime=True,description="OPTIONAL - Leave blank if not needed - Enter any extra parameters needed, e.g., --quiet, etc.",) + + @action + def AIBatchInference(name="AI Batch Inference"): + CalmTask.Exec.ssh(name="AI Batch Inference",filename=DAY2_SCRIPTS_DIRECTORY + "/ai_inference.sh",target=ref(VM_Provision),) + NAI_DL_BENCH_VERSION = CalmVariable.WithOptions.Predefined.string(["0.2.3"],label="Please select the version to be used.",default="0.2.3",is_mandatory=True,is_hidden=False,runtime=True,description="",) + AI_TRAINING_OUTPUT_FOLDER = CalmVariable.Simple("",label="AI Training Output Folder",is_mandatory=False,is_hidden=False,runtime=True,description="OPTIONAL - Leave blank if not needed",) + AI_TRAINING_OUTPUT_FOLDER_DEFAULT = CalmVariable.Simple("training-output",label="AI Training Output Folder Default Value",is_mandatory=False,is_hidden=True,runtime=False,description="Default value for the AI Training Output Folder",) + AI_TRAINING_OUTPUT_FILE = CalmVariable.Simple("",label="AI Training Output File",is_mandatory=False,is_hidden=False,runtime=True,description="OPTIONAL - Leave blank if not needed",) + AI_TRAINING_OUTPUT_FILE_DEFAULT = CalmVariable.Simple("resnet.pth",label="AI Training Output File Default Value",is_mandatory=False,is_hidden=True,runtime=False,description="Default value for the AI Training Output File",) + EXTRA_PARAMS = CalmVariable.Simple("",label="AI Inference Optional Extra Parameters",is_mandatory=False,is_hidden=False,runtime=True,description="OPTIONAL - Leave blank if not needed - Enter any extra parameters needed, e.g., --quiet, etc.",) + 
AI_MODEL_NAME = CalmVariable.Simple("",label="AI Training Model Name",is_mandatory=False,is_hidden=False,runtime=True,description="OPTIONAL - Leave blank if not needed - Enter the AI model name if not using the default value",) + AI_MODEL_NAME_DEFAULT = CalmVariable.Simple("resnet50",label="AI Training Model Name Default Value",is_mandatory=False,is_hidden=True,runtime=False,description="Default value for the AI model name",) + + @action + def AIStartInferenceService(name="AI Start Inference Service"): + CalmTask.Exec.ssh(name="AI Start Inference Service",filename=DAY2_SCRIPTS_DIRECTORY + "/ai_inference_start.sh",target=ref(VM_Provision),) + NAI_DL_BENCH_VERSION = CalmVariable.WithOptions.Predefined.string(["0.2.3"],label="Please select the version to be used.",default="0.2.3",is_mandatory=True,is_hidden=False,runtime=True,description="",) + AI_TRAINING_OUTPUT_FOLDER = CalmVariable.Simple("",label="AI Training Output Folder",is_mandatory=False,is_hidden=False,runtime=True,description="OPTIONAL - Leave blank if not needed",) + AI_TRAINING_OUTPUT_FOLDER_DEFAULT = CalmVariable.Simple("training-output",label="AI Training Output Folder Default Value",is_mandatory=False,is_hidden=True,runtime=False,description="Default value for the AI Training Output Folder",) + AI_TRAINING_OUTPUT_FILE = CalmVariable.Simple("",label="AI Training Output File",is_mandatory=False,is_hidden=False,runtime=True,description="OPTIONAL - Leave blank if not needed",) + AI_TRAINING_OUTPUT_FILE_DEFAULT = CalmVariable.Simple("resnet.pth",label="AI Training Output File Default Value",is_mandatory=False,is_hidden=True,runtime=False,description="Default value for the AI Training Output File",) + EXTRA_PARAMS = CalmVariable.Simple("",label="AI Inference Optional Extra Parameters",is_mandatory=False,is_hidden=False,runtime=True,description="OPTIONAL - Leave blank if not needed - Enter any extra parameters needed, e.g., --quiet, etc.",) + AI_MODEL_NAME = CalmVariable.Simple("",label="AI Training Model Name",is_mandatory=False,is_hidden=False,runtime=True,description="OPTIONAL - Leave blank if not needed - Enter the AI model name if not using the default value",) + AI_MODEL_NAME_DEFAULT = CalmVariable.Simple("resnet50",label="AI Training Model Name Default Value",is_mandatory=False,is_hidden=True,runtime=False,description="Default value for the AI model name",) + + @action + def AIStopInferenceService(name="AI Stop Inference Service"): + CalmTask.Exec.ssh(name="AI Stop Inference Service",filename=DAY2_SCRIPTS_DIRECTORY + "/ai_inference_stop.sh",target=ref(VM_Provision),) + +class AHV_Small(Common): + deployments = [AHV_Deployment_Sml] + + @action + def Scaleout(name="Scale Out"): + increase_count = CalmVariable.Simple("1",label="",is_mandatory=False,is_hidden=False,runtime=True,description="",) + CalmTask.Scaling.scale_out("@@{increase_count}@@", name="ScaleOut",target=ref(AHV_Deployment_Sml),) + + @action + def Scalein(name="Scale In"): + decrease_count = CalmVariable.Simple("1",label="",is_mandatory=False,is_hidden=False,runtime=True,description="",) + CalmTask.Scaling.scale_in("@@{decrease_count}@@", name="ScaleIn",target=ref(AHV_Deployment_Sml),) + +class AHV_Medium(Common): + deployments = [AHV_Deployment_Medium] + + @action + def Scaleout(name="Scale Out"): + increase_count = CalmVariable.Simple("1",label="",is_mandatory=False,is_hidden=False,runtime=True,description="",) + CalmTask.Scaling.scale_out("@@{increase_count}@@", name="ScaleOut",target=ref(AHV_Deployment_Medium),) + + @action + def Scalein(name="Scale In"): + 
decrease_count = CalmVariable.Simple("1",label="",is_mandatory=False,is_hidden=False,runtime=True,description="",) + CalmTask.Scaling.scale_in("@@{decrease_count}@@", name="ScaleIn",target=ref(AHV_Deployment_Medium),) + +class AHV_Large(Common): + deployments = [AHV_Deployment_Large] + + @action + def Scaleout(name="Scale Out"): + increase_count = CalmVariable.Simple("1",label="",is_mandatory=False,is_hidden=False,runtime=True,description="",) + CalmTask.Scaling.scale_out("@@{increase_count}@@", name="ScaleOut",target=ref(AHV_Deployment_Large),) + + @action + def Scalein(name="Scale In"): + decrease_count = CalmVariable.Simple("1",label="",is_mandatory=False,is_hidden=False,runtime=True,description="",) + CalmTask.Scaling.scale_in("@@{decrease_count}@@", name="ScaleIn",target=ref(AHV_Deployment_Large),) + +class Linux(Blueprint): + + services = [VM_Provision] + packages = [AHV_Package_Sml, AHV_Package_Med, AHV_Package_Lrg] + substrates = [AHVVM_Small, AHVVM_Medium, AHVVM_Large] + profiles = [AHV_Small, AHV_Medium, AHV_Large ] + credentials = [BP_CRED_cred_os] + +Linux.__doc__ = read_file('mp_meta/bp-description.md') + +def main(): + print(Linux.json_dumps(pprint=True)) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/calm-dsl-bps/blueprints/edge_ai/linux/mp_meta/bp-description.md b/calm-dsl-bps/blueprints/edge_ai/linux/mp_meta/bp-description.md new file mode 100644 index 0000000..67e83ce --- /dev/null +++ b/calm-dsl-bps/blueprints/edge_ai/linux/mp_meta/bp-description.md @@ -0,0 +1,20 @@ + +This blueprint deploys an Ubuntu virtual machine with NVidia GPU drivers for AI training and Inference Day2 actions. + +#### Operating Systems Supported + +* Ubuntu 20.04 + +#### Platform Profiles + +* Nutanix AHV + +#### Profiles Supported + +* Small - 8 CPU - 16 GB Memory +* Medium - 16 CPU - 24 GB Memory +* Large - 24 CPU - 36 GB Memory + +#### Legal Terms + +#### References diff --git a/calm-dsl-bps/blueprints/edge_ai/linux/mp_meta/red-hat-app-icon.png b/calm-dsl-bps/blueprints/edge_ai/linux/mp_meta/red-hat-app-icon.png new file mode 100644 index 0000000..56c2157 Binary files /dev/null and b/calm-dsl-bps/blueprints/edge_ai/linux/mp_meta/red-hat-app-icon.png differ diff --git a/calm-dsl-bps/blueprints/edge_ai/linux/mp_meta/ubuntu.png b/calm-dsl-bps/blueprints/edge_ai/linux/mp_meta/ubuntu.png new file mode 100644 index 0000000..5f3cd07 Binary files /dev/null and b/calm-dsl-bps/blueprints/edge_ai/linux/mp_meta/ubuntu.png differ diff --git a/calm-dsl-bps/blueprints/edge_ai/linux/specs/ahv-provider-spec.yaml b/calm-dsl-bps/blueprints/edge_ai/linux/specs/ahv-provider-spec.yaml new file mode 100644 index 0000000..cef7b20 --- /dev/null +++ b/calm-dsl-bps/blueprints/edge_ai/linux/specs/ahv-provider-spec.yaml @@ -0,0 +1,65 @@ +categories: + OSType: Linux +name: vm_@@{calm_application_name}@@-@@{calm_array_index}@@ +resources: + boot_config: + boot_device: + disk_address: + adapter_type: SCSI + device_index: 0 + gpu_list: + - vendor: NVIDIA + mode: PASSTHROUGH_COMPUTE + device_id: 8433 + disk_list: + - data_source_reference: + kind: image + name: replaced-in-bp + uuid: replaced-in-bp + device_properties: + device_type: DISK + disk_address: + adapter_type: SCSI + device_index: 0 + disk_size_mib: 0 + - device_properties: + device_type: CDROM + disk_address: + adapter_type: IDE + device_index: 0 + disk_size_mib: 0 + guest_customization: + cloud_init: + user_data: |- + #cloud-config + runcmd: + - sudo sed -i "$ a PubkeyAcceptedAlgorithms +ssh-rsa\nHostKeyAlgorithms 
+ssh-rsa\nPubkeyAcceptedKeyTypes +ssh-rsa" /etc/ssh/sshd_config + - sudo systemctl restart sshd + disable_root: False + hostname: @@{name}@@ + users: + - default + - name: @@{cred_os.username}@@ + shell: /bin/bash + homedir: /home/@@{cred_os.username}@@ + ssh-authorized-keys: + - @@{os_cred_public_key}@@ + sudo: ['ALL=(ALL) NOPASSWD:ALL'] + ssh_pwauth: false + package_upgrade: true + memory_size_mib: replaced-in-bp + nic_list: + - network_function_nic_type: INGRESS + nic_type: NORMAL_NIC + subnet_reference: + kind: subnet + name: replaced-in-bp + uuid: replaced-in-bp + num_sockets: replaced-in-bp + num_vcpus_per_socket: replaced-in-bp + guest_tools: + nutanix_guest_tools: + iso_mount_state: MOUNTED + state: ENABLED + enabled_capability_list: + - VSS_SNAPSHOT diff --git a/calm-dsl-bps/blueprints/edge_ai/linux/specs/create_spec_editables.yaml b/calm-dsl-bps/blueprints/edge_ai/linux/specs/create_spec_editables.yaml new file mode 100644 index 0000000..3b0443c --- /dev/null +++ b/calm-dsl-bps/blueprints/edge_ai/linux/specs/create_spec_editables.yaml @@ -0,0 +1,11 @@ +cluster_reference: true +resources: + nic_list: + '0': + subnet_reference: true + vpc_reference: true + gpu_list: + '0': + vendor: true + mode : true + device_id: true diff --git a/calm-dsl-bps/blueprints/edge_ai/linux/specs/vmcalm_array_indexcalm_time_cloud_init_data.yaml b/calm-dsl-bps/blueprints/edge_ai/linux/specs/vmcalm_array_indexcalm_time_cloud_init_data.yaml new file mode 100644 index 0000000..9a839c2 --- /dev/null +++ b/calm-dsl-bps/blueprints/edge_ai/linux/specs/vmcalm_array_indexcalm_time_cloud_init_data.yaml @@ -0,0 +1,6 @@ +|- +#cloud-config +hostname: @@{name}@@ +ssh_pwauth: false +ssh_authorized_keys: + - @@{cred_os.public_key}@@ diff --git a/calm-dsl-bps/blueprints/edge_ai/tshirt-specs/tshirt_specs.yaml b/calm-dsl-bps/blueprints/edge_ai/tshirt-specs/tshirt_specs.yaml new file mode 100644 index 0000000..a9a1a7b --- /dev/null +++ b/calm-dsl-bps/blueprints/edge_ai/tshirt-specs/tshirt_specs.yaml @@ -0,0 +1,15 @@ +linux-os: + global: + tshirt_sizes: + small: + num_sockets: 8 + num_vcpus_per_socket: 1 + memory_size_mib: 16384 + medium: + num_sockets: 16 + num_vcpus_per_socket: 1 + memory_size_mib: 24576 + large: + num_sockets: 24 + num_vcpus_per_socket: 1 + memory_size_mib: 36864 \ No newline at end of file diff --git a/config/README.md b/config/README.md new file mode 100644 index 0000000..6cc692a --- /dev/null +++ b/config/README.md @@ -0,0 +1,85 @@ +## GitOps + +### Prerequisite + +We need an Ubuntu VM that is configured as a self-hosted runner. This VM should have network connectivity to +Prism Central and the clusters in consideration. +Click [here](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners) to read how to +configure a self-hosted runner for your repository. + +### Required parameters + +Read about the required parameters to run the framework [here](../README.md#framework-usage). + +### Github pipelines + +We can use **Github workflows/pipelines** to manage GitOps, i.e. using this repository to manage Infrastructure as +Code (IaC), triggered from Github. +> A **Github workflow** is a configurable automated process that will run one or more jobs. Workflows are defined by a +> YAML file checked in to your repository and will run when triggered by an event in your repository, or they can be +> triggered manually, or on a defined schedule. + +### How to auto-trigger pre-configured workflows + +There are several pre-configured **Github workflows/pipelines**; before walking through them, an optional local sanity check for edited config files is sketched below.
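Before opening the PR described in the steps that follow, it can help to confirm locally that an edited config file still parses. The snippet below is only an illustrative sketch and is not part of the framework (the script name `check_config.py` is arbitrary); it assumes, as in the files under `config/`, that the `//`-style comments sit on their own lines:

```python
# check_config.py - illustrative local sanity check, not part of the framework.
# Strips the //-style comment lines used in the JSON configs and confirms the rest parses.
import json
import re
import sys

def load_commented_json(path: str) -> dict:
    with open(path) as handle:
        text = handle.read()
    # Drop whole-line // comments; the config files only use comments on their own lines.
    stripped = re.sub(r"^\s*//.*$", "", text, flags=re.MULTILINE)
    return json.loads(stripped)

if __name__ == "__main__":
    config = load_commented_json(sys.argv[1])
    print(f"Parsed OK, top-level keys: {sorted(config)}")
```

For example: `python check_config.py config/new-site.json`.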
All the below-mentioned workflows are triggered by the +approval of a PR containing the configuration file changes. +Here is the step-by-step guide. + +- Create a new branch or use an existing branch other than `main`. +- Modify any of the following pre-configured files [config/new-site.json](new-site.json) + , [config/pod-config.yml](pod-config.yml), [config/create-vm-workloads.json](create-vm-workloads.json) + , [config/edge-ai.json](edge-ai.json) with the corresponding configuration changes for your environment. +- Create a **Pull-Request (PR)** from this branch to **main**. +- Get approval for the changes. + +That's it. The corresponding workflow(s) kick in. Below are the details of the workflows that are triggered by the +configuration file changes. + +- **Imaging workflow** - This Github workflow gets triggered when the file [config/new-site.json](new-site.json) is part + of the PR. This Github workflow will set up the necessary Python environment and call the framework as below + ```sh + > cd framework && python main.py --workflow imaging -f config/new-site.json + ``` +- **Pod config workflow** - This Github workflow gets triggered when the file [config/pod-config.yml](pod-config.yml) is + part of the PR. This Github workflow will set up the necessary Python environment and call the framework as below + ```sh + > cd framework && python main.py --workflow pod-config -f config/pod-config.yml + ``` +- **VM workloads workflow** - This Github workflow gets triggered when the + file [config/create-vm-workloads.json](create-vm-workloads.json) is part of the PR. This Github workflow will set up the + necessary Python environment and call the framework as below + ```sh + > cd framework && python main.py --workflow calm-vm-workloads -f config/create-vm-workloads.json + ``` +- **Edge AI workload workflow** - This Github workflow gets triggered when the file [config/edge-ai.json](edge-ai.json) + is part of the PR. This Github workflow will set up the necessary Python environment and call the framework as below + ```sh + > cd framework && python main.py --workflow calm-edgeai-vm-workload -f config/edge-ai.json + ``` + +> Note: The PR can contain configuration changes in multiple files [config/new-site.json](new-site.json) +> , [config/pod-config.yml](pod-config.yml), [config/create-vm-workloads.json](create-vm-workloads.json) +> , [config/edge-ai.json](edge-ai.json). In this scenario, the workflows are triggered one after the other in the below +> order: +> **Imaging workflow -> Pod config workflow -> VM workloads workflow -> Edge AI workload workflow**. + +## Snapshots of auto-trigger + +- Modify any of the files configured for auto-trigger (**config/new-site.json**, **config/pod-config.yml**, + **config/create-vm-workloads.json**) and commit to a new branch. + ![](../.github/images/new-branch.png) +- Create a PR. + ![](../.github/images/new-pr.png) +- Get approval for the PR. This triggers the workflow. + ![](../.github/images/approve-pr.png) +- Then monitor the workflow from the `Actions` tab under the `Trigger Python Workflows` section: click on the latest workflow run + and expand `Run python script` for streaming logs. + ![](../.github/images/workflow.png) +- The logs and the input config file used for the run will be pushed back to the branch that triggered the run. + ![](../.github/images/logs.png) + +## Example of triggering workflows manually from the Github UI + +- These workflows can also be manually triggered from the `Actions` tab in Github, as shown in the screenshots below.
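Besides the UI, any workflow that exposes a manual trigger can also be dispatched programmatically through the GitHub REST API. The sketch below is illustrative only and is not part of this repository; the owner, repository, workflow file, token, branch and inputs are all placeholders to be replaced with your own values:

```python
# Illustrative sketch only: trigger a manually dispatchable workflow via the GitHub REST API.
import requests

OWNER = "your-org"                   # placeholder
REPO = "your-repo"                   # placeholder
WORKFLOW_FILE = "your-workflow.yml"  # placeholder: the workflow file to dispatch
TOKEN = "ghp_..."                    # placeholder: a token allowed to trigger workflows

resp = requests.post(
    f"https://api.github.com/repos/{OWNER}/{REPO}/actions/workflows/{WORKFLOW_FILE}/dispatches",
    headers={
        "Accept": "application/vnd.github+json",
        "Authorization": f"Bearer {TOKEN}",
    },
    # Supply whatever inputs the target workflow requires; left empty here.
    json={"ref": "main", "inputs": {}},
)
resp.raise_for_status()  # GitHub responds with 204 No Content on success
```

The screenshots below show the same flow in the Github UI.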
+ ![](../.github/images/actions.png) + ![](../.github/images/run_wf.png) diff --git a/config/create-vm-workloads.json b/config/create-vm-workloads.json new file mode 100644 index 0000000..ace4dbb --- /dev/null +++ b/config/create-vm-workloads.json @@ -0,0 +1,38 @@ +{ + "site_name": "site-01", + // these parameters can be defined in global, if defined these can be skipped + "pc_ip": "valid-ip", + "pc_username": "pc-user", + "pc_password": "pc-password", + + // project to deploy the cluster/s + "project_name": "default", + + // Account name + "account_name": "NTNX_LOCAL_AZ", + + // these cluster and subnet references will be added to the project + "subnets": { + // cluster-name : subnet + "cluster-01": ["vlan110", "vlan112"], + "cluster-02": ["vlan110"] + }, + + // BP list + "bp_list": [ + { + "dsl_file": "calm-dsl-bps/blueprints/LAMP/LAMP.py", + "name": "LAMP-dsl", + "app_name": "LAMP-dsl", + "cluster": "cluster-02", + "subnet": "vlan110" + }, + { + "dsl_file": "calm-dsl-bps/blueprints/LAMP/LAMP.py", + "name": "LAMP-dsl", + "app_name": "LAMP-dsl", + "cluster": "cluster-01", + "subnet": "vlan112" + } + ] +} \ No newline at end of file diff --git a/config/edge-ai.json b/config/edge-ai.json new file mode 100644 index 0000000..d3ff4e1 --- /dev/null +++ b/config/edge-ai.json @@ -0,0 +1,41 @@ +{ + "site_name": "site-01", + // these parameters can be defined in global, if defined these can be skipped + "pc_ip": "valid-ip", + "pc_username": "pc-user", + "pc_password": "pc-password", + // project to deploy the cluster/s + "project_name": "default", + // Account name + "account_name": "NTNX_LOCAL_AZ", + // these cluster and subnet references will be added to the project + "subnets": { + // cluster-name : subnet + "cluster-01": [ + "vlan110", + "vlan112" + ], + "cluster-02": [ + "vlan110" + ] + }, + // BP list + "bp_list": [ + { + "dsl_file": "calm-dsl-bps/blueprints/edge_ai/linux/blueprint-lin.py", + "name": "edge-ai-test", + "app_name": "edge-ai-dsl", + "cluster": "cluster-02", + "subnet": "vlan110", + "image": "ubuntu_2204_519041_100gb", + "variable_list": [ + { "value": { "value": "10.10.1.1:/ai" }, "context": "AHV_Small", "name": "NFS_PATH"}, + { "value": { "value": "1" }, "context": "AHV_Small", "name": "WORKER"}, + { "value": { "value": "10.10.1.1:/ai" }, "context": "AHV_Medium", "name": "NFS_PATH"}, + { "value": { "value": "1" }, "context": "AHV_Medium", "name": "WORKER"}, + { "value": { "value": "10.10.1.1:/ai" }, "context": "AHV_Large", "name": "NFS_PATH"}, + { "value": { "value": "1" }, "context": "AHV_Large", "name": "WORKER"} + ] + } + ] +} \ No newline at end of file diff --git a/config/example-configs/create-vm-workloads.json b/config/example-configs/create-vm-workloads.json new file mode 100644 index 0000000..ace4dbb --- /dev/null +++ b/config/example-configs/create-vm-workloads.json @@ -0,0 +1,38 @@ +{ + "site_name": "site-01", + // these parameters can be defined in global, if defined these can be skipped + "pc_ip": "valid-ip", + "pc_username": "pc-user", + "pc_password": "pc-password", + + // project to deploy the cluster/s + "project_name": "default", + + // Account name + "account_name": "NTNX_LOCAL_AZ", + + // these cluster and subnet references will be added to the project + "subnets": { + // cluster-name : subnet + "cluster-01": ["vlan110", "vlan112"], + "cluster-02": ["vlan110"] + }, + + // BP list + "bp_list": [ + { + "dsl_file": "calm-dsl-bps/blueprints/LAMP/LAMP.py", + "name": "LAMP-dsl", + "app_name": "LAMP-dsl", + "cluster": "cluster-02", + "subnet": "vlan110" + }, + { + 
"dsl_file": "calm-dsl-bps/blueprints/LAMP/LAMP.py", + "name": "LAMP-dsl", + "app_name": "LAMP-dsl", + "cluster": "cluster-01", + "subnet": "vlan112" + } + ] +} \ No newline at end of file diff --git a/config/example-configs/edge-ai.json b/config/example-configs/edge-ai.json new file mode 100644 index 0000000..d3ff4e1 --- /dev/null +++ b/config/example-configs/edge-ai.json @@ -0,0 +1,41 @@ +{ + "site_name": "site-01", + // these parameters can be defined in global, if defined these can be skipped + "pc_ip": "valid-ip", + "pc_username": "pc-user", + "pc_password": "pc-password", + // project to deploy the cluster/s + "project_name": "default", + // Account name + "account_name": "NTNX_LOCAL_AZ", + // these cluster and subnet references will be added to the project + "subnets": { + // cluster-name : subnet + "cluster-01": [ + "vlan110", + "vlan112" + ], + "cluster-02": [ + "vlan110" + ] + }, + // BP list + "bp_list": [ + { + "dsl_file": "calm-dsl-bps/blueprints/edge_ai/linux/blueprint-lin.py", + "name": "edge-ai-test", + "app_name": "edge-ai-dsl", + "cluster": "cluster-02", + "subnet": "vlan110", + "image": "ubuntu_2204_519041_100gb", + "variable_list": [ + { "value": { "value": "10.10.1.1:/ai" }, "context": "AHV_Small", "name": "NFS_PATH"}, + { "value": { "value": "1" }, "context": "AHV_Small", "name": "WORKER"}, + { "value": { "value": "10.10.1.1:/ai" }, "context": "AHV_Medium", "name": "NFS_PATH"}, + { "value": { "value": "1" }, "context": "AHV_Medium", "name": "WORKER"}, + { "value": { "value": "10.10.1.1:/ai" }, "context": "AHV_Large", "name": "NFS_PATH"}, + { "value": { "value": "1" }, "context": "AHV_Large", "name": "WORKER"} + ] + } + ] +} \ No newline at end of file diff --git a/config/example-configs/new-site.json b/config/example-configs/new-site.json new file mode 100644 index 0000000..2db55e3 --- /dev/null +++ b/config/example-configs/new-site.json @@ -0,0 +1,72 @@ +{ + // Site name + "site_name": "site-1", + // List of block serials, available nodes in the provided block serials will be used for cluster deployment + "blocks_serial_numbers": [], + // To re-use the existing network setting set use_existing_network_settings to ture + "use_existing_network_settings": false, + // the below section can be empty if use_existing_network_settings is set to true + "network": { + // Provide start and end ip address for MGMT static IPs + // Provide Gateway and Netmask for management network + // mgmt_static_ips = [start_ip, end_ip] + "mgmt_static_ips": [], + "mgmt_gateway": "", + "mgmt_netmask": "", + // IPMI IPs are optional, if not provided the existing IPMI will be re-used + // Provide start and end ip address for IPMI static IPs + // Provide Gateway and Netmask for IPMI network + // ipmi_static_ips = [start_ip, end_ip] + "ipmi_static_ips": [], + "ipmi_gateway": "", + "ipmi_netmask": "" + }, + // set re-image to true for re-imaging with provided aos, ahv versions + "re-image": true, + // the below section can be empty if re-image is not required + "imaging_parameters": { + "aos_version": "6.5.1.8", + "hypervisor_type": "kvm", + "hypervisor_version": "20201105.30411" + }, + // Cluster details for deployment + "clusters": { + // Cluster name and its details + "cluster-01": { + // Cluster size will be the number of nodes for deployment + "cluster_size": 1, + "cluster_vip": "x.x.x.x", + // cvm_ram is Gigabytes. Minimum 12, no maximum. Default set it to 12. 
+ "cvm_ram": 12, + // Nutanix supports RF2, and also RF3 only if the cluster has 5+ nodes + "redundancy_factor": 2 + }, + "cluster-02": { + "cluster_size": 3, + "cluster_vip": "x.x.x.x", + // Provided node serials will be used for this cluster deployment. (Optional) + // Node serials need not be a part of blocks_serial_numbers mentioned above + "node_serials": [], + // Cluster Specific network setting. (Optional) + "network": { + // Provide start and end ip address for MGMT static IPs + // Provide Gateway and Netmask for management network + // mgmt_static_ips = [start_ip, end_ip] + "mgmt_static_ips": [], + "mgmt_gateway": "", + "mgmt_netmask": "", + // IPMI IPs are optional, if not provided the existing IPMI will be re-used + // Provide start and end ip address for IPMI static IPs + // Provide Gateway and Netmask for IPMI network + // ipmi_static_ips = [start_ip, end_ip] + "ipmi_static_ips": [], + "ipmi_gateway": "", + "ipmi_netmask": "" + }, + // Re-image can be set to true or false for this particular cluster + "re-image": false, + "cvm_ram": 24, + "redundancy_factor": 2 + } + } +} diff --git a/config/example-configs/pod-config.yml b/config/example-configs/pod-config.yml new file mode 100644 index 0000000..01ab9ec --- /dev/null +++ b/config/example-configs/pod-config.yml @@ -0,0 +1,304 @@ +--- +# Global variables, which can be inherited in clusters +pc_creds: &pc_creds + pc_username: pc-user + pc_password: pc-password + +pe_creds: &pe_creds + pe_username: pe-user + pe_password: pe-password + +ad_config: &ad_config + directory_services: + directory_type: ACTIVE_DIRECTORY # only ACTIVE_DIRECTORY is supported, port 389 will be used for LDAP + ad_name: name + ad_domain: eng.company.com + ad_server_ip: valid-ip + service_account_username: username + service_account_password: password + role_mappings: + - role_type: ROLE_USER_ADMIN # one of 'ROLE_CLUSTER_ADMIN', 'ROLE_USER_ADMIN', 'ROLE_CLUSTER_VIEWER', 'ROLE_BACKUP_ADMIN' + entity_type: GROUP # one of GROUP, OU, USER + values: + - john_doe + - john_smith + +cluster_eula: &eula + eula: + username: Nutanix + company_name: Nutanix + job_title: Backend Engineer + +pulse: &pulse + enable_pulse: true + +pe_container: &pe_container + name: Automation-container + # All these below parameters are optional + #storage_pool_uuid: uuid # Which storage pool to use, comment it to auto pick storage pool + reserved_in_gb: 1 # Represents the minimum exclusively reserved storage capacity available for the storage container + advertisedCapacity_in_gb: 1 # Represents the maximum storage capacity available for the storage container to use + replication_factor: 2 # Number of data copies to maintain + compression_enabled: true # Only Inline compression is supported yet + compression_delay_in_secs: 0 + erasure_code: "OFF" # OFF/ ON # Erasure code requires a minimum of 4 nodes when using RF2 and a minimum of 6 nodes when using RF3 + on_disk_dedup: "OFF" # OFF/ ON # Deduplication is not supported with fewer than 3 nodes + nfsWhitelistAddress: [ ] # Access list for storage container + +pe_networks: &pe_networks + name: "vlan-110" + subnet_type: VLAN # only VLAN is supported yet + vlan_id: 110 + network_ip: valid-ip + network_prefix: 24 + default_gateway_ip: valid-ip + # comment pool_list section if there are no pools for the subnet + pool_list: + - range: "valid-ip-start valid-ip-end" # Eg "10.10.10.31 10.10.10.40" + # comment dhcp_options section if you don't want dhcp. 
Over-riding dhcp is not supported yet + dhcp_options: + domain_name_server_list: [ 10.10.10.10 ] + domain_search_list: [ eng.company.com ] + domain_name: eng.company.com + +pods: + - AZ01: + pc_ip: valid-local-pc-ip + # Using globally declared pc credentials + <<: *pc_creds + remote_azs: + # remote AZ details, only Physical location is supported yet + valid-remote-pc-ip: + username: remote-pc-user + password: remote-pc-password + # To create categories in current PC + categories: + # Scenario 1, add values to an existing category + - name: AppType # name of existing category + description: "AppType CalmAppliance" + values: [ "CalmAppliance" ] + # Scenario 2, create a new category with values + - name: AZ01-DR-01 + description: "AZ01-DR-01 RPO1h" + values: [ "RPO1h" ] + # To create policies in current PC + protection_rules: + - name: AZ01-AZ02-Calm + desc: "Example Protection Rule for CalmAppliance" + protected_categories: + AppType: + - CalmAppliance + schedules: + - source: + # Source should always be Local AZ i.e local PC + availability_zone: valid-local-pc-ip + cluster: source-cluster + destination: + # Source should always be one of the remote AZs + availability_zone: valid-remote-pc-ip + cluster: destination-cluster + protection_type: ASYNC # ASYNC/ SYNC + # if protection_type is SYNC + #auto_suspend_timeout: 10 + # if protection_type is ASYNC + rpo: 1 + rpo_unit: HOUR # MINUTE/HOUR/DAY/WEEK + snapshot_type: "CRASH_CONSISTENT" # APPLICATION_CONSISTENT/CRASH_CONSISTENT + local_retention_policy: + # For Linear Retention type (Retains the n most recent snapshots. A value of 12 means that the 12 most recent snapshots are retained) + num_snapshots: 1 + # For Roll-up retention type (Maintains a rolling window of snapshots for every schedule, starting with the hourly schedule and ending with the schedule created for the specified retention period) + #rollup_retention_policy: + #snapshot_interval_type: YEARLY # DAILY/WEEKLY/MONTHLY/YEARLY + #multiple: 2 + remote_retention_policy: + # For Linear Retention type + num_snapshots: 1 + # For Roll-up retention type + #rollup_retention_policy: + #snapshot_interval_type: YEARLY # HOURLY/DAILY/WEEKLY/MONTHLY/YEARLY + #multiple: 2 + recovery_plans: + - name: AZ01-RP-Calm + desc: "Example Recovery plan for AppType CalmAppliance" + primary_location: + # Primary location is set to Local AZ + availability_zone: valid-local-pc-ip + # cluster: bucky01-dev # Optional. Required only for Local AZ to Local AZ + recovery_location: + availability_zone: valid-remote-pc-ip + # cluster: bucky02-dev # Optional. 
Required only for Local AZ to Local AZ + stages: + #- vms: + #- name: ubuntu-01 + #enable_script_exec: true + #delay: 2 + - categories: + - key: AppType + value: CalmAppliance + network_type: NON_STRETCH # NON_STRETCH/STRETCH + network_mappings: + - primary: + test: + name: valid-subnet-name + #gateway_ip: gateway_ip Optional + #prefix: network_prefix + prod: + name: valid-subnet-name + #gateway_ip: gateway_ip + #prefix: network_prefix + recovery: + test: + name: vlan110 + #gateway_ip: gateway_ip + #prefix: network_prefix + prod: + name: vlan110 + #gateway_ip: gateway_ip + #prefix: network_prefix + address_groups: + - name: AD + description: "Example AD Address Groups" + subnets: + - network_ip: valid-ip # Eg: 10.10.10.130 + network_prefix: valid-prefix # Eg: 32 + - name: Calm + description: "Example Calm Address Groups" + subnets: + - network_ip: valid-ip # Eg: 10.10.10.130 + network_prefix: valid-prefix # Eg: 32 + service_groups: + - name: ngt + description: Example Service Group NGT - TCP + service_details: + tcp: + - "2074" + - name: dns + description: Example Service Group DNS - UDP + service_details: + udp: + - "53" + security_policies: + - name: Example-AZ01-Calm + description: Example Security Policy + allow_ipv6_traffic: true # true/ false # Policy rules apply only to IPv4 Traffic and all IPv6 traffic are blocked by default. + hitlog: true # true/ false # Log traffic flow hits on the policy rules + # Only app rules are supported for now + app_rule: + policy_mode: MONITOR # APPLY/MONITOR + # Secure this app + target_group: + categories: + AppType: AZ01LAMP01 + inbounds: + - categories: + AppTier: + - WEB + address: + name: Calm + protocol: + service: + name: ssh + - categories: + AppTier: + - APP + udp: + - start_port: 82 + end_port: 8080 + address: + name: Calm + protocol: + service: + name: ssh + - categories: + AppTier: + - DB + address: + name: Calm + protocol: + service: + name: ssh + outbounds: + - address: + name: NVD_AD + protocol: + service: + name: dns + clusters: + # specify the names of clusters if they are already registered to a PC defined above in the AZ + # else specify the ip if they need to be registered to the PC + valid-pe-ip-or-name: + name: cluster-01 # Optional if name is already provided above + # Use global pe creds + <<: *pe_creds + # Use global eula config + <<: *eula + # Use global pulse config + <<: *pulse + # Use global ad config + <<: *ad_config + dsip: valid-ip + networks: + # Use global network config + - <<: *pe_networks + containers: + # Use global storage container config + - <<: *pe_container + valid-pe-ip-or-name: + name: cluster-02 # Optional if name is already provided above + # Use global pe creds + <<: *pe_creds + # Over-ride global eula config + eula: + username: username + company_name: Nutanix + job_title: title + # Use global pulse config + <<: *pulse + # Over-ride global ad config + ad_config: + directory_services: + directory_type: ACTIVE_DIRECTORY # only ACTIVE_DIRECTORY is supported, port 389 will be used for LDAP + ad_name: some-other-name + ad_domain: eng.company.com + ad_server_ip: valid-ip + service_account_username: username + service_account_password: password + role_mappings: + - role_type: ROLE_USER_ADMIN # one of 'ROLE_CLUSTER_ADMIN', 'ROLE_USER_ADMIN', 'ROLE_CLUSTER_VIEWER', 'ROLE_BACKUP_ADMIN' + entity_type: GROUP # one of GROUP, OU, USER + values: + - yash + - naveen + dsip: valid-ip + networks: + # Use global network config and add another network + - <<: *pe_networks + - name: "vlan-200" + subnet_type: VLAN # only VLAN 
is supported yet + vlan_id: 200 + network_ip: valid-ip + network_prefix: 24 + default_gateway_ip: valid-ip + # comment pool_list section if there are no pools for the subnet + pool_list: + - range: "valid-ip-start valid-ip-end" # Eg "10.10.10.31 10.10.10.40" + # comment dhcp_options section if you don't want dhcp. Over-riding dhcp is not supported yet + dhcp_options: + domain_name_server_list: [ 10.10.10.10 ] + domain_search_list: [ eng.company.com ] + domain_name: eng.company.com + containers: + # Use global storage container config and add another container + - <<: *pe_container + - name: Automation-container-2 + # All these below parameters are optional + #storage_pool_uuid: uuid # Which storage pool to use, comment it to auto pick storage pool + reserved_in_gb: 1 # Represents the minimum exclusively reserved storage capacity available for the storage container + advertisedCapacity_in_gb: 1 # Represents the maximum storage capacity available for the storage container to use + replication_factor: 2 # Number of data copies to maintain + compression_enabled: true # Only Inline compression is supported yet + compression_delay_in_secs: 0 + erasure_code: "OFF" # OFF/ ON # Erasure code requires a minimum of 4 nodes when using RF2 and a minimum of 6 nodes when using RF3 + on_disk_dedup: "OFF" # OFF/ ON # Deduplication is not supported with fewer than 3 nodes + nfsWhitelistAddress: [ ] # Access list for storage container \ No newline at end of file diff --git a/config/global.json b/config/global.json new file mode 100644 index 0000000..492d7fa --- /dev/null +++ b/config/global.json @@ -0,0 +1,33 @@ +{ + // All the Python workflow runs are grouped by sites + // Different workflow operaition logs and configs are grouped by sites + "site_name": "site-01", + + // below declared parameters are accessible in all Python workflows + "pc_ip": "valid-ip", + "pc_username": "pc-user", + "pc_password": "pc-password", + + "global_network": { + "dns_servers": ["valid-ip1", "valid-ip2"], + "ntp_servers": ["0.us.pool.ntp.org"] + }, + + // Supported AOS versions + "aos_versions": { + "6.5.1.8": { + "url": "https://url", + "name": "AOS Upgrade/Installer - LTS" + } + }, + + // Supported Hypervisor versions + "hypervisors": { + "kvm": { + "20201105.30411": { + "url": "https://url", + "name": "AHV Bundle" + } + } + } +} diff --git a/config/new-site.json b/config/new-site.json new file mode 100644 index 0000000..2db55e3 --- /dev/null +++ b/config/new-site.json @@ -0,0 +1,72 @@ +{ + // Site name + "site_name": "site-1", + // List of block serials, available nodes in the provided block serials will be used for cluster deployment + "blocks_serial_numbers": [], + // To re-use the existing network setting set use_existing_network_settings to ture + "use_existing_network_settings": false, + // the below section can be empty if use_existing_network_settings is set to true + "network": { + // Provide start and end ip address for MGMT static IPs + // Provide Gateway and Netmask for management network + // mgmt_static_ips = [start_ip, end_ip] + "mgmt_static_ips": [], + "mgmt_gateway": "", + "mgmt_netmask": "", + // IPMI IPs are optional, if not provided the existing IPMI will be re-used + // Provide start and end ip address for IPMI static IPs + // Provide Gateway and Netmask for IPMI network + // ipmi_static_ips = [start_ip, end_ip] + "ipmi_static_ips": [], + "ipmi_gateway": "", + "ipmi_netmask": "" + }, + // set re-image to true for re-imaging with provided aos, ahv versions + "re-image": true, + // the below section can be 
empty if re-image is not required + "imaging_parameters": { + "aos_version": "6.5.1.8", + "hypervisor_type": "kvm", + "hypervisor_version": "20201105.30411" + }, + // Cluster details for deployment + "clusters": { + // Cluster name and its details + "cluster-01": { + // Cluster size will be the number of nodes for deployment + "cluster_size": 1, + "cluster_vip": "x.x.x.x", + // cvm_ram is Gigabytes. Minimum 12, no maximum. Default set it to 12. + "cvm_ram": 12, + // Nutanix supports RF2, and also RF3 only if the cluster has 5+ nodes + "redundancy_factor": 2 + }, + "cluster-02": { + "cluster_size": 3, + "cluster_vip": "x.x.x.x", + // Provided node serials will be used for this cluster deployment. (Optional) + // Node serials need not be a part of blocks_serial_numbers mentioned above + "node_serials": [], + // Cluster Specific network setting. (Optional) + "network": { + // Provide start and end ip address for MGMT static IPs + // Provide Gateway and Netmask for management network + // mgmt_static_ips = [start_ip, end_ip] + "mgmt_static_ips": [], + "mgmt_gateway": "", + "mgmt_netmask": "", + // IPMI IPs are optional, if not provided the existing IPMI will be re-used + // Provide start and end ip address for IPMI static IPs + // Provide Gateway and Netmask for IPMI network + // ipmi_static_ips = [start_ip, end_ip] + "ipmi_static_ips": [], + "ipmi_gateway": "", + "ipmi_netmask": "" + }, + // Re-image can be set to true or false for this particular cluster + "re-image": false, + "cvm_ram": 24, + "redundancy_factor": 2 + } + } +} diff --git a/config/pod-config.yml b/config/pod-config.yml new file mode 100644 index 0000000..01ab9ec --- /dev/null +++ b/config/pod-config.yml @@ -0,0 +1,304 @@ +--- +# Global variables, which can be inherited in clusters +pc_creds: &pc_creds + pc_username: pc-user + pc_password: pc-password + +pe_creds: &pe_creds + pe_username: pe-user + pe_password: pe-password + +ad_config: &ad_config + directory_services: + directory_type: ACTIVE_DIRECTORY # only ACTIVE_DIRECTORY is supported, port 389 will be used for LDAP + ad_name: name + ad_domain: eng.company.com + ad_server_ip: valid-ip + service_account_username: username + service_account_password: password + role_mappings: + - role_type: ROLE_USER_ADMIN # one of 'ROLE_CLUSTER_ADMIN', 'ROLE_USER_ADMIN', 'ROLE_CLUSTER_VIEWER', 'ROLE_BACKUP_ADMIN' + entity_type: GROUP # one of GROUP, OU, USER + values: + - john_doe + - john_smith + +cluster_eula: &eula + eula: + username: Nutanix + company_name: Nutanix + job_title: Backend Engineer + +pulse: &pulse + enable_pulse: true + +pe_container: &pe_container + name: Automation-container + # All these below parameters are optional + #storage_pool_uuid: uuid # Which storage pool to use, comment it to auto pick storage pool + reserved_in_gb: 1 # Represents the minimum exclusively reserved storage capacity available for the storage container + advertisedCapacity_in_gb: 1 # Represents the maximum storage capacity available for the storage container to use + replication_factor: 2 # Number of data copies to maintain + compression_enabled: true # Only Inline compression is supported yet + compression_delay_in_secs: 0 + erasure_code: "OFF" # OFF/ ON # Erasure code requires a minimum of 4 nodes when using RF2 and a minimum of 6 nodes when using RF3 + on_disk_dedup: "OFF" # OFF/ ON # Deduplication is not supported with fewer than 3 nodes + nfsWhitelistAddress: [ ] # Access list for storage container + +pe_networks: &pe_networks + name: "vlan-110" + subnet_type: VLAN # only VLAN is supported 
yet + vlan_id: 110 + network_ip: valid-ip + network_prefix: 24 + default_gateway_ip: valid-ip + # comment pool_list section if there are no pools for the subnet + pool_list: + - range: "valid-ip-start valid-ip-end" # Eg "10.10.10.31 10.10.10.40" + # comment dhcp_options section if you don't want dhcp. Over-riding dhcp is not supported yet + dhcp_options: + domain_name_server_list: [ 10.10.10.10 ] + domain_search_list: [ eng.company.com ] + domain_name: eng.company.com + +pods: + - AZ01: + pc_ip: valid-local-pc-ip + # Using globally declared pc credentials + <<: *pc_creds + remote_azs: + # remote AZ details, only Physical location is supported yet + valid-remote-pc-ip: + username: remote-pc-user + password: remote-pc-password + # To create categories in current PC + categories: + # Scenario 1, add values to an existing category + - name: AppType # name of existing category + description: "AppType CalmAppliance" + values: [ "CalmAppliance" ] + # Scenario 2, create a new category with values + - name: AZ01-DR-01 + description: "AZ01-DR-01 RPO1h" + values: [ "RPO1h" ] + # To create policies in current PC + protection_rules: + - name: AZ01-AZ02-Calm + desc: "Example Protection Rule for CalmAppliance" + protected_categories: + AppType: + - CalmAppliance + schedules: + - source: + # Source should always be Local AZ i.e local PC + availability_zone: valid-local-pc-ip + cluster: source-cluster + destination: + # Source should always be one of the remote AZs + availability_zone: valid-remote-pc-ip + cluster: destination-cluster + protection_type: ASYNC # ASYNC/ SYNC + # if protection_type is SYNC + #auto_suspend_timeout: 10 + # if protection_type is ASYNC + rpo: 1 + rpo_unit: HOUR # MINUTE/HOUR/DAY/WEEK + snapshot_type: "CRASH_CONSISTENT" # APPLICATION_CONSISTENT/CRASH_CONSISTENT + local_retention_policy: + # For Linear Retention type (Retains the n most recent snapshots. A value of 12 means that the 12 most recent snapshots are retained) + num_snapshots: 1 + # For Roll-up retention type (Maintains a rolling window of snapshots for every schedule, starting with the hourly schedule and ending with the schedule created for the specified retention period) + #rollup_retention_policy: + #snapshot_interval_type: YEARLY # DAILY/WEEKLY/MONTHLY/YEARLY + #multiple: 2 + remote_retention_policy: + # For Linear Retention type + num_snapshots: 1 + # For Roll-up retention type + #rollup_retention_policy: + #snapshot_interval_type: YEARLY # HOURLY/DAILY/WEEKLY/MONTHLY/YEARLY + #multiple: 2 + recovery_plans: + - name: AZ01-RP-Calm + desc: "Example Recovery plan for AppType CalmAppliance" + primary_location: + # Primary location is set to Local AZ + availability_zone: valid-local-pc-ip + # cluster: bucky01-dev # Optional. Required only for Local AZ to Local AZ + recovery_location: + availability_zone: valid-remote-pc-ip + # cluster: bucky02-dev # Optional. 
Required only for Local AZ to Local AZ + stages: + #- vms: + #- name: ubuntu-01 + #enable_script_exec: true + #delay: 2 + - categories: + - key: AppType + value: CalmAppliance + network_type: NON_STRETCH # NON_STRETCH/STRETCH + network_mappings: + - primary: + test: + name: valid-subnet-name + #gateway_ip: gateway_ip Optional + #prefix: network_prefix + prod: + name: valid-subnet-name + #gateway_ip: gateway_ip + #prefix: network_prefix + recovery: + test: + name: vlan110 + #gateway_ip: gateway_ip + #prefix: network_prefix + prod: + name: vlan110 + #gateway_ip: gateway_ip + #prefix: network_prefix + address_groups: + - name: AD + description: "Example AD Address Groups" + subnets: + - network_ip: valid-ip # Eg: 10.10.10.130 + network_prefix: valid-prefix # Eg: 32 + - name: Calm + description: "Example Calm Address Groups" + subnets: + - network_ip: valid-ip # Eg: 10.10.10.130 + network_prefix: valid-prefix # Eg: 32 + service_groups: + - name: ngt + description: Example Service Group NGT - TCP + service_details: + tcp: + - "2074" + - name: dns + description: Example Service Group DNS - UDP + service_details: + udp: + - "53" + security_policies: + - name: Example-AZ01-Calm + description: Example Security Policy + allow_ipv6_traffic: true # true/ false # Policy rules apply only to IPv4 Traffic and all IPv6 traffic are blocked by default. + hitlog: true # true/ false # Log traffic flow hits on the policy rules + # Only app rules are supported for now + app_rule: + policy_mode: MONITOR # APPLY/MONITOR + # Secure this app + target_group: + categories: + AppType: AZ01LAMP01 + inbounds: + - categories: + AppTier: + - WEB + address: + name: Calm + protocol: + service: + name: ssh + - categories: + AppTier: + - APP + udp: + - start_port: 82 + end_port: 8080 + address: + name: Calm + protocol: + service: + name: ssh + - categories: + AppTier: + - DB + address: + name: Calm + protocol: + service: + name: ssh + outbounds: + - address: + name: NVD_AD + protocol: + service: + name: dns + clusters: + # specify the names of clusters if they are already registered to a PC defined above in the AZ + # else specify the ip if they need to be registered to the PC + valid-pe-ip-or-name: + name: cluster-01 # Optional if name is already provided above + # Use global pe creds + <<: *pe_creds + # Use global eula config + <<: *eula + # Use global pulse config + <<: *pulse + # Use global ad config + <<: *ad_config + dsip: valid-ip + networks: + # Use global network config + - <<: *pe_networks + containers: + # Use global storage container config + - <<: *pe_container + valid-pe-ip-or-name: + name: cluster-02 # Optional if name is already provided above + # Use global pe creds + <<: *pe_creds + # Over-ride global eula config + eula: + username: username + company_name: Nutanix + job_title: title + # Use global pulse config + <<: *pulse + # Over-ride global ad config + ad_config: + directory_services: + directory_type: ACTIVE_DIRECTORY # only ACTIVE_DIRECTORY is supported, port 389 will be used for LDAP + ad_name: some-other-name + ad_domain: eng.company.com + ad_server_ip: valid-ip + service_account_username: username + service_account_password: password + role_mappings: + - role_type: ROLE_USER_ADMIN # one of 'ROLE_CLUSTER_ADMIN', 'ROLE_USER_ADMIN', 'ROLE_CLUSTER_VIEWER', 'ROLE_BACKUP_ADMIN' + entity_type: GROUP # one of GROUP, OU, USER + values: + - yash + - naveen + dsip: valid-ip + networks: + # Use global network config and add another network + - <<: *pe_networks + - name: "vlan-200" + subnet_type: VLAN # only VLAN 
is supported yet + vlan_id: 200 + network_ip: valid-ip + network_prefix: 24 + default_gateway_ip: valid-ip + # comment pool_list section if there are no pools for the subnet + pool_list: + - range: "valid-ip-start valid-ip-end" # Eg "10.10.10.31 10.10.10.40" + # comment dhcp_options section if you don't want dhcp. Over-riding dhcp is not supported yet + dhcp_options: + domain_name_server_list: [ 10.10.10.10 ] + domain_search_list: [ eng.company.com ] + domain_name: eng.company.com + containers: + # Use global storage container config and add another container + - <<: *pe_container + - name: Automation-container-2 + # All these below parameters are optional + #storage_pool_uuid: uuid # Which storage pool to use, comment it to auto pick storage pool + reserved_in_gb: 1 # Represents the minimum exclusively reserved storage capacity available for the storage container + advertisedCapacity_in_gb: 1 # Represents the maximum storage capacity available for the storage container to use + replication_factor: 2 # Number of data copies to maintain + compression_enabled: true # Only Inline compression is supported yet + compression_delay_in_secs: 0 + erasure_code: "OFF" # OFF/ ON # Erasure code requires a minimum of 4 nodes when using RF2 and a minimum of 6 nodes when using RF3 + on_disk_dedup: "OFF" # OFF/ ON # Deduplication is not supported with fewer than 3 nodes + nfsWhitelistAddress: [ ] # Access list for storage container \ No newline at end of file diff --git a/framework/api/__init__.py b/framework/api/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/framework/calm/__init__.py b/framework/calm/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/framework/calm/dsl/__init__.py b/framework/calm/dsl/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/framework/calm/dsl/api/__init__.py b/framework/calm/dsl/api/__init__.py new file mode 100644 index 0000000..e47f38d --- /dev/null +++ b/framework/calm/dsl/api/__init__.py @@ -0,0 +1,8 @@ +from .handle import get_client_handle_obj, get_api_client +from .resource import get_resource_api + +__all__ = [ + "get_client_handle_obj", + "get_api_client", + "get_resource_api", +] diff --git a/framework/calm/dsl/api/access_control_policy.py b/framework/calm/dsl/api/access_control_policy.py new file mode 100644 index 0000000..54c9b1b --- /dev/null +++ b/framework/calm/dsl/api/access_control_policy.py @@ -0,0 +1,6 @@ +from .resource import ResourceAPI + + +class AccessControlPolicyAPI(ResourceAPI): + def __init__(self, connection): + super().__init__(connection, resource_type="access_control_policies") diff --git a/framework/calm/dsl/api/app_icons.py b/framework/calm/dsl/api/app_icons.py new file mode 100644 index 0000000..8bf09b7 --- /dev/null +++ b/framework/calm/dsl/api/app_icons.py @@ -0,0 +1,28 @@ +from .resource import ResourceAPI +from .connection import REQUEST + + +class AppIconAPI(ResourceAPI): + def __init__(self, connection): + super().__init__(connection, resource_type="app_icons") + self.UPLOAD = self.PREFIX + "/upload" + self.IS_MARKETPLACE_ICON = self.PREFIX + "/{}/" + "is_marketplaceicon" + + def upload(self, icon_name, file_path): + data = {"name": icon_name} + files = {"image": (icon_name, open(file_path, "rb"), "image/jpeg")} + + return self.connection._call( + self.UPLOAD, + request_json=data, + files=files, + method=REQUEST.METHOD.POST, + verify=False, + ) + + def is_marketplace_icon(self, uuid): + return self.connection._call( + self.IS_MARKETPLACE_ICON.format(uuid), + verify=False, + 
method=REQUEST.METHOD.GET, + ) diff --git a/framework/calm/dsl/api/app_protection_policy.py b/framework/calm/dsl/api/app_protection_policy.py new file mode 100644 index 0000000..8f49d1b --- /dev/null +++ b/framework/calm/dsl/api/app_protection_policy.py @@ -0,0 +1,8 @@ +from .resource import ResourceAPI + + +class AppProtectionPolicyAPI(ResourceAPI): + def __init__(self, connection): + super().__init__( + connection, resource_type="app_protection_policies", calm_api=True + ) diff --git a/framework/calm/dsl/api/application.py b/framework/calm/dsl/api/application.py new file mode 100644 index 0000000..7227aa6 --- /dev/null +++ b/framework/calm/dsl/api/application.py @@ -0,0 +1,69 @@ +from .resource import ResourceAPI +from .connection import REQUEST + + +class ApplicationAPI(ResourceAPI): + def __init__(self, connection): + super().__init__(connection, resource_type="apps") + + self.ACTION_RUN = self.ITEM + "/actions/{}/run" + self.PATCH_RUN = self.ITEM + "/patch/{}/run" + self.DOWNLOAD_RUNLOG = self.ITEM + "/app_runlogs/{}/output/download" + self.ACTION_VARIABLE = self.ITEM + "/actions/{}/variables" + self.RECOVERY_GROUPS_LIST = self.ITEM + "/recovery_groups/list" + + def run_action(self, app_id, action_id, payload): + return self.connection._call( + self.ACTION_RUN.format(app_id, action_id), + request_json=payload, + verify=False, + method=REQUEST.METHOD.POST, + ) + + def run_patch(self, app_id, patch_id, payload): + return self.connection._call( + self.PATCH_RUN.format(app_id, patch_id), + request_json=payload, + verify=False, + method=REQUEST.METHOD.POST, + ) + + def poll_action_run(self, poll_url, payload=None): + if payload: + return self.connection._call( + poll_url, request_json=payload, verify=False, method=REQUEST.METHOD.POST + ) + else: + return self.connection._call( + poll_url, verify=False, method=REQUEST.METHOD.GET + ) + + def delete(self, app_id, soft_delete=False): + delete_url = self.ITEM.format(app_id) + if soft_delete: + delete_url += "?type=soft" + return self.connection._call( + delete_url, verify=False, method=REQUEST.METHOD.DELETE + ) + + def download_runlog(self, app_id, runlog_id): + download_url = self.DOWNLOAD_RUNLOG.format(app_id, runlog_id) + return self.connection._call( + download_url, method=REQUEST.METHOD.GET, verify=False + ) + + def action_variables(self, app_id, action_name): + action_var_url = self.ACTION_VARIABLE.format(app_id, action_name) + return self.connection._call( + action_var_url, method=REQUEST.METHOD.GET, verify=False + ) + + def get_recovery_groups(self, app_id, api_filter, length=250, offset=0): + payload = {"filter": api_filter, "length": length, "offset": offset} + recovery_groups_url = self.RECOVERY_GROUPS_LIST.format(app_id) + return self.connection._call( + recovery_groups_url, + request_json=payload, + verify=False, + method=REQUEST.METHOD.POST, + ) diff --git a/framework/calm/dsl/api/blueprint.py b/framework/calm/dsl/api/blueprint.py new file mode 100644 index 0000000..839fdea --- /dev/null +++ b/framework/calm/dsl/api/blueprint.py @@ -0,0 +1,256 @@ +from .resource import ResourceAPI +from .connection import REQUEST +from .util import strip_secrets, patch_secrets + + +class BlueprintAPI(ResourceAPI): + def __init__(self, connection): + super().__init__(connection, resource_type="blueprints") + self.UPLOAD = self.PREFIX + "/import_json" + self.LAUNCH = self.ITEM + "/simple_launch" + self.FULL_LAUNCH = self.ITEM + "/launch" + self.MARKETPLACE_LAUNCH = self.PREFIX + "/marketplace_launch" + self.LAUNCH_POLL = self.ITEM + 
"/pending_launches/{}" + self.BP_EDITABLES = self.ITEM + "/runtime_editables" + self.EXPORT_JSON = self.ITEM + "/export_json" + self.EXPORT_JSON_WITH_SECRETS = self.ITEM + "/export_json?keep_secrets=true" + self.EXPORT_FILE = self.ITEM + "/export_file" + self.BROWNFIELD_VM_LIST = self.PREFIX + "/brownfield_import/vms/list" + self.PATCH_WITH_ENVIRONMENT = self.ITEM + "/patch_with_environment" + self.VARIABLE_VALUES = self.ITEM + "/variables/{}/values" + self.VARIABLE_VALUES_WITH_TRLID = ( + self.VARIABLE_VALUES + "?requestId={}&trlId={}" + ) + self.PROTECTION_POLICY_LIST = ( + self.ITEM + "/app_profile/{}/config_spec/{}/app_protection_policies/list" + ) + + # TODO https://jira.nutanix.com/browse/CALM-17178 + # Blueprint creation timeout is dependent on payload. + # So setting read timeout to 300 seconds + def upload(self, payload): + return self.connection._call( + self.UPLOAD, + verify=False, + request_json=payload, + method=REQUEST.METHOD.POST, + timeout=(5, 300), + ) + + def launch(self, uuid, payload): + return self.connection._call( + self.LAUNCH.format(uuid), + verify=False, + request_json=payload, + method=REQUEST.METHOD.POST, + ) + + def full_launch(self, uuid, payload): + return self.connection._call( + self.FULL_LAUNCH.format(uuid), + verify=False, + request_json=payload, + method=REQUEST.METHOD.POST, + ) + + def marketplace_launch(self, payload): + return self.connection._call( + self.MARKETPLACE_LAUNCH, + verify=False, + request_json=payload, + method=REQUEST.METHOD.POST, + ) + + def patch_with_environment(self, uuid, payload): + return self.connection._call( + self.PATCH_WITH_ENVIRONMENT.format(uuid), + verify=False, + request_json=payload, + method=REQUEST.METHOD.POST, + ) + + def poll_launch(self, blueprint_id, request_id): + return self.connection._call( + self.LAUNCH_POLL.format(blueprint_id, request_id), + verify=False, + method=REQUEST.METHOD.GET, + ) + + def _get_editables(self, bp_uuid): + return self.connection._call( + self.BP_EDITABLES.format(bp_uuid), verify=False, method=REQUEST.METHOD.GET + ) + + def brownfield_vms(self, payload): + # Adding refresh cache for call. As redis expiry is 10 mins. 
+ payload["filter"] += ";refresh_cache==True" + return self.connection._call( + self.BROWNFIELD_VM_LIST, + verify=False, + request_json=payload, + method=REQUEST.METHOD.POST, + ) + + def protection_policies( + self, bp_uuid, app_profile_uuid, config_uuid, env_uuid, length=250, offset=0 + ): + payload = { + "length": 250, + "offset": 0, + "filter": "environment_references=={}".format(env_uuid), + } + return self.connection._call( + self.PROTECTION_POLICY_LIST.format(bp_uuid, app_profile_uuid, config_uuid), + verify=False, + request_json=payload, + method=REQUEST.METHOD.POST, + ) + + @staticmethod + def _make_blueprint_payload(bp_name, bp_desc, bp_resources, bp_metadata=None): + + if not bp_metadata: + bp_metadata = {"spec_version": 1, "name": bp_name, "kind": "blueprint"} + + bp_payload = { + "spec": { + "name": bp_name, + "description": bp_desc or "", + "resources": bp_resources, + }, + "metadata": bp_metadata, + "api_version": "3.0", + } + + return bp_payload + + def upload_with_secrets( + self, bp_name, bp_desc, bp_resources, bp_metadata=None, force_create=False + ): + + # check if bp with the given name already exists + params = {"filter": "name=={};state!=DELETED".format(bp_name)} + res, err = self.list(params=params) + if err: + return None, err + + response = res.json() + entities = response.get("entities", None) + if entities: + if len(entities) > 0: + if not force_create: + err_msg = "Blueprint {} already exists. Use --force to first delete existing blueprint before create.".format( + bp_name + ) + # ToDo: Add command to edit Blueprints + err = {"error": err_msg, "code": -1} + return None, err + + # --force option used in create. Delete existing blueprint with same name. + bp_uuid = entities[0]["metadata"]["uuid"] + _, err = self.delete(bp_uuid) + if err: + return None, err + + secret_map = {} + secret_variables = [] + object_lists = [ + "service_definition_list", + "package_definition_list", + "substrate_definition_list", + "app_profile_list", + "credential_definition_list", + ] + strip_secrets( + bp_resources, secret_map, secret_variables, object_lists=object_lists + ) + + # Handling vmware secrets + def strip_vmware_secrets(path_list, obj): + path_list.extend(["create_spec", "resources", "guest_customization"]) + obj = obj["create_spec"]["resources"]["guest_customization"] + + if "windows_data" in obj: + path_list.append("windows_data") + obj = obj["windows_data"] + + # Check for admin_password + if "password" in obj: + secret_variables.append( + (path_list + ["password"], obj["password"].pop("value", "")) + ) + obj["password"]["attrs"] = { + "is_secret_modified": False, + "secret_reference": None, + } + + # Now check for domain password + if obj.get("is_domain", False): + if "domain_password" in obj: + secret_variables.append( + ( + path_list + ["domain_password"], + obj["domain_password"].pop("value", ""), + ) + ) + obj["domain_password"]["attrs"] = { + "is_secret_modified": False, + "secret_reference": None, + } + + for obj_index, obj in enumerate( + bp_resources.get("substrate_definition_list", []) or [] + ): + if (obj["type"] == "VMWARE_VM") and (obj["os_type"] == "Windows"): + strip_vmware_secrets(["substrate_definition_list", obj_index], obj) + + upload_payload = self._make_blueprint_payload( + bp_name, bp_desc, bp_resources, bp_metadata + ) + + # TODO strip categories and add at updating time + bp_categories = upload_payload["metadata"].pop("categories", {}) + res, err = self.upload(upload_payload) + + if err: + return res, err + + # Add secrets and update bp + bp = 
res.json() + del bp["status"] + + patch_secrets(bp["spec"]["resources"], secret_map, secret_variables) + + # Adding categories at PUT call to blueprint + bp["metadata"]["categories"] = bp_categories + + # Update blueprint + update_payload = bp + uuid = bp["metadata"]["uuid"] + + return self.update(uuid, update_payload) + + def export_json(self, uuid): + url = self.EXPORT_JSON.format(uuid) + return self.connection._call(url, verify=False, method=REQUEST.METHOD.GET) + + def export_json_with_secrets(self, uuid): + url = self.EXPORT_JSON_WITH_SECRETS.format(uuid) + return self.connection._call(url, verify=False, method=REQUEST.METHOD.GET) + + def export_file(self, uuid): + return self.connection._call( + self.EXPORT_FILE.format(uuid), verify=False, method=REQUEST.METHOD.GET + ) + + def variable_values(self, uuid, var_uuid): + url = self.VARIABLE_VALUES.format(uuid, var_uuid) + return self.connection._call( + url, verify=False, method=REQUEST.METHOD.GET, ignore_error=True + ) + + def variable_values_from_trlid(self, uuid, var_uuid, req_id, trl_id): + url = self.VARIABLE_VALUES_WITH_TRLID.format(uuid, var_uuid, req_id, trl_id) + return self.connection._call( + url, verify=False, method=REQUEST.METHOD.GET, ignore_error=True + ) diff --git a/framework/calm/dsl/api/connection.py b/framework/calm/dsl/api/connection.py new file mode 100644 index 0000000..8cc3323 --- /dev/null +++ b/framework/calm/dsl/api/connection.py @@ -0,0 +1,383 @@ +# -*- coding: utf-8 -*- +""" +connection: Provides a HTTP client to make requests to calm + +Example: + +pc_ip = "" +pc_port = 9440 +client = get_connection(pc_ip, pc_port, + auth=("", "")) + +""" + +import traceback +import json +import urllib3 +import sys + +from requests import Session as Session +from requests_toolbelt import MultipartEncoder +from requests.adapters import HTTPAdapter +from requests.exceptions import ConnectTimeout +from requests.packages.urllib3.util.retry import Retry + +from calm.dsl.log import get_logging_handle +from calm.dsl.config import get_context + +urllib3.disable_warnings() +LOG = get_logging_handle(__name__) + + +class REQUEST: + """Request related constants""" + + class SCHEME: + """ + Connection schemes + """ + + HTTP = "http" + HTTPS = "https" + + class AUTH_TYPE: + """ + Types of auth + """ + + NONE = "none" + BASIC = "basic" + JWT = "jwt" + + class METHOD: + """ + Request methods + """ + + DELETE = "delete" + GET = "get" + POST = "post" + PUT = "put" + + +def build_url(host, port, endpoint="", scheme=REQUEST.SCHEME.HTTPS): + """Build url. + + Args: + host (str): hostname/ip + port (int): port of the service + endpoint (str): url endpoint + scheme (str): http/https/tcp/udp + Returns: + Raises: + """ + url = "{scheme}://{host}".format(scheme=scheme, host=host) + if port is not None: + url += ":{port}".format(port=port) + url += "/{endpoint}".format(endpoint=endpoint) + return url + + +class Connection: + def __init__( + self, + host, + port, + auth_type=REQUEST.AUTH_TYPE.BASIC, + scheme=REQUEST.SCHEME.HTTPS, + auth=None, + pool_maxsize=20, + pool_connections=20, + pool_block=True, + base_url="", + response_processor=None, + session_headers=None, + **kwargs, + ): + """Generic client to connect to server. 
+ + Args: + host (str): Hostname/IP address + port (int): Port to connect to + pool_maxsize (int): The maximum number of connections in the pool + pool_connections (int): The number of urllib3 connection pools + to cache + pool_block (bool): Whether the connection pool should block + for connections + base_url (str): Base URL + scheme (str): http scheme (http or https) + response_processor (dict): response processor dict + session_headers (dict): session headers dict + auth_type (str): auth type that needs to be used by the client + auth (tuple): authentication + Returns: + Raises: + """ + self.base_url = base_url + self.host = host + self.port = port + self.session_headers = session_headers or {} + self._pool_maxsize = pool_maxsize + self._pool_connections = pool_connections + self._pool_block = pool_block + self.session = None + self.auth = auth + self.scheme = scheme + self.auth_type = auth_type + self.response_processor = response_processor + + def connect(self): + """Connect to api server, create http session pool. + + Args: + Returns: + api server session + Raises: + """ + + context = get_context() + connection_config = context.get_connection_config() + if connection_config["retries_enabled"]: + retry_strategy = Retry( + total=3, + status_forcelist=[429, 500, 502, 503, 504], + method_whitelist=[ + "GET", + "PUT", + "DELETE", + "POST", + ], + ) + http_adapter = HTTPAdapter( + pool_block=bool(self._pool_block), + pool_connections=int(self._pool_connections), + pool_maxsize=int(self._pool_maxsize), + max_retries=retry_strategy, + ) + + else: + http_adapter = HTTPAdapter( + pool_block=bool(self._pool_block), + pool_connections=int(self._pool_connections), + pool_maxsize=int(self._pool_maxsize), + ) + + self.session = Session() + if self.auth and self.auth_type == REQUEST.AUTH_TYPE.BASIC: + self.session.auth = self.auth + self.session.headers.update({"Content-Type": "application/json"}) + + self.session.mount("http://", http_adapter) + self.session.mount("https://", http_adapter) + self.base_url = build_url(self.host, self.port, scheme=self.scheme) + LOG.debug("{} session created".format(self.__class__.__name__)) + return self.session + + def close(self): + """ + Close the session. + + Args: + None + Returns: + None + """ + self.session.close() + + def _call( + self, + endpoint, + method=REQUEST.METHOD.POST, + cookies=None, + request_json=None, + request_params=None, + verify=True, + headers=None, + files=None, + ignore_error=False, + warning_msg="", + **kwargs, + ): + """Private method for making http request to calm + + Args: + endpoint (str): calm server endpoint + method (str): calm server http method + cookies (dict): cookies that need to be forwarded. 
+ request_json (dict): request data + request_params (dict): request params + timeout (touple): (connection timeout, read timeout) + Returns: + (tuple (requests.Response, dict)): Response + """ + timeout = kwargs.get("timeout", None) + if not timeout: + context = get_context() + connection_config = context.get_connection_config() + timeout = ( + connection_config["connection_timeout"], + connection_config["read_timeout"], + ) + + if request_params is None: + request_params = {} + + request_json = request_json or {} + LOG.debug( + """Server Request- '{method}' at '{endpoint}' with body: + '{body}'""".format( + method=method, endpoint=endpoint, body=request_json + ) + ) + res = None + err = None + try: + res = None + url = build_url(self.host, self.port, endpoint=endpoint, scheme=self.scheme) + LOG.debug("URL is: {}".format(url)) + base_headers = self.session.headers + if headers: + base_headers.update(headers) + + if method == REQUEST.METHOD.POST: + if files is not None: + request_json.update(files) + m = MultipartEncoder(fields=request_json) + res = self.session.post( + url, + data=m, + verify=verify, + headers={"Content-Type": m.content_type}, + timeout=timeout, + ) + else: + res = self.session.post( + url, + params=request_params, + data=json.dumps(request_json), + verify=verify, + headers=base_headers, + cookies=cookies, + timeout=timeout, + ) + elif method == REQUEST.METHOD.PUT: + res = self.session.put( + url, + params=request_params, + data=json.dumps(request_json), + verify=verify, + headers=base_headers, + cookies=cookies, + timeout=timeout, + ) + elif method == REQUEST.METHOD.GET: + res = self.session.get( + url, + params=request_params or request_json, + verify=verify, + headers=base_headers, + cookies=cookies, + timeout=timeout, + ) + elif method == REQUEST.METHOD.DELETE: + res = self.session.delete( + url, + params=request_params, + data=json.dumps(request_json), + verify=verify, + headers=base_headers, + cookies=cookies, + timeout=timeout, + ) + res.raise_for_status() + if not url.endswith("/download"): + if not res.ok: + LOG.debug("Server Response: {}".format(res.json())) + except ConnectTimeout as cte: + LOG.error( + "Could not establish connection to server at https://{}:{}.".format( + self.host, self.port + ) + ) + LOG.debug("Error Response: {}".format(cte)) + sys.exit(-1) + except Exception as ex: + LOG.debug("Got traceback\n{}".format(traceback.format_exc())) + if hasattr(res, "json") and callable(getattr(res, "json")): + try: + err_msg = res.json() + except Exception: + err_msg = "{}".format(ex) + pass + elif hasattr(res, "text"): + err_msg = res.text + else: + err_msg = "{}".format(ex) + status_code = res.status_code if hasattr(res, "status_code") else 500 + err = {"error": err_msg, "code": status_code} + + if ignore_error: + if warning_msg: + LOG.warning(warning_msg) + return None, err + + LOG.error( + "Oops! Something went wrong.\n{}".format( + json.dumps(err, indent=4, separators=(",", ": ")) + ) + ) + + return res, err + + +_CONNECTION = None + + +def get_connection_obj( + host, + port, + auth_type=REQUEST.AUTH_TYPE.BASIC, + scheme=REQUEST.SCHEME.HTTPS, + auth=None, +): + """Returns object of Connection class""" + + return Connection(host, port, auth_type, scheme, auth) + + +def get_connection_handle( + host, + port, + auth_type=REQUEST.AUTH_TYPE.BASIC, + scheme=REQUEST.SCHEME.HTTPS, + auth=None, +): + """Get api server (aplos/styx) handle. 
+ + Args: + host (str): Hostname/IP address + port (int): Port to connect to + auth_type (str): auth type that needs to be used by the client + scheme (str): http scheme (http or https) + session_headers (dict): session headers dict + auth (tuple): authentication + Returns: + Client handle + Raises: + Exception: If cannot connect + """ + global _CONNECTION + if not _CONNECTION: + update_connection_handle(host, port, auth_type, scheme, auth) + return _CONNECTION + + +def update_connection_handle( + host, + port, + auth_type=REQUEST.AUTH_TYPE.BASIC, + scheme=REQUEST.SCHEME.HTTPS, + auth=None, +): + global _CONNECTION + _CONNECTION = Connection(host, port, auth_type, scheme=scheme, auth=auth) diff --git a/framework/calm/dsl/api/directory_service.py b/framework/calm/dsl/api/directory_service.py new file mode 100644 index 0000000..102ea9d --- /dev/null +++ b/framework/calm/dsl/api/directory_service.py @@ -0,0 +1,6 @@ +from .resource import ResourceAPI + + +class DirectoryServiceAPI(ResourceAPI): + def __init__(self, connection): + super().__init__(connection, resource_type="directory_services") diff --git a/framework/calm/dsl/api/endpoint.py b/framework/calm/dsl/api/endpoint.py new file mode 100644 index 0000000..6ae66c3 --- /dev/null +++ b/framework/calm/dsl/api/endpoint.py @@ -0,0 +1,178 @@ +import os + +from .resource import ResourceAPI +from .connection import REQUEST +from .util import strip_secrets, patch_secrets +from calm.dsl.config import get_context +from .project import ProjectAPI + + +class EndpointAPI(ResourceAPI): + def __init__(self, connection): + super().__init__(connection, resource_type="endpoints") + self.UPLOAD = self.PREFIX + "/import_json" + self.EXPORT_FILE = self.ITEM + "/export_file" + self.IMPORT_FILE = self.PREFIX + "/import_file" + self.EXPORT_JSON = self.ITEM + "/export_json" + self.EXPORT_JSON_WITH_SECRETS = self.ITEM + "/export_json?keep_secrets=true" + + def upload(self, payload): + return self.connection._call( + self.UPLOAD, verify=False, request_json=payload, method=REQUEST.METHOD.POST + ) + + @staticmethod + def _make_endpoint_payload( + endpoint_name, endpoint_desc, endpoint_resources, spec_version=None + ): + + endpoint_payload = { + "spec": { + "name": endpoint_name, + "description": endpoint_desc or "", + "resources": endpoint_resources, + }, + "metadata": { + "spec_version": spec_version or 1, + "name": endpoint_name, + "kind": "endpoint", + }, + "api_version": "3.0", + } + + return endpoint_payload + + def upload_with_secrets( + self, + endpoint_name, + endpoint_desc, + endpoint_resources, + force_create=False, + project_reference={}, + ): + + # check if endpoint with the given name already exists + params = {"filter": "name=={};deleted==FALSE".format(endpoint_name)} + res, err = self.list(params=params) + if err: + return None, err + + response = res.json() + entities = response.get("entities", None) + if entities: + if len(entities) > 0: + if not force_create: + err_msg = "Endpoint {} already exists. Use --force to first delete existing endpoint before create.".format( + endpoint_name + ) + err = {"error": err_msg, "code": -1} + return None, err + + # --force option used in create. Delete existing endpoint with same name.
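+ # Only the first matching endpoint is deleted; if that delete fails, the error is returned and the new endpoint is not created.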
+ ep_uuid = entities[0]["metadata"]["uuid"] + _, err = self.delete(ep_uuid) + if err: + return None, err + + secret_map = {} + secret_variables = [] + + strip_secrets(endpoint_resources["attrs"], secret_map, secret_variables) + endpoint_resources["attrs"].pop("default_credential_local_reference", None) + upload_payload = self._make_endpoint_payload( + endpoint_name, endpoint_desc, endpoint_resources + ) + project_name = "" + project_id = "" + if project_reference: + project_name = project_reference.get("name") + project_id = project_reference.get("uuid") + else: + ContextObj = get_context() + project_config = ContextObj.get_project_config() + project_name = project_config["name"] + projectObj = ProjectAPI(self.connection) + + # Fetch project details + params = {"filter": "name=={}".format(project_name)} + res, err = projectObj.list(params=params) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + response = res.json() + entities = response.get("entities", None) + if not entities: + raise Exception("No project with name {} exists".format(project_name)) + + project_id = entities[0]["metadata"]["uuid"] + + # Setting project reference + upload_payload["metadata"]["project_reference"] = { + "kind": "project", + "uuid": project_id, + "name": project_name, + } + + res, err = self.upload(upload_payload) + + if err: + return res, err + + endpoint = res.json() + del endpoint["status"] + + # Add secrets and update endpoint + patch_secrets( + endpoint["spec"]["resources"]["attrs"], secret_map, secret_variables + ) + + # Update endpoint + uuid = endpoint["metadata"]["uuid"] + + return self.update(uuid, endpoint) + + def export_file(self, uuid, passphrase=None): + current_path = os.path.dirname(os.path.realpath(__file__)) + if passphrase: + res, err = self.connection._call( + self.EXPORT_FILE.format(uuid), + verify=False, + method=REQUEST.METHOD.POST, + request_json={"passphrase": passphrase}, + files=[], + ) + else: + res, err = self.connection._call( + self.EXPORT_FILE.format(uuid), verify=False, method=REQUEST.METHOD.GET + ) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + with open(current_path + "/" + uuid + ".json", "wb") as downloaded_file: + for chunk in res.iter_content(chunk_size=2048): + downloaded_file.write(chunk) + + return current_path + "/" + uuid + ".json" + + def import_file(self, file_path, name, project_uuid, passphrase=None): + + payload = {"name": name, "project_uuid": project_uuid} + if passphrase: + payload["passphrase"] = passphrase + files = {"file": ("file", open(file_path, "rb"))} + + return self.connection._call( + self.IMPORT_FILE, + verify=False, + files=files, + request_json=payload, + method=REQUEST.METHOD.POST, + ) + + def export_json(self, uuid): + url = self.EXPORT_JSON.format(uuid) + return self.connection._call(url, verify=False, method=REQUEST.METHOD.GET) + + def export_json_with_secrets(self, uuid): + url = self.EXPORT_JSON_WITH_SECRETS.format(uuid) + return self.connection._call(url, verify=False, method=REQUEST.METHOD.GET) diff --git a/framework/calm/dsl/api/environment.py b/framework/calm/dsl/api/environment.py new file mode 100644 index 0000000..5da60b7 --- /dev/null +++ b/framework/calm/dsl/api/environment.py @@ -0,0 +1,17 @@ +from .resource import ResourceAPI +from .connection import REQUEST + + +class EnvironmentAPI(ResourceAPI): + def __init__(self, connection): + super().__init__(connection, resource_type="environments") + + def list(self, params={}, ignore_error=False): + return 
self.connection._call( + self.LIST, + verify=False, + request_json=params, + method=REQUEST.METHOD.POST, + ignore_error=ignore_error, + timeout=(5, 300), + ) diff --git a/framework/calm/dsl/api/handle.py b/framework/calm/dsl/api/handle.py new file mode 100644 index 0000000..a450f04 --- /dev/null +++ b/framework/calm/dsl/api/handle.py @@ -0,0 +1,124 @@ +from calm.dsl.config import get_context + +from .connection import ( + get_connection_obj, + get_connection_handle, + update_connection_handle, + REQUEST, +) +from .blueprint import BlueprintAPI +from .endpoint import EndpointAPI +from .runbook import RunbookAPI +from .library_tasks import TaskLibraryApi +from .application import ApplicationAPI +from .project import ProjectAPI +from .environment import EnvironmentAPI +from .setting import AccountsAPI +from .marketplace import MarketPlaceAPI +from .app_icons import AppIconAPI +from .version import VersionAPI +from .showback import ShowbackAPI +from .user import UserAPI +from .user_group import UserGroupAPI +from .role import RoleAPI +from .directory_service import DirectoryServiceAPI +from .access_control_policy import AccessControlPolicyAPI +from .app_protection_policy import AppProtectionPolicyAPI +from .job import JobAPI +from .tunnel import TunnelAPI +from .vm_recovery_point import VmRecoveryPointAPI +from .nutanix_task import TaskAPI +from .network_group import NetworkGroupAPI +from .resource_type import ResourceTypeAPI + + +class ClientHandle: + def __init__(self, connection): + self.connection = connection + + def _connect(self): + + self.connection.connect() + + # Note - add entity api classes here + self.project = ProjectAPI(self.connection) + self.environment = EnvironmentAPI(self.connection) + self.blueprint = BlueprintAPI(self.connection) + self.endpoint = EndpointAPI(self.connection) + self.runbook = RunbookAPI(self.connection) + self.task = TaskLibraryApi(self.connection) + self.application = ApplicationAPI(self.connection) + self.account = AccountsAPI(self.connection) + self.market_place = MarketPlaceAPI(self.connection) + self.app_icon = AppIconAPI(self.connection) + self.version = VersionAPI(self.connection) + self.showback = ShowbackAPI(self.connection) + self.user = UserAPI(self.connection) + self.group = UserGroupAPI(self.connection) + self.role = RoleAPI(self.connection) + self.directory_service = DirectoryServiceAPI(self.connection) + self.acp = AccessControlPolicyAPI(self.connection) + self.environment = EnvironmentAPI(self.connection) + self.app_protection_policy = AppProtectionPolicyAPI(self.connection) + self.job = JobAPI(self.connection) + self.tunnel = TunnelAPI(self.connection) + self.vm_recovery_point = VmRecoveryPointAPI(self.connection) + self.nutanix_task = TaskAPI(self.connection) + self.network_group = NetworkGroupAPI(self.connection) + self.resource_types = ResourceTypeAPI(self.connection) + + +def get_client_handle_obj( + host, + port, + auth_type=REQUEST.AUTH_TYPE.BASIC, + scheme=REQUEST.SCHEME.HTTPS, + auth=None, +): + """returns object of ClientHandle class""" + + connection = get_connection_obj(host, port, auth_type, scheme, auth) + handle = ClientHandle(connection) + handle._connect() + return handle + + +_API_CLIENT_HANDLE = None + + +def update_api_client( + host, + port, + auth_type=REQUEST.AUTH_TYPE.BASIC, + scheme=REQUEST.SCHEME.HTTPS, + auth=None, +): + """updates global api client object (_API_CLIENT_HANDLE)""" + + global _API_CLIENT_HANDLE + + update_connection_handle(host, port, auth_type, scheme=scheme, auth=auth) + connection = 
get_connection_handle(host, port, auth_type, scheme, auth) + _API_CLIENT_HANDLE = ClientHandle(connection) + _API_CLIENT_HANDLE._connect() + + return _API_CLIENT_HANDLE + + +def get_api_client(): + """returns global api client object (_API_CLIENT_HANDLE)""" + + global _API_CLIENT_HANDLE + + if not _API_CLIENT_HANDLE: + context = get_context() + server_config = context.get_server_config() + + pc_ip = server_config.get("pc_ip") + pc_port = server_config.get("pc_port") + username = server_config.get("pc_username") + password = server_config.get("pc_password") + + update_api_client(host=pc_ip, port=pc_port, auth=(username, password)) + + return _API_CLIENT_HANDLE diff --git a/framework/calm/dsl/api/job.py b/framework/calm/dsl/api/job.py new file mode 100644 index 0000000..14c0770 --- /dev/null +++ b/framework/calm/dsl/api/job.py @@ -0,0 +1,17 @@ +from .resource import ResourceAPI +from .connection import REQUEST + + +class JobAPI(ResourceAPI): + def __init__(self, connection): + super().__init__(connection, resource_type="jobs") + self.INSTANCES = self.ITEM + "/instances" + + def instances(self, uuid, params={}, ignore_error=False): + return self.connection._call( + self.INSTANCES.format(uuid), + verify=False, + request_json=params, + method=REQUEST.METHOD.POST, + ignore_error=ignore_error, + ) diff --git a/framework/calm/dsl/api/library_tasks.py b/framework/calm/dsl/api/library_tasks.py new file mode 100644 index 0000000..d6a428a --- /dev/null +++ b/framework/calm/dsl/api/library_tasks.py @@ -0,0 +1,6 @@ +from .resource import ResourceAPI + + +class TaskLibraryApi(ResourceAPI): + def __init__(self, connection): + super().__init__(connection, resource_type="app_tasks") diff --git a/framework/calm/dsl/api/marketplace.py b/framework/calm/dsl/api/marketplace.py new file mode 100644 index 0000000..2eb2ed0 --- /dev/null +++ b/framework/calm/dsl/api/marketplace.py @@ -0,0 +1,20 @@ +from .resource import ResourceAPI +from .connection import REQUEST + + +class MarketPlaceAPI(ResourceAPI): + def __init__(self, connection): + super().__init__(connection, resource_type="calm_marketplace_items") + self.VARIABLE_VALUES = self.ITEM + "/variables/{}/values" + + # https://jira.nutanix.com/browse/CALM-33232 + # Marketplace API taking more than 30s so setting it to 300s + def variable_values(self, uuid, var_uuid, payload={}): + url = self.VARIABLE_VALUES.format(uuid, var_uuid) + return self.connection._call( + url, + verify=False, + method=REQUEST.METHOD.POST, + request_json=payload, + timeout=(5, 300), + ) diff --git a/framework/calm/dsl/api/network_group.py b/framework/calm/dsl/api/network_group.py new file mode 100644 index 0000000..3dbce17 --- /dev/null +++ b/framework/calm/dsl/api/network_group.py @@ -0,0 +1,51 @@ +from .resource import ResourceAPI +from .connection import REQUEST + + +class NetworkGroupAPI(ResourceAPI): + def __init__(self, connection): + super().__init__(connection, resource_type="network_groups") + self.CREATE_TUNNEL = self.PREFIX + "/tunnels" + self.SETUP_TUNNEL = self.PREFIX + "/{}/tunnels" + self.RESET_TUNNEL = self.PREFIX + "/{}/tunnels" + self.APP_PENDING_LAUNCH = "api/nutanix/v3/blueprints/{}/pending_launches/{}" + self.DELETE_NG_TUNNEL = self.PREFIX + "/{}/tunnels/{}" + + def create_network_group_tunnel(self, payload): + return self.connection._call( + self.CREATE_TUNNEL, + verify=False, + request_json=payload, + method=REQUEST.METHOD.POST, + ) + + def setup_network_group_tunnel(self, ng_uuid, payload): + return self.connection._call( + self.SETUP_TUNNEL.format(ng_uuid), + 
verify=False, + request_json=payload, + method=REQUEST.METHOD.POST, + ) + + def reset_network_group_tunnel_vm(self, ng_uuid, payload): + # Update tunnel_reference here + return self.connection._call( + self.RESET_TUNNEL.format(ng_uuid), + verify=False, + request_json=payload, + method=REQUEST.METHOD.POST, + ) + + def delete_tunnel(self, ng_uuid, tunnel_uuid): + return self.connection._call( + self.DELETE_NG_TUNNEL.format(ng_uuid, tunnel_uuid), + verify=False, + method=REQUEST.METHOD.DELETE, + ) + + def read_pending_task(self, uuid, task_uuid): + return self.connection._call( + self.APP_PENDING_LAUNCH.format(uuid, task_uuid), + verify=False, + method=REQUEST.METHOD.GET, + ) diff --git a/framework/calm/dsl/api/nutanix_task.py b/framework/calm/dsl/api/nutanix_task.py new file mode 100644 index 0000000..cb5eba2 --- /dev/null +++ b/framework/calm/dsl/api/nutanix_task.py @@ -0,0 +1,6 @@ +from .resource import ResourceAPI + + +class TaskAPI(ResourceAPI): + def __init__(self, connection): + super().__init__(connection, resource_type="tasks") diff --git a/framework/calm/dsl/api/project.py b/framework/calm/dsl/api/project.py new file mode 100644 index 0000000..595dc16 --- /dev/null +++ b/framework/calm/dsl/api/project.py @@ -0,0 +1,144 @@ +from distutils.version import LooseVersion as LV + +from .resource import ResourceAPI +from .connection import REQUEST + + +class ProjectAPI(ResourceAPI): + def __init__(self, connection): + super().__init__(connection, resource_type="projects") + + def create(self, payload): + + project_name = payload["spec"].get("name") or payload["metadata"].get("name") + + # check if project with the given name already exists + params = {"filter": "name=={}".format(project_name)} + res, err = self.list(params=params) + if err: + return None, err + + response = res.json() + entities = response.get("entities", None) + + if entities: + err_msg = "Project {} already exists.".format(project_name) + + err = {"error": err_msg, "code": -1} + return None, err + + return super().create(payload) + + def usage(self, uuid, payload): + + from calm.dsl.store.version import Version + + calm_version = Version.get_version("Calm") + + if LV(calm_version) > LV("3.5.0"): + CALM_PROJECTS_PREFIX = ResourceAPI.ROOT + "/projects" + else: + CALM_PROJECTS_PREFIX = ResourceAPI.ROOT + "/calm_projects" + + CALM_PROJECTS_ITEM = CALM_PROJECTS_PREFIX + "/{}" + CALM_PROJECTS_USAGE = CALM_PROJECTS_ITEM + "/usage" + + return self.connection._call( + CALM_PROJECTS_USAGE.format(uuid), + verify=False, + request_json=payload, + method=REQUEST.METHOD.POST, + ) + + def update(self, uuid, payload): + + from calm.dsl.store.version import Version + + calm_version = Version.get_version("Calm") + if LV(calm_version) >= LV("3.5.2") and LV(calm_version) < LV("3.6.1"): + payload = get_projects_internal_payload(payload) + CALM_PROJECTS_PREFIX = ResourceAPI.ROOT + "/projects_internal" + else: + CALM_PROJECTS_PREFIX = ResourceAPI.ROOT + "/projects" + + CALM_PROJECTS_ITEM = CALM_PROJECTS_PREFIX + "/{}" + return self.connection._call( + CALM_PROJECTS_ITEM.format(uuid), + verify=False, + request_json=payload, + method=REQUEST.METHOD.PUT, + ) + + def delete(self, uuid): + + from calm.dsl.store.version import Version + + calm_version = Version.get_version("Calm") + + if LV(calm_version) > LV("3.5.0"): + CALM_PROJECTS_PREFIX = ResourceAPI.ROOT + "/projects" + else: + CALM_PROJECTS_PREFIX = ResourceAPI.ROOT + "/calm_projects" + + CALM_PROJECTS_ITEM = CALM_PROJECTS_PREFIX + "/{}" + + return self.connection._call( + 
CALM_PROJECTS_ITEM.format(uuid), + verify=False, + method=REQUEST.METHOD.DELETE, + ) + + def read_pending_task(self, uuid, task_uuid): + + from calm.dsl.store.version import Version + + calm_version = Version.get_version("Calm") + + if LV(calm_version) > LV("3.5.0"): + CALM_PROJECTS_PREFIX = ResourceAPI.ROOT + "/projects" + else: + CALM_PROJECTS_PREFIX = ResourceAPI.ROOT + "/calm_projects" + + CALM_PROJECTS_ITEM = CALM_PROJECTS_PREFIX + "/{}" + CALM_PROJECTS_PENDING_TASKS = CALM_PROJECTS_ITEM + "/pending_tasks/{}" + + return self.connection._call( + CALM_PROJECTS_PENDING_TASKS.format(uuid, task_uuid), + verify=False, + method=REQUEST.METHOD.GET, + ) + + # https://jira.nutanix.com/browse/CALM-32302 + # Project list timeout if we have more Projects. + # So setting read timeout to 300 seconds + def list(self, params={}, ignore_error=False): + return self.connection._call( + self.LIST, + verify=False, + request_json=params, + method=REQUEST.METHOD.POST, + ignore_error=ignore_error, + timeout=(5, 300), + ) + + +def get_projects_internal_payload(payload): + """Modify projects paylaod to projects internal payload + + Args: + payload (dict): project payload + + Returns: + dict: projects internal payload + """ + + spec = payload["spec"] + spec["project_detail"] = {"name": spec["name"], "resources": spec["resources"]} + if "description" in spec: + spec["project_detail"]["description"] = spec["description"] + del spec["description"] + del spec["name"] + del spec["resources"] + payload["spec"] = spec + + return payload diff --git a/framework/calm/dsl/api/provider.py b/framework/calm/dsl/api/provider.py new file mode 100644 index 0000000..a890b66 --- /dev/null +++ b/framework/calm/dsl/api/provider.py @@ -0,0 +1,6 @@ +from .resource import ResourceAPI + + +class ProviderAPI(ResourceAPI): + def __init__(self, connection): + super().__init__(connection, resource_type="providers", calm_api=True) diff --git a/framework/calm/dsl/api/resource.py b/framework/calm/dsl/api/resource.py new file mode 100644 index 0000000..720cb55 --- /dev/null +++ b/framework/calm/dsl/api/resource.py @@ -0,0 +1,142 @@ +from .connection import REQUEST + + +class ResourceAPI: + + ROOT = "api/nutanix/v3" + CALM_ROOT = "api/calm/v3.0" + + def __init__(self, connection, resource_type, calm_api=False): + self.connection = connection + self.PREFIX = (self.CALM_ROOT if calm_api else self.ROOT) + "/" + resource_type + self.LIST = self.PREFIX + "/list" + self.ITEM = self.PREFIX + "/{}" + + def create(self, payload): + return self.connection._call( + self.PREFIX, verify=False, request_json=payload, method=REQUEST.METHOD.POST + ) + + def read(self, id=None): + url = self.ITEM.format(id) if id else self.PREFIX + return self.connection._call(url, verify=False, method=REQUEST.METHOD.GET) + + def update(self, uuid, payload): + return self.connection._call( + self.ITEM.format(uuid), + verify=False, + request_json=payload, + method=REQUEST.METHOD.PUT, + ) + + def delete(self, uuid): + return self.connection._call( + self.ITEM.format(uuid), verify=False, method=REQUEST.METHOD.DELETE + ) + + def list(self, params={}, ignore_error=False): + return self.connection._call( + self.LIST, + verify=False, + request_json=params, + method=REQUEST.METHOD.POST, + ignore_error=ignore_error, + ) + + def get_name_uuid_map(self, params={}): + res_entities, err = self.list_all(base_params=params, ignore_error=True) + + if not err: + response = res_entities + else: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + total_matches = len(response) + if 
total_matches == 0: + return {} + name_uuid_map = {} + + for entity in response: + entity_name = entity["status"]["name"] + entity_uuid = entity["metadata"]["uuid"] + + if entity_name in name_uuid_map: + uuid = name_uuid_map[entity_name] + + if type(uuid) is str: + uuids = uuid.split() + uuids.append(entity_uuid) + name_uuid_map[entity_name] = uuids + + elif type(uuid) is list: + uuid.append(entity_uuid) + name_uuid_map[entity_name] = uuid + + else: + name_uuid_map[entity_name] = entity_uuid + + return name_uuid_map + + def get_uuid_name_map(self, params={}): + res_entities, err = self.list_all(base_params=params, ignore_error=True) + if not err: + response = res_entities + else: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + total_matches = len(response) + if total_matches == 0: + return {} + + uuid_name_map = {} + for entity in response: + entity_name = entity["status"]["name"] + entity_uuid = entity["metadata"]["uuid"] + + uuid_name_map[entity_uuid] = entity_name + + return uuid_name_map + + # TODO: Fix return type of list_all helper + def list_all(self, api_limit=250, base_params=None, ignore_error=False): + """returns the list of entities""" + + final_list = [] + offset = 0 + if base_params is None: + base_params = {} + params = base_params.copy() + length = params.get("length", api_limit) + params["length"] = length + params["offset"] = offset + if params.get("sort_attribute", None) is None: + params["sort_attribute"] = "_created_timestamp_usecs_" + if params.get("sort_order", None) is None: + params["sort_order"] = "ASCENDING" + while True: + params["offset"] = offset + response, err = self.list(params, ignore_error=ignore_error) + if not err: + response = response.json() + else: + if ignore_error: + return [], err + else: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + final_list.extend(response["entities"]) + + total_matches = response["metadata"]["total_matches"] + if total_matches <= (length + offset): + break + + offset += length + + if ignore_error: + return final_list, None + + return final_list + + +def get_resource_api(resource_type, connection, calm_api=False): + return ResourceAPI(connection, resource_type, calm_api=calm_api) diff --git a/framework/calm/dsl/api/resource_type.py b/framework/calm/dsl/api/resource_type.py new file mode 100644 index 0000000..9905e2f --- /dev/null +++ b/framework/calm/dsl/api/resource_type.py @@ -0,0 +1,16 @@ +from .resource import ResourceAPI +from .connection import REQUEST + + +class ResourceTypeAPI(ResourceAPI): + def __init__(self, connection): + super().__init__(connection, resource_type="resource_types", calm_api=True) + self.TEST_RUNBOOK = self.PREFIX + "/{}/test_runbook/{}/run" + + def run_test_runbook(self, resource_type_id, action_id, payload): + return self.connection._call( + self.TEST_RUNBOOK.format(resource_type_id, action_id), + request_json=payload, + verify=False, + method=REQUEST.METHOD.POST, + ) diff --git a/framework/calm/dsl/api/role.py b/framework/calm/dsl/api/role.py new file mode 100644 index 0000000..02967ca --- /dev/null +++ b/framework/calm/dsl/api/role.py @@ -0,0 +1,6 @@ +from .resource import ResourceAPI + + +class RoleAPI(ResourceAPI): + def __init__(self, connection): + super().__init__(connection, resource_type="roles") diff --git a/framework/calm/dsl/api/runbook.py b/framework/calm/dsl/api/runbook.py new file mode 100644 index 0000000..67e8bb6 --- /dev/null +++ b/framework/calm/dsl/api/runbook.py @@ -0,0 +1,436 @@ +import os +from distutils.version import LooseVersion 
as LV + + +from .resource import ResourceAPI +from .connection import REQUEST +from .util import strip_secrets, patch_secrets +from calm.dsl.config import get_context +from .project import ProjectAPI + + +class RunbookAPI(ResourceAPI): + def __init__(self, connection): + super().__init__(connection, resource_type="runbooks") + self.UPLOAD = self.PREFIX + "/import_json" + self.UPDATE_USING_NAMES = self.PREFIX + "/{}/update" + self.RUNBOOK_RUNLOGS_LIST = self.PREFIX + "/runlogs/list" + self.RUN = self.PREFIX + "/{}/run" + self.EXECUTE = self.PREFIX + "/{}/execute" # For calm versions >= 3.3.2 + self.POLL_RUN = self.PREFIX + "/runlogs/{}" + self.PAUSE = self.PREFIX + "/runlogs/{}/pause" + self.PLAY = self.PREFIX + "/runlogs/{}/play" + self.RERUN = self.PREFIX + "/runlogs/{}/rerun" + self.RUNLOG_LIST = self.PREFIX + "/runlogs/{}/children/list" + self.RUNLOG_OUTPUT = self.PREFIX + "/runlogs/{}/children/{}/output" + self.RUNLOG_RESUME = self.PREFIX + "/runlogs/{}/children/{}/resume" + self.RUNLOG_ABORT = self.PREFIX + "/runlogs/{}/abort" + self.RUN_SCRIPT = self.PREFIX + "/{}/run_script" + self.RUN_SCRIPT_OUTPUT = self.PREFIX + "/{}/run_script/output/{}/{}" + self.EXPORT_FILE = self.ITEM + "/export_file" + self.IMPORT_FILE = self.PREFIX + "/import_file" + self.EXPORT_JSON = self.ITEM + "/export_json" + self.EXPORT_JSON_WITH_SECRETS = self.ITEM + "/export_json?keep_secrets=true" + self.MARKETPLACE_EXECUTE = self.PREFIX + "/marketplace_execute" + self.MARKETPLACE_CLONE = self.PREFIX + "/marketplace_clone" + self.VARIABLE_VALUES = self.ITEM + "/variables/{}/values" + + def upload(self, payload): + return self.connection._call( + self.UPLOAD, verify=False, request_json=payload, method=REQUEST.METHOD.POST + ) + + def resume(self, action_runlog_id, task_runlog_id, payload): + return self.connection._call( + self.RUNLOG_RESUME.format(action_runlog_id, task_runlog_id), + verify=False, + request_json=payload, + method=REQUEST.METHOD.POST, + ) + + def pause(self, uuid): + return self.connection._call( + self.PAUSE.format(uuid), + verify=False, + request_json={}, + method=REQUEST.METHOD.POST, + ) + + def play(self, uuid): + return self.connection._call( + self.PLAY.format(uuid), + verify=False, + request_json={}, + method=REQUEST.METHOD.POST, + ) + + def rerun(self, uuid): + return self.connection._call( + self.RERUN.format(uuid), + verify=False, + request_json={}, + method=REQUEST.METHOD.POST, + ) + + def abort(self, uuid): + return self.connection._call( + self.RUNLOG_ABORT.format(uuid), + verify=False, + request_json={}, + method=REQUEST.METHOD.POST, + ) + + def update_using_name_reference(self, uuid, payload): + return self.connection._call( + self.UPDATE_USING_NAMES.format(uuid), + verify=False, + request_json=payload, + method=REQUEST.METHOD.PUT, + ) + + @staticmethod + def _make_runbook_payload( + runbook_name, runbook_desc, runbook_resources, spec_version=None + ): + + runbook_payload = { + "spec": { + "name": runbook_name, + "description": runbook_desc or "", + "resources": runbook_resources, + }, + "metadata": { + "spec_version": spec_version or 1, + "name": runbook_name, + "kind": "runbook", + }, + "api_version": "3.0", + } + + return runbook_payload + + def upload_with_secrets( + self, runbook_name, runbook_desc, runbook_resources, force_create=False + ): + + # check if runbook with the given name already exists + params = {"filter": "name=={};deleted==FALSE".format(runbook_name)} + res, err = self.list(params=params) + if err: + return None, err + + response = res.json() + entities = 
response.get("entities", None) + if entities: + if len(entities) > 0: + if not force_create: + err_msg = "Runbook {} already exists. Use --force to first delete existing runbook before create.".format( + runbook_name + ) + err = {"error": err_msg, "code": -1} + return None, err + + # --force option used in create. Delete existing runbook with same name. + rb_uuid = entities[0]["metadata"]["uuid"] + _, err = self.delete(rb_uuid) + if err: + return None, err + + secret_map = {} + secret_variables = [] + object_lists = [] + objects = ["runbook"] + + strip_secrets( + runbook_resources, + secret_map, + secret_variables, + object_lists=object_lists, + objects=objects, + ) + + endpoint_secret_map = {} + endpoint_secret_variables = {} + + for endpoint in runbook_resources.get("endpoint_definition_list"): + endpoint_name = endpoint.get("name") + endpoint_secret_map[endpoint_name] = {} + endpoint_secret_variables[endpoint_name] = [] + strip_secrets( + endpoint["attrs"], + endpoint_secret_map[endpoint_name], + endpoint_secret_variables[endpoint_name], + ) + endpoint["attrs"].pop("default_credential_local_reference", None) + + upload_payload = self._make_runbook_payload( + runbook_name, runbook_desc, runbook_resources + ) + + ContextObj = get_context() + project_config = ContextObj.get_project_config() + project_name = project_config["name"] + projectObj = ProjectAPI(self.connection) + + # Fetch project details + params = {"filter": "name=={}".format(project_name)} + res, err = projectObj.list(params=params) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + response = res.json() + entities = response.get("entities", None) + if not entities: + raise Exception("No project with name {} exists".format(project_name)) + + project_id = entities[0]["metadata"]["uuid"] + + # Setting project reference + upload_payload["metadata"]["project_reference"] = { + "kind": "project", + "uuid": project_id, + "name": project_name, + } + + res, err = self.upload(upload_payload) + + if err: + return res, err + + runbook = res.json() + del runbook["status"] + + # Add secrets and update runbook + patch_secrets(runbook["spec"]["resources"], secret_map, secret_variables) + for endpoint in runbook["spec"]["resources"].get( + "endpoint_definition_list", [] + ): + endpoint_name = endpoint.get("name") + patch_secrets( + endpoint["attrs"], + endpoint_secret_map[endpoint_name], + endpoint_secret_variables[endpoint_name], + ) + + uuid = runbook["metadata"]["uuid"] + + # Update runbook + return self.update(uuid, runbook) + + def list_runbook_runlogs(self, params=None): + return self.connection._call( + self.RUNBOOK_RUNLOGS_LIST, + verify=False, + request_json=params, + method=REQUEST.METHOD.POST, + ) + + def run(self, uuid, payload): + from calm.dsl.store.version import Version + + calm_version = Version.get_version("Calm") + runbook_run_api = self.RUN + if LV(calm_version) >= LV("3.3.2"): + runbook_run_api = self.EXECUTE + + return self.connection._call( + runbook_run_api.format(uuid), + verify=False, + request_json=payload, + method=REQUEST.METHOD.POST, + ) + + def run_script(self, uuid, payload): + return self.connection._call( + self.RUN_SCRIPT.format(uuid), + verify=False, + request_json=payload, + method=REQUEST.METHOD.POST, + ) + + def run_script_output(self, uuid, trl_id, request_id): + return self.connection._call( + self.RUN_SCRIPT_OUTPUT.format(uuid, trl_id, request_id), + verify=False, + method=REQUEST.METHOD.GET, + ) + + def list_runlogs(self, uuid): + return self.connection._call( + 
self.RUNLOG_LIST.format(uuid), + verify=False, + request_json={}, + method=REQUEST.METHOD.POST, + ) + + def runlog_output(self, action_runlog_id, task_runlog_id): + return self.connection._call( + self.RUNLOG_OUTPUT.format(action_runlog_id, task_runlog_id), + verify=False, + method=REQUEST.METHOD.GET, + ) + + def poll_action_run(self, uuid, payload=None): + if payload: + return self.connection._call( + self.POLL_RUN.format(uuid), + request_json=payload, + verify=False, + method=REQUEST.METHOD.POST, + ) + else: + return self.connection._call( + self.POLL_RUN.format(uuid), verify=False, method=REQUEST.METHOD.GET + ) + + def update_with_secrets( + self, uuid, runbook_name, runbook_desc, runbook_resources, spec_version + ): + + secret_map = {} + secret_variables = [] + object_lists = [] + objects = ["runbook"] + + strip_secrets( + runbook_resources, + secret_map, + secret_variables, + object_lists=object_lists, + objects=objects, + ) + + endpoint_secret_map = {} + endpoint_secret_variables = {} + + for endpoint in runbook_resources.get("endpoint_definition_list"): + endpoint_name = endpoint.get("name") + endpoint_secret_map[endpoint_name] = {} + endpoint_secret_variables[endpoint_name] = [] + strip_secrets( + endpoint["attrs"], + endpoint_secret_map[endpoint_name], + endpoint_secret_variables[endpoint_name], + ) + endpoint["attrs"].pop("default_credential_local_reference", None) + + update_payload = self._make_runbook_payload( + runbook_name, runbook_desc, runbook_resources, spec_version=spec_version + ) + + ContextObj = get_context() + project_config = ContextObj.get_project_config() + project_name = project_config["name"] + projectObj = ProjectAPI(self.connection) + + # Fetch project details + params = {"filter": "name=={}".format(project_name)} + res, err = projectObj.list(params=params) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + response = res.json() + entities = response.get("entities", None) + if not entities: + raise Exception("No project with name {} exists".format(project_name)) + + project_id = entities[0]["metadata"]["uuid"] + + # Setting project reference + update_payload["metadata"]["project_reference"] = { + "kind": "project", + "uuid": project_id, + "name": project_name, + } + + res, err = self.update_using_name_reference(uuid, update_payload) + if err: + return res, err + + # Add secrets and update runbook + runbook = res.json() + del runbook["status"] + + # Update runbook + patch_secrets(runbook["spec"]["resources"], secret_map, secret_variables) + for endpoint in runbook["spec"]["resources"].get( + "endpoint_definition_list", [] + ): + endpoint_name = endpoint.get("name") + patch_secrets( + endpoint["attrs"], + endpoint_secret_map[endpoint_name], + endpoint_secret_variables[endpoint_name], + ) + + uuid = runbook["metadata"]["uuid"] + + return self.update(uuid, runbook) + + def export_file(self, uuid, passphrase=None): + current_path = os.path.dirname(os.path.realpath(__file__)) + if passphrase: + res, err = self.connection._call( + self.EXPORT_FILE.format(uuid), + verify=False, + method=REQUEST.METHOD.POST, + request_json={"passphrase": passphrase}, + files=[], + ) + else: + res, err = self.connection._call( + self.EXPORT_FILE.format(uuid), verify=False, method=REQUEST.METHOD.GET + ) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + with open(current_path + "/" + uuid + ".json", "wb") as downloaded_file: + for chunk in res.iter_content(chunk_size=2048): + downloaded_file.write(chunk) + + return current_path + 
"/" + uuid + ".json" + + def import_file(self, file_path, name, project_uuid, passphrase=None): + + payload = {"name": name, "project_uuid": project_uuid} + if passphrase: + payload["passphrase"] = passphrase + files = {"file": ("file", open(file_path, "rb"))} + + return self.connection._call( + self.IMPORT_FILE, + verify=False, + files=files, + request_json=payload, + method=REQUEST.METHOD.POST, + ) + + def export_json(self, uuid): + url = self.EXPORT_JSON.format(uuid) + return self.connection._call(url, verify=False, method=REQUEST.METHOD.GET) + + def export_json_with_secrets(self, uuid): + url = self.EXPORT_JSON_WITH_SECRETS.format(uuid) + return self.connection._call(url, verify=False, method=REQUEST.METHOD.GET) + + def marketplace_execute(self, payload): + return self.connection._call( + self.MARKETPLACE_EXECUTE, + verify=False, + request_json=payload, + method=REQUEST.METHOD.POST, + ) + + def marketplace_clone(self, payload): + return self.connection._call( + self.MARKETPLACE_CLONE, + verify=False, + request_json=payload, + method=REQUEST.METHOD.POST, + ) + + def variable_values(self, uuid, var_uuid, payload={}): + url = self.VARIABLE_VALUES.format(uuid, var_uuid) + return self.connection._call( + url, verify=False, method=REQUEST.METHOD.POST, request_json=payload + ) diff --git a/framework/calm/dsl/api/setting.py b/framework/calm/dsl/api/setting.py new file mode 100644 index 0000000..2fdae7b --- /dev/null +++ b/framework/calm/dsl/api/setting.py @@ -0,0 +1,52 @@ +from .resource import ResourceAPI +from .connection import REQUEST + + +class AccountsAPI(ResourceAPI): + def __init__(self, connection): + super().__init__(connection, resource_type="accounts") + self.VERIFY = self.PREFIX + "/{}/verify" + self.VMS_LIST = self.ITEM + "/vms/list" + self.RESOURCE_TYPES_LIST_BASED_ON_ACCOUNT = ( + self.PREFIX + "/{}/resource_types/list" + ) + self.PLATFORM_SYNC = self.PREFIX + "/{}/sync" + + def verify(self, id): + return self.connection._call( + self.VERIFY.format(id), verify=False, method=REQUEST.METHOD.GET + ) + + def vms_list(self, id, params=dict()): + """returns the vms list for given account""" + + return self.connection._call( + self.VMS_LIST.format(id), + verify=False, + request_json=params, + method=REQUEST.METHOD.POST, + ) + + def get_uuid_type_map(self, params=dict()): + """returns map containing {account_uuid: account_type} details""" + + res_entities, err = self.list_all(base_params=params, ignore_error=True) + if err: + raise Exception(err) + + uuid_type_map = {} + for entity in res_entities: + a_uuid = entity["metadata"]["uuid"] + a_type = entity["status"]["resources"]["type"] + uuid_type_map[a_uuid] = a_type + + return uuid_type_map + + def platform_sync(self, id): + """sync platform account""" + + return self.connection._call( + self.PLATFORM_SYNC.format(id), + verify=False, + method=REQUEST.METHOD.POST, + ) diff --git a/framework/calm/dsl/api/showback.py b/framework/calm/dsl/api/showback.py new file mode 100644 index 0000000..acd8592 --- /dev/null +++ b/framework/calm/dsl/api/showback.py @@ -0,0 +1,12 @@ +from .resource import ResourceAPI +from .connection import REQUEST + + +class ShowbackAPI(ResourceAPI): + def __init__(self, connection): + super().__init__(connection, resource_type="app_showback") + + def status(self): + return self.connection._call( + self.ITEM.format("status"), verify=False, method=REQUEST.METHOD.GET + ) diff --git a/framework/calm/dsl/api/tunnel.py b/framework/calm/dsl/api/tunnel.py new file mode 100644 index 0000000..90f9b20 --- /dev/null +++ 
b/framework/calm/dsl/api/tunnel.py @@ -0,0 +1,6 @@ +from .resource import ResourceAPI + + +class TunnelAPI(ResourceAPI): + def __init__(self, connection): + super().__init__(connection, resource_type="tunnels") diff --git a/framework/calm/dsl/api/user.py b/framework/calm/dsl/api/user.py new file mode 100644 index 0000000..52044ce --- /dev/null +++ b/framework/calm/dsl/api/user.py @@ -0,0 +1,20 @@ +from .resource import ResourceAPI +from .connection import REQUEST + + +class UserAPI(ResourceAPI): + def __init__(self, connection): + super().__init__(connection, resource_type="users") + + # Temporary hack to fix blocker CALM-32740 + def list(self, params={}, ignore_error=False): + params.pop("sort_attribute", None) + params.pop("sort_order", None) + return self.connection._call( + self.LIST, + verify=False, + request_json=params, + method=REQUEST.METHOD.POST, + ignore_error=ignore_error, + timeout=(5, 60), + ) diff --git a/framework/calm/dsl/api/user_group.py b/framework/calm/dsl/api/user_group.py new file mode 100644 index 0000000..10af4be --- /dev/null +++ b/framework/calm/dsl/api/user_group.py @@ -0,0 +1,88 @@ +from .resource import ResourceAPI + + +class UserGroupAPI(ResourceAPI): + def __init__(self, connection): + super().__init__(connection, resource_type="user_groups") + + def get_name_uuid_map(self, params=dict()): + + res, err = self.list(params) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + res = res.json() + + name_uuid_map = {} + for entity in res["entities"]: + state = entity["status"]["state"] + if state != "COMPLETE": + continue + + e_resources = entity["status"]["resources"] + + directory_service_user_group = ( + e_resources.get("directory_service_user_group") or dict() + ) + distinguished_name = directory_service_user_group.get("distinguished_name") + + # For user-groups having caps in the name + try: + distinguished_name = entity["spec"]["resources"][ + "directory_service_user_group" + ]["distinguished_name"] + except Exception: + pass + + directory_service_ref = ( + directory_service_user_group.get("directory_service_reference") + or dict() + ) + directory_service_name = directory_service_ref.get("name", "") + + uuid = entity["metadata"]["uuid"] + + if directory_service_name and distinguished_name: + name_uuid_map[distinguished_name] = uuid + + return name_uuid_map + + def get_uuid_name_map(self, params=dict()): + + res, err = self.list(params) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + res = res.json() + + uuid_name_map = {} + for entity in res["entities"]: + state = entity["status"]["state"] + if state != "COMPLETE": + continue + + e_resources = entity["status"]["resources"] + + directory_service_user_group = ( + e_resources.get("directory_service_user_group") or dict() + ) + distinguished_name = directory_service_user_group.get("distinguished_name") + + # For user-groups having caps in the name + try: + distinguished_name = entity["spec"]["resources"][ + "directory_service_user_group" + ]["distinguished_name"] + except Exception: + pass + + directory_service_ref = ( + directory_service_user_group.get("directory_service_reference") + or dict() + ) + directory_service_name = directory_service_ref.get("name", "") + + uuid = entity["metadata"]["uuid"] + + if directory_service_name and distinguished_name: + uuid_name_map[uuid] = distinguished_name + + return uuid_name_map diff --git a/framework/calm/dsl/api/util.py b/framework/calm/dsl/api/util.py new file mode 100644 index 0000000..1ad1dea --- /dev/null +++ 
b/framework/calm/dsl/api/util.py @@ -0,0 +1,180 @@ +def strip_secrets(resources, secret_map, secret_variables, object_lists=[], objects=[]): + """ + Strips secrets from the resources + Args: + resources (dict): request payload + secret_map (dict): credential secret values + secret_variables (list): list of secret variables + Returns: None + """ + + # Remove creds before upload + creds = resources.get("credential_definition_list", []) or [] + default_creds = [] + for cred in creds: + name = cred["name"] + secret_map[name] = cred.pop("secret", {}) + # Explicitly set defaults so that secret is not created at server + # TODO - Fix bug in server: {} != None + cred["secret"] = { + "attrs": {"is_secret_modified": False, "secret_reference": None} + } + + # Remove creds from HTTP endpoints resources + auth = resources.get("authentication", {}) or {} + if auth.get("type", None) == "basic": + name = auth["username"] + secret_map[name] = auth.pop("password", {}) + auth["password"] = {"attrs": {"is_secret_modified": False, "value": None}} + + # Strip secret variable values + # TODO: Refactor and/or clean this up later + + def strip_entity_secret_variables(path_list, obj, field_name="variable_list"): + for var_idx, variable in enumerate(obj.get(field_name, []) or []): + if variable["type"] == "SECRET": + secret_variables.append( + (path_list + [field_name, var_idx], variable.pop("value")) + ) + variable["attrs"] = { + "is_secret_modified": False, + "secret_reference": None, + } + + def strip_action_secret_variables(path_list, obj): + for action_idx, action in enumerate(obj.get("action_list", []) or []): + runbook = action.get("runbook", {}) or {} + if not runbook: + return + strip_entity_secret_variables( + path_list + ["action_list", action_idx, "runbook"], runbook + ) + tasks = runbook.get("task_definition_list", []) + for task_idx, task in enumerate(tasks): + if task.get("type", None) != "HTTP": + continue + auth = (task.get("attrs", {}) or {}).get("authentication", {}) or {} + if auth.get("auth_type", None) == "basic": + secret_variables.append( + ( + path_list + + [ + "action_list", + action_idx, + "runbook", + "task_definition_list", + task_idx, + "attrs", + "authentication", + "basic_auth", + "password", + ], + auth["basic_auth"]["password"].pop("value"), + ) + ) + auth["basic_auth"]["password"] = { + "attrs": {"is_secret_modified": False, "secret_reference": None} + } + if not (task.get("attrs", {}) or {}).get("headers", []) or []: + continue + strip_entity_secret_variables( + path_list + + [ + "action_list", + action_idx, + "runbook", + "task_definition_list", + task_idx, + "attrs", + ], + task["attrs"], + field_name="headers", + ) + + def strip_runbook_secret_variables(path_list, obj): + tasks = obj.get("task_definition_list", []) + for task_idx, task in enumerate(tasks): + if task.get("type", None) != "HTTP": + continue + auth = (task.get("attrs", {}) or {}).get("authentication", {}) or {} + path_list = path_list + [ + "runbook", + "task_definition_list", + task_idx, + "attrs", + ] + strip_authentication_secret_variables( + path_list, task.get("attrs", {}) or {} + ) + if auth.get("auth_type", None) == "basic": + if not (task.get("attrs", {}) or {}).get("headers", []) or []: + continue + strip_entity_secret_variables( + path_list, task["attrs"], field_name="headers" + ) + + def strip_authentication_secret_variables(path_list, obj): + auth = obj.get("authentication", {}) + if auth.get("auth_type", None) == "basic": + secret_variables.append( + ( + path_list + ["authentication", "basic_auth", 
"password"], + auth["password"].pop("value"), + ) + ) + auth["password"] = {"attrs": {"is_secret_modified": False}} + + def strip_all_secret_variables(path_list, obj): + strip_entity_secret_variables(path_list, obj) + strip_action_secret_variables(path_list, obj) + strip_runbook_secret_variables(path_list, obj) + strip_authentication_secret_variables(path_list, obj) + + for object_list in object_lists: + for obj_idx, obj in enumerate(resources.get(object_list, []) or []): + strip_all_secret_variables([object_list, obj_idx], obj) + + # Currently, deployment actions and variables are unsupported. + # Uncomment the following lines if and when the API does support them. + # if object_list == "app_profile_list": + # for dep_idx, dep in enumerate(obj["deployment_create_list"]): + # strip_all_secret_variables( + # [object_list, obj_idx, "deployment_create_list", dep_idx], + # dep, + # ) + + for obj in objects: + strip_all_secret_variables([obj], resources.get(obj, {})) + + +def patch_secrets(resources, secret_map, secret_variables): + """ + Patches the secrests to payload + Args: + resources (dict): resources in API request payload + secret_map (dict): credential secret values + secret_variables (list): list of secret variables + Returns: + dict: payload with secrets patched + """ + + # Add creds back + creds = resources.get("credential_definition_list", []) + for cred in creds: + name = cred["name"] + cred["secret"] = secret_map[name] + + # Add creds back for HTTP endpoint + auth = resources.get("authentication", {}) + username = auth.get("username", "") + if username: + auth["password"] = secret_map[username] + + for path, secret in secret_variables: + variable = resources + for sub_path in path: + variable = variable[sub_path] + variable["attrs"] = {"is_secret_modified": True} + variable["value"] = secret + + return resources diff --git a/framework/calm/dsl/api/version.py b/framework/calm/dsl/api/version.py new file mode 100644 index 0000000..11d7cf8 --- /dev/null +++ b/framework/calm/dsl/api/version.py @@ -0,0 +1,23 @@ +from .connection import REQUEST + + +class VersionAPI: + def __init__(self, connection): + self.connection = connection + + self.calm_version = "apps/version" + self.pc_version = "PrismGateway/services/rest/v1/cluster/version" + + def get_calm_version(self): + return self.connection._call( + self.calm_version, verify=False, method=REQUEST.METHOD.GET + ) + + def get_pc_version(self): + return self.connection._call( + self.pc_version, + verify=False, + method=REQUEST.METHOD.GET, + ignore_error=True, + warning_msg="Could not get PC Version", + ) diff --git a/framework/calm/dsl/api/vm_recovery_point.py b/framework/calm/dsl/api/vm_recovery_point.py new file mode 100644 index 0000000..6c38001 --- /dev/null +++ b/framework/calm/dsl/api/vm_recovery_point.py @@ -0,0 +1,6 @@ +from .resource import ResourceAPI + + +class VmRecoveryPointAPI(ResourceAPI): + def __init__(self, connection): + super().__init__(connection, resource_type="nutanix/v1/vm_recovery_points") diff --git a/framework/calm/dsl/builtins/__init__.py b/framework/calm/dsl/builtins/__init__.py new file mode 100644 index 0000000..c0e560d --- /dev/null +++ b/framework/calm/dsl/builtins/__init__.py @@ -0,0 +1,226 @@ +# IMPORTANT NOTE: Order of imports here is important since every entity that +# has fields for actions, variables, etc. will be using the corresponding +# validator (subclassed from PropertyValidator). This requires the relevant +# subclass to already be present in PropertyValidatorBase's context. 
Moving +# the import for these below the entities will cause a TypeError. + +from .models.ref import ref, RefType +from .models.calm_ref import Ref +from .models.metadata import Metadata, MetadataType +from .models.variable import Variable, setvar, CalmVariable, VariableType +from .models.action import action, parallel, ActionType, get_runbook_action +from .models.credential import basic_cred, secret_cred, dynamic_cred, CredentialType + +from .models.task import Task, CalmTask, TaskType + +from .models.port import Port, port, PortType +from .models.service import ( + BaseService as Service, + service, + ServiceType, +) +from .models.published_service import PublishedService, published_service + +from .models.package import Package, package, PackageType + +from .models.utils import ( + read_file, + read_local_file, + read_env, + file_exists, + get_valid_identifier, +) + +from .models.provider_spec import provider_spec, read_provider_spec, read_spec +from .models.provider_spec import read_ahv_spec, read_vmw_spec +from .models.readiness_probe import ReadinessProbe, readiness_probe, ReadinessProbeType + +from .models.ahv_vm_cluster import ( + ahv_vm_cluster, + AhvCluster, + AhvClusterType, +) +from .models.ahv_vm_vpc import ( + ahv_vm_vpc, + AhvVpc, + AhvVpcType, +) +from .models.ahv_vm_nic import ahv_vm_nic, AhvVmNic, AhvNicType +from .models.ahv_vm_disk import ahv_vm_disk, AhvVmDisk, AhvDiskType +from .models.ahv_vm_gpu import ahv_vm_gpu, AhvVmGpu, AhvGpuType +from .models.ahv_vm_gc import ahv_vm_guest_customization, AhvVmGC, AhvGCType +from .models.ahv_vm import ( + ahv_vm_resources, + AhvVmResources, + ahv_vm, + AhvVm, + AhvVmType, + AhvVmResourcesType, +) +from .models.ahv_recovery_vm import AhvVmRecoveryResources, ahv_vm_recovery_spec + +from .models.substrate import Substrate, substrate, SubstrateType +from .models.deployment import Deployment, deployment, DeploymentType +from .models.pod_deployment import PODDeployment, pod_deployment + +from .models.config_attrs import AhvUpdateConfigAttrs, PatchDataField +from .models.app_protection import AppProtection +from .models.config_spec import ConfigSpecType +from .models.app_edit import AppEdit +from .models.patch_field import PatchField + +from .models.profile import Profile, profile, ProfileType + +from .models.config_spec import ( + UpdateConfig, +) + +from .models.blueprint import Blueprint, blueprint, BlueprintType + +from .models.simple_deployment import SimpleDeployment +from .models.simple_blueprint import SimpleBlueprint + +from .models.blueprint_payload import create_blueprint_payload +from .models.vm_disk_package import ( + vm_disk_package, + ahv_vm_disk_package, + VmDiskPackageType, +) + + +from .models.client_attrs import ( + init_dsl_metadata_map, + get_dsl_metadata_map, + update_dsl_metadata_map, +) + +from .models.providers import Provider +from .models.environment import Environment +from .models.environment_payload import create_environment_payload +from .models.project import Project, ProjectType +from .models.project_payload import create_project_payload +from .models.brownfield import Brownfield +from .models.endpoint import Endpoint, _endpoint, CalmEndpoint + +from .models.vm_profile import VmProfile +from .models.vm_blueprint import VmBlueprint +from .models.job import Job, JobScheduler + +from .models.network_group_tunnel_vm_spec import ( + NetworkGroupTunnelVMSpecType, + NetworkGroupTunnelVMSpec, + ahv_network_group_tunnel_vm_spec, +) +from .models.network_group_tunnel import NetworkGroupTunnelType, 
NetworkGroupTunnel +from .models.network_group_tunnel_payload import NetworkGroupTunnelPayloadType + + +__all__ = [ + "Ref", + "ref", + "RefType", + "basic_cred", + "secret_cred", + "dynamic_cred", + "CredentialType", + "Variable", + "setvar", + "CalmVariable", + "VariableType", + "Task", + "CalmTask", + "TaskType", + "action", + "ActionType", + "get_runbook_action", + "parallel", + "Port", + "port", + "PortType", + "Service", + "service", + "ServiceType", + "PublishedService", + "published_service", + "Package", + "package", + "PackageType", + "read_file", + "file_exists", + "read_local_file", + "read_env", + "vm_disk_package", + "VmDiskPackageType", + "ahv_vm_disk_package", + "provider_spec", + "read_provider_spec", + "read_ahv_spec", + "read_vmw_spec", + "Substrate", + "substrate", + "SubstrateType", + "Deployment", + "deployment", + "DeploymentType", + "PODDeployment", + "pod_deployment", + "read_spec", + "Profile", + "profile", + "ProfileType", + "Blueprint", + "blueprint", + "BlueprintType", + "create_blueprint_payload", + "SimpleDeployment", + "SimpleBlueprint", + "get_valid_identifier", + "ReadinessProbe", + "readiness_probe", + "ReadinessProbeType", + "ahv_vm_nic", + "AhvVmNic", + "ahv_vm_disk", + "AhvVmDisk", + "ahv_vm_gpu", + "AhvVmGpu", + "ahv_vm_guest_customization", + "AhvVmGC", + "ahv_vm_resources", + "AhvVmResources", + "ahv_vm", + "AhvVm", + "AhvNicType", + "AhvDiskType", + "AhvGpuType", + "AhvGCType", + "AhvVmResourcesType", + "AhvVmType", + "init_dsl_metadata_map", + "get_dsl_metadata_map", + "update_dsl_metadata_map", + "Provider", + "create_project_payload", + "ProjectType", + "Project", + "NetworkGroupTunnelVMSpec", + "NetworkGroupTunnelVMSpecType", + "NetworkGroupTunnel", + "NetworkGroupTunnelType", + "ahv_network_group_tunnel_vm_spec", + "Metadata", + "MetadataType", + "Brownfield", + "Environment", + "create_environment_payload", + "VmProfile", + "VmBlueprint", + "Endpoint", + "_endpoint", + "CalmEndpoint", + "AppProtection", + "JobScheduler", + "AhvVmRecoveryResources", + "ahv_vm_recovery_spec", + "Job", +] diff --git a/framework/calm/dsl/builtins/models/__init__.py b/framework/calm/dsl/builtins/models/__init__.py new file mode 100644 index 0000000..9f57c5b --- /dev/null +++ b/framework/calm/dsl/builtins/models/__init__.py @@ -0,0 +1,4 @@ +from .descriptor import DescriptorType +from .object_type import ObjectDict + +__all__ = ["ObjectDict", "DescriptorType"] diff --git a/framework/calm/dsl/builtins/models/action.py b/framework/calm/dsl/builtins/models/action.py new file mode 100644 index 0000000..2a7dcfb --- /dev/null +++ b/framework/calm/dsl/builtins/models/action.py @@ -0,0 +1,184 @@ +import inspect +import sys + +from .entity import EntityType, Entity +from .validator import PropertyValidator +from .task import create_call_rb +from .runbook import runbook, runbook_create + +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + +# Action - Since action, runbook and DAG task are heavily coupled together, +# the action type behaves as all three. 
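For context, a minimal sketch of how the `action` decorator defined in this module is typically consumed by a blueprint entity, assuming the Service, action and CalmTask names exported from calm.dsl.builtins elsewhere in this diff; the service name, action name and script are illustrative only, not part of this patch:

    from calm.dsl.builtins import Service, action, CalmTask

    class HelloService(Service):
        # Hypothetical service, used only to illustrate the decorator below.

        @action
        def custom_action(name="Say Hello"):  # `name` default becomes the GUI display name
            CalmTask.Exec.ssh(name="Greet", script='echo "hello"')

At compile time, accessing HelloService.custom_action invokes action.__get__, which converts the decorated function's runbook into an ActionType instance owned by the service.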
+ + +class ActionType(EntityType): + __schema_name__ = "Action" + __openapi_type__ = "app_action" + + def __call__(cls, name=None): + return create_call_rb(cls.runbook, name=name) if cls.runbook else None + + def assign_targets(cls, parent_entity): + for task in cls.runbook.tasks: + if not task.target_any_local_reference: + task.target_any_local_reference = parent_entity.get_task_target() + + +class ActionValidator(PropertyValidator, openapi_type="app_action"): + __default__ = None + __kind__ = ActionType + + +def _action(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return ActionType(name, bases, kwargs) + + +Action = _action() + + +def _action_create(**kwargs): + name = kwargs.get("name", kwargs.get("__name__", None)) + bases = (Action,) + return ActionType(name, bases, kwargs) + + +class action(runbook): + """ + action descriptor + """ + + def __init__(self, user_func, task_target_mapping={}, imported_action=False): + """ + A decorator for generating runbooks from a function definition. + Args: + user_func (function): User defined function + task_target_mapping (dict): Mapping for task's target. Used for imported runboosk in blueprint + imported_action (boolean): True if runbook imported as action in blueprint + Returns: + (Runbook): Runbook class + """ + + super(action, self).__init__(user_func) + + # Will be used in runbooks imported to blueprint actions + self.task_target_mapping = task_target_mapping + self.imported_action = imported_action + + def __call__(self, name=None): + if self.user_runbook: + return create_call_rb(self.user_runbook, name=name) + + def __get__(self, instance, cls): + """ + Translate the user defined function to an action. + This method is called during compilation, when getattr() is called on the owner entity. + Args: + instance (object): Instance of cls + cls (Entity): Entity that this action is defined on + Returns: + (ActionType): Generated Action class + """ + if cls is None: + return self + + if self.imported_action: + # Only endpoints of type existing are supported + sig = inspect.signature(self.user_func) + for name, _ in sig.parameters.items(): + if name in ["endpoints", "credentials", "default"]: + if name in ["endpoints", "credentials"]: + LOG.error( + "{} are not supported for imported runbooks. 
Please use existing {} in the tasks.".format( + name, name + ) + ) + else: + LOG.error( + "{} are not supported for imported runbooks".format(name) + ) + sys.exit( + "Unknown parameter '{}' for imported runbooks".format(name) + ) + + super(action, self).__get__(instance, cls) + + # System action names + action_name = self.action_name + + ACTION_TYPE = "user" + func_name = self.user_func.__name__.lower() + if func_name.startswith("__") and func_name.endswith("__"): + SYSTEM = getattr(cls, "ALLOWED_SYSTEM_ACTIONS", {}) + FRAGMENT = getattr(cls, "ALLOWED_FRAGMENT_ACTIONS", {}) + if func_name in SYSTEM: + ACTION_TYPE = "system" + action_name = SYSTEM[func_name] + elif func_name in FRAGMENT: + ACTION_TYPE = "fragment" + action_name = FRAGMENT[func_name] + + else: + # `name` argument is only supported in non-system actions + sig = inspect.signature(self.user_func) + gui_display_name = sig.parameters.get("name", None) + if gui_display_name and gui_display_name.default != action_name: + action_name = gui_display_name.default + + # Case for imported runbooks in blueprints + if self.imported_action: + + # Mapping is compulsory for profile actions + if self.task_target_mapping: + # For now it is used to map runbook task's target to bp entities for PROFILE + # In runbook, the target will be endpoint. So it will be changed to target_endpoint + for _task in self.user_runbook.tasks[1:]: + if _task.target_any_local_reference: + _task.exec_target_reference = _task.target_any_local_reference + _task.target_any_local_reference = None + + if _task.name in self.task_target_mapping: + _task.target_any_local_reference = self.task_target_mapping[ + _task.name + ] + + # Non-Profile actions + else: + for _task in self.user_runbook.tasks[1:]: + if ( + _task.target_any_local_reference + and _task.target_any_local_reference.kind == "app_endpoint" + ): + _task.exec_target_reference = _task.target_any_local_reference + _task.target_any_local_reference = self.task_target + + # Finally create the action + self.user_action = _action_create( + **{ + "name": action_name, + "description": self.action_description, + "critical": ACTION_TYPE == "system", + "type": ACTION_TYPE, + "runbook": self.user_runbook, + } + ) + + return self.user_action + + +class parallel: + __calm_type__ = "parallel" + + +def get_runbook_action(runbook_obj, targets={}): + """ + Get action from the runbook object + """ + + user_func = runbook_obj.user_func + action_obj = action(user_func, task_target_mapping=targets, imported_action=True) + return action_obj diff --git a/framework/calm/dsl/builtins/models/ahv_recovery_vm.py b/framework/calm/dsl/builtins/models/ahv_recovery_vm.py new file mode 100644 index 0000000..b8c5fb7 --- /dev/null +++ b/framework/calm/dsl/builtins/models/ahv_recovery_vm.py @@ -0,0 +1,67 @@ +import sys +import uuid + +from .entity import Entity, EntityType +from .validator import PropertyValidator +from .helper import common as common_helper +from .ahv_vm import AhvVmResourcesType + +from calm.dsl.store import Cache +from calm.dsl.constants import CACHE +from calm.dsl.api.handle import get_api_client +from calm.dsl.log import get_logging_handle + + +LOG = get_logging_handle(__name__) + + +# AhvRecoveryVm + + +class AhvVMRecoveryResourcesType(AhvVmResourcesType): + """Metaclass for ahv vm recovery resources""" + + __schema_name__ = "AhvVmRecoveryResources" + __openapi_type__ = "recovery_vm_ahv_resources" + + +class AhvVMRecoveryResourcesValidator( + PropertyValidator, openapi_type="recovery_vm_ahv_resources" +): + __default__ = None + 
__kind__ = AhvVMRecoveryResourcesType + + +def _ahv_vm_recovery_resources(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return AhvVMRecoveryResourcesType(name, bases, kwargs) + + +AhvVmRecoveryResources = _ahv_vm_recovery_resources() + + +# AhvVmRecoverySpec + + +class AhvVMRecoverySpecType(EntityType): + """Metaclass for ahv vm recovery resources""" + + __schema_name__ = "AhvVmRecoverySpec" + __openapi_type__ = "recovery_vm_ahv_spec" + + +class AhvVMRecoverySpecValidator( + PropertyValidator, openapi_type="recovery_vm_ahv_spec" +): + __default__ = None + __kind__ = AhvVMRecoverySpecType + + +def ahv_vm_recovery_spec(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return AhvVMRecoverySpecType(name, bases, kwargs) + + +AhvVMRecoverySpec = ahv_vm_recovery_spec() diff --git a/framework/calm/dsl/builtins/models/ahv_vm.py b/framework/calm/dsl/builtins/models/ahv_vm.py new file mode 100644 index 0000000..7d25827 --- /dev/null +++ b/framework/calm/dsl/builtins/models/ahv_vm.py @@ -0,0 +1,183 @@ +import sys + +from .calm_ref import Ref +from .entity import EntityType, Entity +from .validator import PropertyValidator +from .provider_spec import ProviderSpecType +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + +# AHV VM Resources + + +class AhvVmResourcesType(EntityType): + __schema_name__ = "AhvVmResources" + __openapi_type__ = "vm_ahv_resources" + + def compile(cls): + cdict = super().compile() + + ADAPTER_INDEX_MAP = {"SCSI": 0, "PCI": 0, "IDE": 0, "SATA": 0} + + # Traverse over disks and modify the adapter index in disk address + # Get boot config from disks also + boot_config = {} + disk_list = cdict.get("disk_list", []) + for disk in disk_list: + device_prop = disk.device_properties.get_dict() + adapter_type = device_prop["disk_address"]["adapter_type"] + + device_prop["disk_address"]["device_index"] = ADAPTER_INDEX_MAP[ + adapter_type + ] + ADAPTER_INDEX_MAP[adapter_type] += 1 + disk.device_properties = device_prop + + if disk.bootable and not boot_config: + boot_config = { + "boot_device": {"disk_address": device_prop["disk_address"]} + } + + elif disk.bootable and boot_config: + raise ValueError("More than one bootable disks found") + + # Converting memory from GiB to mib + cdict["memory_size_mib"] *= 1024 + + # Merging boot_type to boot_config + cdict["boot_config"] = boot_config + boot_type = cdict.pop("boot_type", None) + if boot_type == "UEFI": + cdict["boot_config"]["boot_type"] = "UEFI" + + if not cdict["boot_config"]: + cdict.pop("boot_config", None) + + serial_port_list = [] + if cdict.get("serial_port_list"): + for ind, connection_status in cdict["serial_port_list"].items(): + if not isinstance(ind, int): + raise TypeError("index {} is not of type integer".format(ind)) + + if not isinstance(connection_status, bool): + raise TypeError( + "connection status {} is not of type bool".format( + connection_status + ) + ) + + serial_port_list.append( + {"index": ind, "is_connected": connection_status} + ) + + cdict["serial_port_list"] = serial_port_list + + return cdict + + @classmethod + def pre_decompile(mcls, cdict, context, prefix=""): + cdict = super().pre_decompile(cdict, context, prefix=prefix) + + if "__name__" in cdict: + cdict["__name__"] = "{}{}".format(prefix, cdict["__name__"]) + + return cdict + + @classmethod + def decompile(mcls, cdict, context=[], prefix=""): + # Check for serial ports + serial_port_list = cdict.pop("serial_port_list", []) + serial_port_dict = {} + for sp in serial_port_list: + 
serial_port_dict[sp["index"]] = sp["is_connected"] + + cdict["serial_port_list"] = serial_port_dict + + if not cdict.get("guest_customization", None): + cdict.pop("guest_customization", None) + + return super().decompile(cdict, prefix=prefix) + + +class AhvVmResourcesValidator(PropertyValidator, openapi_type="vm_ahv_resources"): + __default__ = None + __kind__ = AhvVmResourcesType + + +def ahv_vm_resources(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return AhvVmResourcesType(name, bases, kwargs) + + +AhvVmResources = ahv_vm_resources() + + +# AHV VM + + +class AhvVmType(ProviderSpecType): + __schema_name__ = "AhvVm" + __openapi_type__ = "vm_ahv" + + @classmethod + def pre_decompile(mcls, cdict, context, prefix=""): + cdict = super().pre_decompile(cdict, context, prefix=prefix) + + if "__name__" in cdict: + cdict["__name__"] = "{}{}".format(prefix, cdict["__name__"]) + + return cdict + + def compile(cls): + cdict = super().compile() + vpc_name, network_type = None, None + + for nic in cdict["resources"].nics: + if nic.vpc_reference: + if not network_type: + network_type = "OVERLAY" + elif network_type != "OVERLAY": + LOG.error( + "Network type mismatch - all subnets must either be vLANs or overlay subnets" + ) + sys.exit("Network type mismatch") + if "@@{" not in nic.vpc_reference["name"]: + if not vpc_name: + vpc_name = nic.vpc_reference["name"] + elif vpc_name != nic.vpc_reference["name"]: + LOG.error( + "VPC mismatch - all overlay subnets should belong to the same VPC" + ) + sys.exit("VPC mismatch") + + if nic.subnet_reference and nic.subnet_reference["cluster"]: + if not network_type: + network_type = "VLAN" + elif network_type != "VLAN": + LOG.error( + "Network type mismatch - all subnets must either be vLANs or overlay subnets" + ) + sys.exit("Network type mismatch") + + # if not cdict["cluster_reference"]: + # cluster = Ref.Cluster(name=nic.subnet_reference["cluster"]) + # cdict["cluster_reference"] = cluster + # cls.cluster = cluster + + return cdict + + +class AhvVmValidator(PropertyValidator, openapi_type="vm_ahv"): + __default__ = None + __kind__ = AhvVmType + + +def ahv_vm(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return AhvVmType(name, bases, kwargs) + + +AhvVm = ahv_vm() diff --git a/framework/calm/dsl/builtins/models/ahv_vm_cluster.py b/framework/calm/dsl/builtins/models/ahv_vm_cluster.py new file mode 100644 index 0000000..a1e1e3c --- /dev/null +++ b/framework/calm/dsl/builtins/models/ahv_vm_cluster.py @@ -0,0 +1,89 @@ +import sys + +from .entity import EntityType, Entity +from .helper import common as common_helper +from .validator import PropertyValidator +from calm.dsl.constants import CACHE +from calm.dsl.log import get_logging_handle +from calm.dsl.store import Cache + +LOG = get_logging_handle(__name__) + + +class AhvClusterType(EntityType): + __schema_name__ = "AhvVmCluster" + __openapi_type__ = "vm_ahv_cluster" + + def compile(cls): + + cdict = super().compile() + + cls_substrate = common_helper._walk_to_parent_with_given_type( + cls, "SubstrateType" + ) + account_uuid = ( + cls_substrate.get_referenced_account_uuid() if cls_substrate else "" + ) + + LOG.debug("Cluster CDict: {}".format(cdict)) + project, project_whitelist = common_helper.get_project_with_pc_account() + if not account_uuid: + account_uuid = list(project_whitelist.keys())[0] + + proj_whitelisted_cluster_uuids = ( + project_whitelist.get(account_uuid, {}).get("cluster_uuids") or [] + ) + + cluster_name = cdict.get("name", "") + cluster_data = 
Cache.get_entity_data( + entity_type=CACHE.ENTITY.AHV_CLUSTER, + name=cluster_name, + account_uuid=account_uuid, + ) + + if not cluster_data: + LOG.debug( + "Ahv Cluster (name = '{}') not found in registered Nutanix PC account (uuid = '{}') " + "in project (name = '{}')".format( + cluster_name, account_uuid, project["name"] + ) + ) + LOG.error( + "AHV Cluster {} not found. Please run: calm update cache".format( + cluster_name + ) + ) + sys.exit(-1) + + # Check if it is whitelisted in project + if cluster_data["uuid"] not in proj_whitelisted_cluster_uuids: + LOG.debug( + "Ahv Cluster (name = '{}') in registered Nutanix PC account (uuid = '{}') " + "not whitelisted in project (name = '{}')".format( + cluster_name, account_uuid, project["name"] + ) + ) + LOG.error( + "AHV Cluster {} not found. Please update project.".format(cluster_name) + ) + sys.exit(-1) + + # TODO check for environment whitelisting if substrate is part of env, check ahv_vm_nic implementation + cdict = {"name": cluster_name, "uuid": cluster_data["uuid"]} + + return cdict + + +class AhvClusterValidator(PropertyValidator, openapi_type="vm_ahv_cluster"): + __default__ = None + __kind__ = AhvClusterType + + +def ahv_vm_cluster(name, **kwargs): + bases = (Entity,) + return AhvClusterType(name, bases, kwargs) + + +class AhvCluster: + def __new__(cls, name=""): + return ahv_vm_cluster(name) diff --git a/framework/calm/dsl/builtins/models/ahv_vm_disk.py b/framework/calm/dsl/builtins/models/ahv_vm_disk.py new file mode 100644 index 0000000..fda53ae --- /dev/null +++ b/framework/calm/dsl/builtins/models/ahv_vm_disk.py @@ -0,0 +1,306 @@ +import sys + +from .entity import EntityType, Entity +from .validator import PropertyValidator +from .ref import ref + +from .package import PackageType +from calm.dsl.store import Cache +from calm.dsl.constants import CACHE +from calm.dsl.log import get_logging_handle +from .helper import common as common_helper + +LOG = get_logging_handle(__name__) + +# AHV VM Disk + + +IMAGE_TYPE_MAP = {"DISK": "DISK_IMAGE", "CDROM": "ISO_IMAGE"} + + +class AhvDiskType(EntityType): + __schema_name__ = "AhvDisk" + __openapi_type__ = "vm_ahv_disk" + + def compile(cls): + cdict = super().compile() + # Pop bootable from cdict + cdict.pop("bootable", None) + + cls_substrate = common_helper._walk_to_parent_with_given_type( + cls, "SubstrateType" + ) + account_uuid = ( + cls_substrate.get_referenced_account_uuid() if cls_substrate else "" + ) + + # Fetch nutanix account in project + project, project_whitelist = common_helper.get_project_with_pc_account() + if not account_uuid: + account_uuid = list(project_whitelist.keys())[0] + + image_ref = cdict.get("data_source_reference") or dict() + if image_ref and image_ref["kind"] == "image": + image_name = image_ref.get("name") + device_type = cdict["device_properties"].get("device_type") + + if image_name.startswith("@@{") and image_name.endswith("}@@"): + cdict["data_source_reference"] = { + "kind": "image", + "uuid": image_name, + } + else: + image_cache_data = Cache.get_entity_data( + entity_type=CACHE.ENTITY.AHV_DISK_IMAGE, + name=image_name, + image_type=IMAGE_TYPE_MAP[device_type], + account_uuid=account_uuid, + ) + if not image_cache_data: + LOG.debug( + "Ahv Disk Image (name = '{}') not found in registered nutanix_pc account (uuid = '{}') in project (name = '{}')".format( + image_name, account_uuid, project["name"] + ) + ) + LOG.error( + "Ahv Disk Image {} of type {} not found. 
Please run: calm update cache".format( + image_name, IMAGE_TYPE_MAP[device_type] + ) + ) + sys.exit(-1) + + image_uuid = image_cache_data.get("uuid", "") + cdict["data_source_reference"] = { + "kind": "image", + "name": image_name, + "uuid": image_uuid, + } + + return cdict + + +class AhvDiskValidator(PropertyValidator, openapi_type="vm_ahv_disk"): + __default__ = None + __kind__ = AhvDiskType + + +def ahv_vm_disk(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return AhvDiskType(name, bases, kwargs) + + +def allocate_on_storage_container(adapter_type="SCSI", size=8): + kwargs = { + "device_properties": { + "device_type": "DISK", + "disk_address": {"adapter_type": adapter_type, "device_index": -1}, + }, + "disk_size_mib": size * 1024, + } + + return ahv_vm_disk(**kwargs) + + +def update_disk_config( + device_type="DISK", adapter_type="SCSI", image_data={}, bootable=False +): + if not image_data: + raise ValueError("Image data not found") + + kwargs = { + "data_source_reference": image_data, + "device_properties": { + "device_type": device_type, + "disk_address": {"adapter_type": adapter_type, "device_index": -1}, + }, + "disk_size_mib": 0, + "bootable": bootable, + } + + return ahv_vm_disk(**kwargs) + + +def clone_from_image_service( + device_type="DISK", adapter_type="SCSI", image_name="", bootable=False +): + + if not image_name: + LOG.error("image_name not provided") + sys.exit(-1) + + # image_uuid will be added at compile time as it requires project context + image_data = {"kind": "image", "name": image_name} + + return update_disk_config(device_type, adapter_type, image_data, bootable) + + +def clone_from_vm_image_service( + device_type="DISK", adapter_type="SCSI", bootable=False, vm_disk_package=None +): + if not vm_disk_package: + raise ValueError("vm_disk_package not provided !!!") + + if not isinstance(vm_disk_package, PackageType): + raise TypeError("{} is not of type {}".format(vm_disk_package, PackageType)) + + AHV_IMAGE_TYPES = {"DISK": "DISK_IMAGE", "CDROM": "ISO_IMAGE"} + pkg = vm_disk_package.compile() + vm_image_type = pkg["options"]["resources"]["image_type"] + + if vm_image_type != AHV_IMAGE_TYPES[device_type]: + raise ValueError("Invalid vm image {} supplied in disk".format(vm_disk_package)) + + image_data = ref(vm_disk_package).compile() + + return update_disk_config(device_type, adapter_type, image_data, bootable) + + +def empty_cd_rom(adapter_type="IDE"): + kwargs = { + "device_properties": { + "device_type": "CDROM", + "disk_address": {"adapter_type": adapter_type, "device_index": -1}, + }, + "disk_size_mib": 0, + } + + return ahv_vm_disk(**kwargs) + + +def disk_scsi_clone_from_image(image_name=None, bootable=False): + return clone_from_image_service( + device_type="DISK", + adapter_type="SCSI", + image_name=image_name, + bootable=bootable, + ) + + +def disk_pci_clone_from_image(image_name=None, bootable=False): + return clone_from_image_service( + device_type="DISK", adapter_type="PCI", image_name=image_name, bootable=bootable + ) + + +def cd_rom_ide_clone_from_image(image_name=None, bootable=False): + return clone_from_image_service( + device_type="CDROM", + adapter_type="IDE", + image_name=image_name, + bootable=bootable, + ) + + +def cd_rom_sata_clone_from_image(image_name=None, bootable=False): + return clone_from_image_service( + device_type="CDROM", + adapter_type="SATA", + image_name=image_name, + bootable=bootable, + ) + + +def disk_scsi_clone_from_pkg_image(vm_disk_package=None, bootable=False): + return clone_from_vm_image_service( + 
device_type="DISK", + adapter_type="SCSI", + vm_disk_package=vm_disk_package, + bootable=bootable, + ) + + +def disk_pci_clone_from_pkg_image(vm_disk_package=None, bootable=False): + return clone_from_vm_image_service( + device_type="DISK", + adapter_type="PCI", + vm_disk_package=vm_disk_package, + bootable=bootable, + ) + + +def cd_rom_ide_clone_from_pkg_image(vm_disk_package=None, bootable=False): + return clone_from_vm_image_service( + device_type="CDROM", + adapter_type="IDE", + vm_disk_package=vm_disk_package, + bootable=bootable, + ) + + +def cd_rom_sata_clone_from_pkg_image(vm_disk_package=None, bootable=False): + return clone_from_vm_image_service( + device_type="CDROM", + adapter_type="SATA", + vm_disk_package=vm_disk_package, + bootable=bootable, + ) + + +def disk_scsi_allocate_on_container(size=8): + return allocate_on_storage_container(adapter_type="SCSI", size=size) + + +def disk_pci_allocate_on_container(size=8): + return allocate_on_storage_container(adapter_type="PCI", size=size) + + +def cd_rom_ide_use_empty_cd_rom(): + return empty_cd_rom(adapter_type="IDE") + + +def cd_rom_sata_use_empty_cd_rom(): + return empty_cd_rom(adapter_type="SATA") + + +class AhvVmDisk: + def __new__(cls, image_name=None, bootable=False): + return disk_scsi_clone_from_image(image_name=image_name, bootable=bootable) + + class Disk: + def __new__(cls, image_name=None, bootable=False): + return disk_scsi_clone_from_image(image_name=image_name, bootable=bootable) + + class Scsi: + def __new__(cls, image_name=None, bootable=False): + return disk_scsi_clone_from_image( + image_name=image_name, bootable=bootable + ) + + cloneFromImageService = disk_scsi_clone_from_image + allocateOnStorageContainer = disk_scsi_allocate_on_container + cloneFromVMDiskPackage = disk_scsi_clone_from_pkg_image + + class Pci: + def __new__(cls, image_name=None, bootable=False): + return disk_pci_clone_from_image( + image_name=image_name, bootable=bootable + ) + + cloneFromImageService = disk_pci_clone_from_image + allocateOnStorageContainer = disk_pci_allocate_on_container + cloneFromVMDiskPackage = disk_pci_clone_from_pkg_image + + class CdRom: + def __new__(cls, image_name=None, bootable=False): + return cd_rom_ide_clone_from_image(image_name=image_name, bootable=bootable) + + class Ide: + def __new__(cls, image_name=None, bootable=False): + return cd_rom_ide_clone_from_image( + image_name=image_name, bootable=bootable + ) + + cloneFromImageService = cd_rom_ide_clone_from_image + emptyCdRom = cd_rom_ide_use_empty_cd_rom + cloneFromVMDiskPackage = cd_rom_ide_clone_from_pkg_image + + class Sata: + def __new__(cls, image_name=None, bootable=False): + return cd_rom_sata_clone_from_image( + image_name=image_name, bootable=bootable + ) + + cloneFromImageService = cd_rom_sata_clone_from_image + emptyCdRom = cd_rom_sata_use_empty_cd_rom + cloneFromVMDiskPackage = cd_rom_sata_clone_from_pkg_image diff --git a/framework/calm/dsl/builtins/models/ahv_vm_gc.py b/framework/calm/dsl/builtins/models/ahv_vm_gc.py new file mode 100644 index 0000000..7817fa1 --- /dev/null +++ b/framework/calm/dsl/builtins/models/ahv_vm_gc.py @@ -0,0 +1,230 @@ +import re +import sys + +from .entity import EntityType, Entity +from .validator import PropertyValidator +from .utils import read_file, yaml +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + +# AHV Guest Customization + + +class AhvGCType(EntityType): + __schema_name__ = "AhvGuestCustomization" + __openapi_type__ = "vm_ahv_gc" + + +class 
AhvGCValidator(PropertyValidator, openapi_type="vm_ahv_gc"): + __default__ = None + __kind__ = AhvGCType + + +def ahv_vm_guest_customization(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return AhvGCType(name, bases, kwargs) + + +def create_ahv_guest_customization( + customization_type="cloud_init", + user_data="", + unattend_xml="", + install_type="FRESH", + is_domain=False, + domain="", + dns_ip="", + dns_search_path="", + credential=None, +): + + if customization_type == "cloud_init": + kwargs = {"cloud_init": {"user_data": user_data}} + + elif customization_type == "sysprep": + kwargs = { + "sysprep": { + "unattend_xml": unattend_xml, + "install_type": install_type, + "is_domain": is_domain, + "domain": domain, + "dns_ip": dns_ip, + "dns_search_path": dns_search_path, + "credential": credential, + } + } + + return ahv_vm_guest_customization(**kwargs) + + +def cloud_init(filename=None, config={}): + """ + Returns cloud_init guest customization object + NOTE: If file content are yaml, macros should not be enclosed in quotes. + """ + + if not config: + # reading the file + config = read_file(filename, depth=3) + + if re.match(r"\s*\|-", config): + config = yaml.safe_load(config) + + # If file content is dict or it do not contains macro(yaml content), then safe load the file + if re.match(r"\s*{", config) or (not re.search("@@{.*}@@", config)): + # Converting config to json object + config = yaml.safe_load(config) + + else: # If file content is yaml and it contains macro + return create_ahv_guest_customization( + customization_type="cloud_init", user_data=config + ) + + config = "#cloud-config\n" + yaml.dump(config, default_flow_style=False) + + # Case when a dict config is dumped to yaml, macros do come with quotes + + # Single quote near macro + if len(re.findall(r"'@@{\s*", config)) != len(re.findall(r"}@@\s*'", config)): + LOG.debug("Cloud_Init : {}".format(config)) + LOG.error("Invalid cloud_init found") + sys.exit(-1) + + # Double quotes near mcro + if len(re.findall(r'"@@{\s*', config)) != len(re.findall(r'}@@\s*"', config)): + LOG.debug("Cloud_Init : {}".format(config)) + LOG.error("Invalid cloud_init found") + sys.exit(-1) + + # Remove single quote with macro + config = re.sub(r"'@@{\s*", "@@{", config) + config = re.sub(r"}@@\s*'", "}@@", config) + + # Remove dobut quote wuth macro + config = re.sub(r'"@@{\s*', "@@{", config) + config = re.sub(r'}@@\s*"', "}@@", config) + + return create_ahv_guest_customization( + customization_type="cloud_init", user_data=config + ) + + +def fresh_sys_prep_with_domain( + domain="", + dns_ip="", + dns_search_path="", + credential=None, + filename=None, + unattend_xml="", +): + """Returns fresh install with domain sysprep guest customization object""" + + if not unattend_xml: + if filename: + unattend_xml = read_file(filename, depth=3) + + return create_ahv_guest_customization( + customization_type="sysprep", + install_type="FRESH", + unattend_xml=unattend_xml, + is_domain=True, + domain=domain, + dns_ip=dns_ip, + dns_search_path=dns_search_path, + credential=credential, + ) + + +def fresh_sys_prep_without_domain(filename=None, unattend_xml=""): + """Returns fresh install without domain sysprep guest customization object""" + + if not unattend_xml: + if filename: + unattend_xml = read_file(filename, depth=3) + + return create_ahv_guest_customization( + customization_type="sysprep", + install_type="FRESH", + unattend_xml=unattend_xml, + is_domain=False, + domain="", + dns_ip="", + dns_search_path="", + credential=None, + ) + + 
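For context, a minimal sketch of how these guest-customization helpers are typically consumed when declaring AHV VM resources, assuming the AhvVmResources, AhvVmGC, AhvVmDisk and AhvVmNic names exported from calm.dsl.builtins in this diff; the image, subnet and file names are illustrative only, not part of this patch:

    from calm.dsl.builtins import AhvVmResources, AhvVmGC, AhvVmDisk, AhvVmNic

    class MyVmResources(AhvVmResources):
        memory = 4            # GiB; converted to MiB by AhvVmResourcesType.compile()
        vCPUs = 2
        cores_per_vCPU = 1
        disks = [AhvVmDisk.Disk.Scsi.cloneFromImageService("CentOS-7", bootable=True)]
        nics = [AhvVmNic("vlan.0")]
        # Linux guests: cloud-init user data; macros such as @@{var}@@ are preserved by cloud_init()
        guest_customization = AhvVmGC.CloudInit(filename="cloud_init.yaml")
        # Windows guests would instead use a sysprep variant, e.g.:
        # guest_customization = AhvVmGC.Sysprep.FreshScript(filename="unattend.xml")

The fresh_* and prepared_* helpers in this module map to the FRESH and PREPARED install types respectively; the withDomain variants additionally set is_domain=True and expect a domain, DNS details and a credential to be supplied.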
+def prepared_sys_prep_with_domain( + domain="", + dns_ip="", + dns_search_path="", + credential=None, + filename=None, + unattend_xml="", +): + """Returns prepared install with domain sysprep guest customization object""" + + if not unattend_xml: + if filename: + unattend_xml = read_file(filename, depth=3) + + return create_ahv_guest_customization( + customization_type="sysprep", + install_type="PREPARED", + unattend_xml=unattend_xml, + is_domain=True, + domain=domain, + dns_ip=dns_ip, + dns_search_path=dns_search_path, + credential=credential, + ) + + +def prepared_sys_prep_without_domain(filename=None, unattend_xml=""): + """Returns prepared install without domain sysprep guest customization object""" + + if not unattend_xml: + if filename: + unattend_xml = read_file(filename, depth=3) + + return create_ahv_guest_customization( + customization_type="sysprep", + install_type="PREPARED", + unattend_xml=unattend_xml, + is_domain=False, + domain="", + dns_ip="", + dns_search_path="", + credential=None, + ) + + +class AhvVmGC: + class CloudInit: + def __new__(cls, filename=None, config={}): + return cloud_init(filename=filename, config=config) + + class Sysprep: + def __new__(cls, filename=None, unattend_xml=""): + return fresh_sys_prep_without_domain( + filename=filename, unattend_xml=unattend_xml + ) + + class FreshScript: + def __new__(cls, filename=None, unattend_xml=""): + return fresh_sys_prep_without_domain( + filename=filename, unattend_xml=unattend_xml + ) + + withDomain = fresh_sys_prep_with_domain + withoutDomain = fresh_sys_prep_without_domain + + class PreparedScript: + def __new__(cls, filename=None, unattend_xml=""): + return prepared_sys_prep_without_domain( + filename=filename, unattend_xml=unattend_xml + ) + + withDomain = prepared_sys_prep_with_domain + withoutDomain = prepared_sys_prep_without_domain diff --git a/framework/calm/dsl/builtins/models/ahv_vm_gpu.py b/framework/calm/dsl/builtins/models/ahv_vm_gpu.py new file mode 100644 index 0000000..7a15966 --- /dev/null +++ b/framework/calm/dsl/builtins/models/ahv_vm_gpu.py @@ -0,0 +1,94 @@ +from .entity import EntityType, Entity +from .validator import PropertyValidator + + +# AHV GPU + + +class AhvGpuType(EntityType): + __schema_name__ = "AhvGpu" + __openapi_type__ = "vm_ahv_gpu" + + +class AhvGpuValidator(PropertyValidator, openapi_type="vm_ahv_gpu"): + __default__ = None + __kind__ = AhvGpuType + + +def ahv_vm_gpu(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return AhvGpuType(name, bases, kwargs) + + +def create_ahv_gpu(vendor="", mode="", device_id=-1): + + kwargs = {"vendor": vendor, "mode": mode, "device_id": device_id} + + return ahv_vm_gpu(**kwargs) + + +def amd_gpu_pass_through_graphic_mode(device_id=-1): + return create_ahv_gpu( + vendor="AMD", mode="PASSTHROUGH_GRAPHICS", device_id=device_id + ) + + +def amd_gpu_pass_through_compute_mode(device_id=-1): + return create_ahv_gpu(vendor="AMD", mode="PASSTHROUGH_COMPUTE", device_id=device_id) + + +def amd_gpu_virtual_mode(device_id=-1): + return create_ahv_gpu(vendor="AMD", mode="VIRTUAL", device_id=device_id) + + +def intel_gpu_pass_through_graphic_mode(device_id=-1): + return create_ahv_gpu( + vendor="INTEL", mode="PASSTHROUGH_GRAPHICS", device_id=device_id + ) + + +def intel_gpu_pass_through_compute_mode(device_id=-1): + return create_ahv_gpu( + vendor="INTEL", mode="PASSTHROUGH_COMPUTE", device_id=device_id + ) + + +def intel_gpu_virtual_mode(device_id=-1): + return create_ahv_gpu(vendor="INTEL", mode="VIRTUAL", device_id=device_id) + + +def 
nvidia_gpu_pass_through_graphic_mode(device_id=-1): + return create_ahv_gpu( + vendor="NVIDIA", mode="PASSTHROUGH_GRAPHICS", device_id=device_id + ) + + +def nvidia_gpu_pass_through_compute_mode(device_id=-1): + return create_ahv_gpu( + vendor="NVIDIA", mode="PASSTHROUGH_COMPUTE", device_id=device_id + ) + + +def nvidia_gpu_virtual_mode(device_id=-1): + return create_ahv_gpu(vendor="NVIDIA", mode="VIRTUAL", device_id=device_id) + + +class AhvVmGpu: + class Amd: + + passThroughGraphic = amd_gpu_pass_through_graphic_mode + passThroughCompute = amd_gpu_pass_through_compute_mode + virtual = amd_gpu_virtual_mode + + class Intel: + + passThroughGraphic = intel_gpu_pass_through_graphic_mode + passThroughCompute = intel_gpu_pass_through_compute_mode + virtual = intel_gpu_virtual_mode + + class Nvidia: + + passThroughGraphic = nvidia_gpu_pass_through_graphic_mode + passThroughCompute = nvidia_gpu_pass_through_compute_mode + virtual = nvidia_gpu_virtual_mode diff --git a/framework/calm/dsl/builtins/models/ahv_vm_nic.py b/framework/calm/dsl/builtins/models/ahv_vm_nic.py new file mode 100644 index 0000000..732c6e0 --- /dev/null +++ b/framework/calm/dsl/builtins/models/ahv_vm_nic.py @@ -0,0 +1,408 @@ +import sys + +from .ahv_vm_vpc import AhvVpc +from .entity import EntityType, Entity +from .validator import PropertyValidator +from calm.dsl.store import Cache +from .helper import common as common_helper +from calm.dsl.constants import CACHE +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + +# AHV Nic + + +class AhvNicType(EntityType): + __schema_name__ = "AhvNic" + __openapi_type__ = "vm_ahv_nic" + + def compile(cls): + + cdict = super().compile() + + cls_substrate = common_helper._walk_to_parent_with_given_type( + cls, "SubstrateType" + ) + account_uuid = ( + cls_substrate.get_referenced_account_uuid() if cls_substrate else "" + ) + + # Fetch nutanix account in project + project, project_whitelist = common_helper.get_project_with_pc_account() + if not account_uuid: + account_uuid = list(project_whitelist.keys())[0] + + project_whitelist_subnet_uuids = project_whitelist.get(account_uuid, {}).get( + "subnet_uuids", [] + ) + + subnet_ref = cdict.get("subnet_reference") or dict() + subnet_name = subnet_ref.get("name", "") or "" + + vpc_ref = cdict.get("vpc_reference") or dict() + vpc_name = vpc_ref.get("name", "") or "" + + if subnet_name.startswith("@@{") and subnet_name.endswith("}@@"): + cdict["subnet_reference"] = { + "kind": "subnet", + "uuid": subnet_name, + } + + elif subnet_name: + cluster_name = subnet_ref.get("cluster", "") + + subnet_cache_data = Cache.get_entity_data( + entity_type=CACHE.ENTITY.AHV_SUBNET, + name=subnet_name, + cluster=cluster_name, + vpc=vpc_name, + account_uuid=account_uuid, + ) + + if not subnet_cache_data: + LOG.debug( + "Ahv Subnet (name = '{}') not found in registered Nutanix PC account (uuid = '{}') ".format( + subnet_name, account_uuid + ) + ) + sys.exit("AHV Subnet {} not found.".format(subnet_name)) + + vpc_name = subnet_cache_data.get("vpc_name", "") + vpc_uuid = subnet_cache_data.get("vpc_uuid", "") + cluster_name = subnet_cache_data.get("cluster_name", "") + + if ( + cluster_name + and cls_substrate + and cls_substrate.provider_spec + and cls_substrate.provider_spec.cluster + and cluster_name != str(cls_substrate.provider_spec.cluster) + ): + substrate_cluster = str(cls_substrate.provider_spec.cluster) + if not ( + substrate_cluster.startswith("@@{") + and substrate_cluster.endswith("}@@") + ): + sys.exit( + "Cluster mismatch - 
All VLANs should belong to same cluster" + ) + + if ( + vpc_name + and cls_substrate + and cls_substrate.provider_spec + and not cls_substrate.provider_spec.cluster + ): + sys.exit("Cluster reference is mandatory for Overlay NICs") + + # If substrate defined under environment model + subnet_uuid = subnet_cache_data.get("uuid", "") + cls_env = common_helper._walk_to_parent_with_given_type( + cls, "EnvironmentType" + ) + if cls_env: + infra = getattr(cls_env, "providers", []) + for _pdr in infra: + if _pdr.type == "nutanix_pc": + subnet_references = getattr(_pdr, "subnet_reference_list", []) + subnet_references.extend( + getattr(_pdr, "external_network_list", []) + ) + sr_list = [_sr.get_dict()["uuid"] for _sr in subnet_references] + if subnet_uuid not in sr_list: + LOG.error( + "Subnet '{}' not whitelisted in environment '{}'".format( + subnet_name, str(cls_env) + ) + ) + sys.exit(-1) + + # If provider_spec is defined under substrate and substrate is defined under blueprint model + elif cls_substrate: + pfl_env = cls_substrate.get_profile_environment() + if pfl_env: + environment_cache_data = Cache.get_entity_data_using_uuid( + entity_type=CACHE.ENTITY.ENVIRONMENT, uuid=pfl_env["uuid"] + ) + if not environment_cache_data: + LOG.error( + "Environment {} not found. Please run: calm update cache".format( + pfl_env["name"] + ) + ) + sys.exit(-1) + + env_accounts = environment_cache_data.get("accounts_data", {}).get( + "nutanix_pc", [] + ) + if subnet_uuid not in env_accounts.get(account_uuid, []): + LOG.error( + "Subnet {} is not whitelisted in environment {}".format( + subnet_name, str(pfl_env) + ) + ) + sys.exit(-1) + + elif subnet_uuid not in project_whitelist_subnet_uuids: + LOG.error( + "Subnet {} is not whitelisted in project {}".format( + subnet_name, project["name"] + ) + ) + sys.exit(-1) + + cdict["subnet_reference"] = { + "kind": "subnet", + "name": subnet_name, + "uuid": subnet_uuid, + } + if vpc_name: + cdict["vpc_reference"] = AhvVpc(vpc_name) + + nfc_ref = cdict.get("network_function_chain_reference") or dict() + nfc_name = nfc_ref.get("name", "") + if nfc_name: + nfc_cache_data = Cache.get_entity_data( + entity_type=CACHE.ENTITY.AHV_NETWORK_FUNCTION_CHAIN, name=nfc_name + ) + + if not nfc_cache_data: + LOG.debug( + "Ahv Network Function Chain (name = '{}') not found in registered nutanix_pc account (uuid = '{}') in project (name = '{}')".format( + nfc_name, account_uuid, project["name"] + ) + ) + LOG.error( + "AHV Network Function Chain {} not found. Please run: calm update cache".format( + nfc_name + ) + ) + sys.exit(-1) + + nfc_uuid = nfc_cache_data.get("uuid", "") + cdict["network_function_chain_reference"] = { + "name": nfc_name, + "uuid": nfc_uuid, + "kind": "network_function_chain", + } + + return cdict + + +class AhvNicValidator(PropertyValidator, openapi_type="vm_ahv_nic"): + __default__ = None + __kind__ = AhvNicType + + +def ahv_vm_nic(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return AhvNicType(name, bases, kwargs) + + +def create_ahv_nic( + subnet=None, + network_function_nic_type="INGRESS", + nic_type="NORMAL_NIC", + network_function_chain=None, # TODO Deal with it + mac_address="", + ip_endpoints=[], + cluster=None, + vpc=None, +): + + if vpc and cluster: + LOG.error( + "Invalid params [vpc, subnet] passed for Ahv Subnet (name = '{}'). 
Ahv Subnet can have only one of [vpc, cluster]" + ) + sys.exit("Invalid params [vpc, cluster] passed for subnet {}".format(subnet)) + kwargs = {} + + if subnet: + # Cluster name is used to find subnet uuid at compile time + kwargs["subnet_reference"] = { + "name": subnet, + "kind": "subnet", + "cluster": cluster, + } + if vpc: + kwargs["vpc_reference"] = { + "name": vpc, + "kind": "vpc", + } + + if network_function_chain: + kwargs["network_function_chain_reference"] = { + "name": network_function_chain, + "kind": "network_function_chain", + } + + for ip in ip_endpoints: + if not kwargs.get("ip_endpoint_list"): + kwargs["ip_endpoint_list"] = [] + + # Note the IP type is set to be ASSIGNED always + kwargs["ip_endpoint_list"].append({"ip": ip, "type": "ASSIGNED"}) + + kwargs.update( + { + "network_function_nic_type": network_function_nic_type, + "nic_type": nic_type, + "mac_address": mac_address, + } + ) + + return ahv_vm_nic(**kwargs) + + +def normal_ingress_nic(subnet, mac_address="", ip_endpoints=[], cluster=None, vpc=None): + return create_ahv_nic( + subnet=subnet, + network_function_nic_type="INGRESS", + nic_type="NORMAL_NIC", + mac_address=mac_address, + ip_endpoints=ip_endpoints, + cluster=cluster, + vpc=vpc, + ) + + +def normal_egress_nic(subnet, mac_address="", ip_endpoints=[], cluster=None, vpc=None): + return create_ahv_nic( + subnet=subnet, + network_function_nic_type="EGRESS", + nic_type="NORMAL_NIC", + mac_address=mac_address, + ip_endpoints=ip_endpoints, + cluster=cluster, + vpc=vpc, + ) + + +def normal_tap_nic(subnet, mac_address="", ip_endpoints=[], cluster=None, vpc=None): + return create_ahv_nic( + subnet=subnet, + network_function_nic_type="TAP", + nic_type="NORMAL_NIC", + mac_address=mac_address, + ip_endpoints=ip_endpoints, + cluster=cluster, + vpc=vpc, + ) + + +def direct_ingress_nic(subnet, mac_address="", ip_endpoints=[], cluster=None, vpc=None): + return create_ahv_nic( + subnet=subnet, + network_function_nic_type="INGRESS", + nic_type="DIRECT_NIC", + mac_address=mac_address, + ip_endpoints=ip_endpoints, + cluster=cluster, + vpc=vpc, + ) + + +def direct_egress_nic(subnet, mac_address="", ip_endpoints=[], cluster=None, vpc=None): + return create_ahv_nic( + subnet=subnet, + network_function_nic_type="EGRESS", + nic_type="DIRECT_NIC", + mac_address=mac_address, + ip_endpoints=ip_endpoints, + cluster=cluster, + vpc=vpc, + ) + + +def direct_tap_nic(subnet, mac_address="", ip_endpoints=[], cluster=None, vpc=None): + return create_ahv_nic( + subnet=subnet, + network_function_nic_type="TAP", + nic_type="DIRECT_NIC", + mac_address=mac_address, + ip_endpoints=ip_endpoints, + cluster=cluster, + vpc=vpc, + ) + + +def network_function_ingress_nic(mac_address="", network_function_chain=None): + return create_ahv_nic( + network_function_nic_type="INGRESS", + nic_type="NETWORK_FUNCTION_NIC", + mac_address=mac_address, + network_function_chain=network_function_chain, + ) + + +def network_function_egress_nic(mac_address="", network_function_chain=None): + return create_ahv_nic( + network_function_nic_type="EGRESS", + nic_type="NETWORK_FUNCTION_NIC", + mac_address=mac_address, + network_function_chain=network_function_chain, + ) + + +def network_function_tap_nic(mac_address="", network_function_chain=None): + return create_ahv_nic( + network_function_nic_type="TAP", + nic_type="NETWORK_FUNCTION_NIC", + mac_address=mac_address, + network_function_chain=network_function_chain, + ) + + +class AhvVmNic: + def __new__(cls, subnet, mac_address="", ip_endpoints=[], cluster=None, vpc=None): + 
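# Usage sketch for the NIC helpers above (illustrative only, not part of the
# patch itself). It assumes AhvVmNic is re-exported from calm.dsl.builtins as
# in the public DSL; the subnet, cluster and VPC names are hypothetical.
#
#   from calm.dsl.builtins import AhvVmNic
#
#   nics = [
#       AhvVmNic("default-vlan"),                                    # NORMAL_NIC, INGRESS (defaults)
#       AhvVmNic.DirectNic.ingress("default-vlan", cluster="PC-Cluster-1"),
#       AhvVmNic.NormalNic.ingress("overlay-subnet", vpc="dev-vpc"), # overlay subnet resolved via its VPC
#   ]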
return normal_ingress_nic( + subnet=subnet, + mac_address=mac_address, + ip_endpoints=ip_endpoints, + cluster=cluster, + vpc=vpc, + ) + + class NormalNic: + def __new__( + cls, subnet, mac_address="", ip_endpoints=[], cluster=None, vpc=None + ): + return normal_ingress_nic( + subnet=subnet, + mac_address=mac_address, + ip_endpoints=ip_endpoints, + cluster=cluster, + vpc=vpc, + ) + + ingress = normal_ingress_nic + egress = normal_egress_nic + tap = normal_tap_nic + + class DirectNic: + def __new__( + cls, subnet, mac_address="", ip_endpoints=[], cluster=None, vpc=None + ): + return direct_ingress_nic( + subnet=subnet, + mac_address=mac_address, + ip_endpoints=ip_endpoints, + cluster=cluster, + vpc=vpc, + ) + + ingress = direct_ingress_nic + egress = direct_egress_nic + tap = direct_tap_nic + + class NetworkFunctionNic: + def __new__(cls, mac_address="", network_function_chain=None): + return network_function_ingress_nic( + mac_address=mac_address, network_function_chain=network_function_chain + ) + + ingress = network_function_ingress_nic + egress = network_function_egress_nic + tap = network_function_tap_nic diff --git a/framework/calm/dsl/builtins/models/ahv_vm_vpc.py b/framework/calm/dsl/builtins/models/ahv_vm_vpc.py new file mode 100644 index 0000000..ccaa7be --- /dev/null +++ b/framework/calm/dsl/builtins/models/ahv_vm_vpc.py @@ -0,0 +1,86 @@ +import sys + +from .entity import EntityType, Entity +from .helper import common as common_helper +from .validator import PropertyValidator +from calm.dsl.constants import CACHE +from calm.dsl.log import get_logging_handle +from calm.dsl.store import Cache + +LOG = get_logging_handle(__name__) + + +class AhvVpcType(EntityType): + __schema_name__ = "AhvVmVpc" + __openapi_type__ = "vm_ahv_vpc" + + def compile(cls): + + cdict = super().compile() + LOG.debug("cdict parent: {}".format(cdict)) + cls_substrate = common_helper._walk_to_parent_with_given_type( + cls, "SubstrateType" + ) + account_uuid = ( + cls_substrate.get_referenced_account_uuid() if cls_substrate else "" + ) + + LOG.debug("Vpc CDict: {}".format(cdict)) + + project, project_whitelist = common_helper.get_project_with_pc_account() + if not account_uuid: + account_uuid = list(project_whitelist.keys())[0] + + proj_whitelisted_vpc_uuids = ( + project_whitelist.get(account_uuid, {}).get("vpc_uuids") or [] + ) + + vpc_name = cdict.get("name", "") + vpc_data = Cache.get_entity_data( + entity_type=CACHE.ENTITY.AHV_VPC, + name=vpc_name, + account_uuid=account_uuid, + ) + LOG.debug("Account uuid: {}".format(account_uuid)) + if not vpc_data: + LOG.debug( + "Ahv Vpc (name = '{}') not found in registered Nutanix PC account (uuid = '{}') " + "in project (name = '{}')".format( + vpc_name, account_uuid, project["name"] + ) + ) + LOG.error( + "AHV Vpc {} not found. Please run: calm update cache".format(vpc_name) + ) + sys.exit(-1) + + # Check if it is whitelisted in project + if vpc_data["uuid"] not in proj_whitelisted_vpc_uuids: + LOG.debug( + "Ahv Vpc (name = '{}') in registered Nutanix PC account (uuid = '{}') " + "not whitelisted in project (name = '{}')".format( + vpc_name, account_uuid, project["name"] + ) + ) + LOG.error("AHV Vpc {} not found. 
Please update project.".format(vpc_name)) + sys.exit(-1) + + # TODO check for environment whitelisting if substrate is part of env, check ahv_vm_nic implementation + cdict = {"name": vpc_name, "uuid": vpc_data["uuid"], "kind": "vpc"} + + return cdict + + +class AhvVpcValidator(PropertyValidator, openapi_type="vm_ahv_vpc"): + __default__ = None + __kind__ = AhvVpcType + + +def ahv_vm_vpc(name, **kwargs): + bases = (Entity,) + return AhvVpcType(name, bases, kwargs) + + +class AhvVpc: + def __new__(cls, name=""): + return ahv_vm_vpc(name) diff --git a/framework/calm/dsl/builtins/models/app_edit.py b/framework/calm/dsl/builtins/models/app_edit.py new file mode 100644 index 0000000..b76cf3c --- /dev/null +++ b/framework/calm/dsl/builtins/models/app_edit.py @@ -0,0 +1,31 @@ +import sys + +from calm.dsl.log import get_logging_handle + +from .config_spec import patch_config_create + +LOG = get_logging_handle(__name__) + + +class AppEdit: + def __new__(cls, *args, **kwargs): + raise TypeError("'{}' is not callable".format(cls.__name__)) + + class UpdateConfig: + def __new__( + cls, + name, + target, + patch_attrs, + ): + if target.__self__.substrate.__self__.provider_type != "AHV_VM": + LOG.error( + "Config is not supported for {} provider. Please try again after changing the provider".format( + target.__self__.substrate.__self__.provider_type + ) + ) + return patch_config_create( + name, + target=target, + patch_attrs=patch_attrs, + ) diff --git a/framework/calm/dsl/builtins/models/app_protection.py b/framework/calm/dsl/builtins/models/app_protection.py new file mode 100644 index 0000000..5df328a --- /dev/null +++ b/framework/calm/dsl/builtins/models/app_protection.py @@ -0,0 +1,111 @@ +import sys + +from calm.dsl.log import get_logging_handle +from calm.dsl.store import Cache + +from .config_spec import snapshot_config_create, restore_config_create +from .helper import common as common_helper + +LOG = get_logging_handle(__name__) + + +class AppProtection: + def __new__(cls, *args, **kwargs): + raise TypeError("'{}' is not callable".format(cls.__name__)) + + class ProtectionPolicy: + def __new__(cls, name, **kwargs): + rule_name = kwargs.get("rule_name", None) + rule_uuid = kwargs.get("rule_uuid", None) + project_cache_data = common_helper.get_cur_context_project() + project_name = project_cache_data.get("name") + protection_policy_cache_data = Cache.get_entity_data( + entity_type="app_protection_policy", + name=name, + rule_name=rule_name, + rule_uuid=rule_uuid, + project_name=project_name, + ) + + if not protection_policy_cache_data: + LOG.error( + "Protection Policy {} not found. 
Please run: calm update cache".format( + name + ) + ) + sys.exit("Protection policy {} does not exist".format(name)) + return { + "kind": "app_protection_policy", + "name": protection_policy_cache_data["name"], + "uuid": protection_policy_cache_data["uuid"], + "rule_uuid": protection_policy_cache_data["rule_uuid"], + } + + class SnapshotConfig: + def __new__( + cls, + name, + target=None, + num_of_replicas="ONE", + restore_config=None, + policy=None, + description="", + ): + if restore_config: + return snapshot_config_create( + name, + target=target, + num_of_replicas=num_of_replicas, + config_references=[restore_config], + policy=policy, + description=description, + ) + return snapshot_config_create( + name, + target=target, + num_of_replicas=num_of_replicas, + policy=policy, + description=description, + ) + + class CrashConsistent: + def __new__( + cls, + name, + target=None, + num_of_replicas="ONE", + restore_config=None, + policy=None, + description="", + ): + if restore_config: + return snapshot_config_create( + name, + target=target, + num_of_replicas=num_of_replicas, + config_references=[restore_config], + policy=policy, + description=description, + ) + return snapshot_config_create( + name, + target=target, + num_of_replicas=num_of_replicas, + policy=policy, + description=description, + ) + + class RestoreConfig: + def __new__( + cls, + name, + target=None, + delete_vm_post_restore=False, + description="", + ): + return restore_config_create( + name, + target=target, + delete_vm_post_restore=delete_vm_post_restore, + description=description, + ) diff --git a/framework/calm/dsl/builtins/models/blueprint.py b/framework/calm/dsl/builtins/models/blueprint.py new file mode 100644 index 0000000..bd89b05 --- /dev/null +++ b/framework/calm/dsl/builtins/models/blueprint.py @@ -0,0 +1,90 @@ +import sys + +from .entity import EntityType, Entity +from .validator import PropertyValidator +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + +# Blueprint + + +class BlueprintType(EntityType): + __schema_name__ = "Blueprint" + __openapi_type__ = "app_blueprint" + + def compile(cls): + def unzip_pod_deployments(cdict): + """Unzip pod deployment if exists""" + + for profile in cdict["app_profile_list"]: + deployments = getattr(profile, "deployments", []) + + normal_deployments = [] + for dep in deployments: + if dep.type == "K8S_DEPLOYMENT": + pod_dict = dep.extract_deployment() + + normal_deployments.extend( + pod_dict["deployment_definition_list"] + ) + cdict["package_definition_list"].extend( + pod_dict["package_definition_list"] + ) + cdict["substrate_definition_list"].extend( + pod_dict["substrate_definition_list"] + ) + cdict["published_service_definition_list"].extend( + pod_dict["published_service_definition_list"] + ) + + else: + normal_deployments.append(dep) + + setattr(profile, "deployments", normal_deployments) + + return cdict + + cdict = super().compile() + cdict = unzip_pod_deployments(cdict) + + # Searching for brownfield deployments + is_brownfield = False + for profile in cdict.get("app_profile_list", []): + for dep in profile.deployments: + if dep.type == "BROWNFIELD": + is_brownfield = True + + if is_brownfield: + cdict["type"] = "BROWNFIELD" + + # Multiple profiles are not allowed in brownfield blueprint (UI behaviour) + if len(cdict["app_profile_list"]) > 1: + LOG.error("Multiple profiles are not allowed in brownfield application") + sys.exit(-1) + + default_cred = cdict.pop("default_credential_local_reference", None) + if not default_cred: + for 
cred in cdict.get("credential_definition_list") or []: + if cred.default: + default_cred = cred.get_ref() + break + + if default_cred: + cdict["default_credential_local_reference"] = default_cred + + return cdict + + +class BlueprintValidator(PropertyValidator, openapi_type="app_blueprint"): + __default__ = None + __kind__ = BlueprintType + + +def blueprint(**kwargs): + name = kwargs.pop("name", None) + bases = (Entity,) + return BlueprintType(name, bases, kwargs) + + +Blueprint = blueprint() diff --git a/framework/calm/dsl/builtins/models/blueprint_payload.py b/framework/calm/dsl/builtins/models/blueprint_payload.py new file mode 100644 index 0000000..a943056 --- /dev/null +++ b/framework/calm/dsl/builtins/models/blueprint_payload.py @@ -0,0 +1,80 @@ +from calm.dsl.config import get_context + +from .entity import EntityType, Entity +from .validator import PropertyValidator +from .blueprint import BlueprintType +from .simple_blueprint import SimpleBlueprintType +from .calm_ref import Ref + + +# Blueprint Payload + + +class BlueprintPayloadType(EntityType): + __schema_name__ = "BlueprintPayload" + __openapi_type__ = "app_blueprint_payload" + + +class BlueprintPayloadValidator( + PropertyValidator, openapi_type="app_blueprint_payload" +): + __default__ = None + __kind__ = BlueprintPayloadType + + +def _blueprint_payload(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return BlueprintPayloadType(name, bases, kwargs) + + +BlueprintPayload = _blueprint_payload() + + +def create_blueprint_payload(UserBlueprint, metadata={}): + + err = {"error": "", "code": -1} + + if UserBlueprint is None: + err["error"] = "Given blueprint is empty." + return None, err + + if not isinstance(UserBlueprint, (BlueprintType, SimpleBlueprintType)): + err["error"] = "Given blueprint is not of type Blueprint" + return None, err + + spec = { + "name": UserBlueprint.__name__, + "description": UserBlueprint.__doc__ or "", + "resources": UserBlueprint, + } + + ContextObj = get_context() + server_config = ContextObj.get_server_config() + project_config = ContextObj.get_project_config() + config_categories = ContextObj.get_categories_config() + + # Set the blueprint name and kind correctly + metadata["name"] = UserBlueprint.__name__ + metadata["kind"] = "blueprint" + + # Project will be taken from config if not provided + if not metadata.get("project_reference", {}): + project_name = project_config["name"] + metadata["project_reference"] = Ref.Project(project_name) + + # User will be taken from config if not provided + if not metadata.get("owner_reference", {}): + user_name = server_config["pc_username"] + metadata["owner_reference"] = Ref.User(user_name) + + # Categories will be taken from config if not provided + if not metadata.get("categories", {}): + metadata["categories"] = config_categories + + metadata["kind"] = "blueprint" + UserBlueprintPayload = _blueprint_payload() + UserBlueprintPayload.metadata = metadata + UserBlueprintPayload.spec = spec + + return UserBlueprintPayload, None diff --git a/framework/calm/dsl/builtins/models/brownfield.py b/framework/calm/dsl/builtins/models/brownfield.py new file mode 100644 index 0000000..d9dd0df --- /dev/null +++ b/framework/calm/dsl/builtins/models/brownfield.py @@ -0,0 +1,734 @@ +import sys + +from .entity import EntityType, Entity +from .validator import PropertyValidator +from .deployment import DeploymentType +from .metadata_payload import get_metadata_obj + +from .helper import common as common_helper +from calm.dsl.config import get_context +from 
calm.dsl.store import Cache +from calm.dsl.api import get_api_client +from calm.dsl.constants import CACHE, PROVIDER_ACCOUNT_TYPE_MAP +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +def match_vm_data( + vm_name, + vm_address_list, + vm_id, + instance_name=None, + instance_address=[], + instance_id=None, +): + """Returns True/False based on if vm data is matched with provided instance data""" + + if instance_id: # Case when ip address is given + if instance_id == vm_id: + # If supplied ip_addresses not found in given instance, raise error + if not set(instance_address).issubset(set(vm_address_list)): + diff_ips = set(instance_address).difference(set(vm_address_list)) + LOG.error( + "IP Address {} not found in instance(id={})".format(diff_ips, vm_id) + ) + sys.exit(-1) + + # If supplied name not matched with instance name, raise error + if instance_name and instance_name != vm_name: + LOG.error( + "Provided instance_name ({}) not matched with server instance name ({})".format( + instance_name, vm_name + ) + ) + sys.exit(-1) + + # If checks are correct, return True + return True + + elif instance_address: # Case when ip_address is given + if set(instance_address).issubset(set(vm_address_list)): + # If supplied name not matched with instance name, raise error + if instance_name and instance_name != vm_name: + LOG.error( + "Provided instance_name ({}) not matched with server instance name ({})".format( + instance_name, vm_name + ) + ) + sys.exit(-1) + + # If checks are correct, return True + return True + + elif instance_name == vm_name: # Case when instance_name is provided + return True + + # If not matched by any check return False + return False + + +# TODO merge provider specific helpers into one +def get_ahv_bf_vm_data( + project_uuid, account_uuid, instance_name=None, ip_address=[], instance_id=None +): + """Return ahv vm data matched with provided instacne details""" + + if not instance_id: + if not (instance_name or ip_address): + LOG.error("One of 'instance_name' or 'ip_address' must be given.") + sys.exit(-1) + + client = get_api_client() + res, err = client.account.read(account_uuid) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + clusters = res["status"]["resources"]["data"].get( + "cluster_account_reference_list", [] + ) + if not clusters: + LOG.error("No cluster found in ahv account (uuid='{}')".format(account_uuid)) + sys.exit(-1) + + # TODO Cluster should be a part of project whitelisted clusters. 
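# Worked illustration of the precedence implemented by match_vm_data above
# (values are made up): instance_id wins, then ip_address (which must be a
# subset of the VM's address list), then instance_name.
#
#   match_vm_data(vm_name="web-1", vm_address_list=["10.0.0.5", "10.0.0.6"],
#                 vm_id="vm-123", instance_address=["10.0.0.5"])    # -> True (ip subset match)
#
#   match_vm_data(vm_name="web-1", vm_address_list=["10.0.0.5"],
#                 vm_id="vm-123", instance_name="web-2")            # -> False (name mismatch)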
Change after jira is resolved + # Jira: https://jira.nutanix.com/browse/CALM-20205 + cluster_uuid = clusters[0]["uuid"] + + params = { + "length": 1000, + "offset": 0, + "filter": "project_uuid=={};account_uuid=={}".format( + project_uuid, cluster_uuid + ), + } + res, err = client.blueprint.brownfield_vms(params) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + if not res["metadata"]["total_matches"]: + LOG.error( + "No nutanix brownfield vms found on account(uuid='{}') and project(uuid='{}')".format( + account_uuid, project_uuid + ) + ) + sys.exit(-1) + + res_vm_data = None + for entity in res["entities"]: + e_resources = entity["status"]["resources"] + e_name = e_resources["instance_name"] + e_id = e_resources["instance_id"] + e_address = e_resources["address"] + e_address_list = e_resources["address_list"] + + if match_vm_data( + vm_name=e_name, + vm_address_list=e_address_list, + vm_id=e_id, + instance_name=instance_name, + instance_address=ip_address, + instance_id=instance_id, + ): + if res_vm_data: + # If there is an existing vm with provided configuration + LOG.error( + "Multiple vms with same name ({}) found".format(instance_name) + ) + sys.exit(-1) + + res_vm_data = { + "instance_name": e_name, + "instance_id": e_id, + "address": ip_address or e_address, + } + + # If vm not found raise error + if not res_vm_data: + LOG.error( + "No nutanix brownfield vm with details (name='{}', address='{}', id='{}') found on account(uuid='{}') and project(uuid='{}')".format( + instance_name, ip_address, instance_id, account_uuid, project_uuid + ) + ) + sys.exit(-1) + + return res_vm_data + + +def get_aws_bf_vm_data( + project_uuid, account_uuid, instance_name=None, ip_address=[], instance_id=None +): + """Return aws vm data matched with provided instacne details""" + + client = get_api_client() + + params = { + "length": 250, + "offset": 0, + "filter": "project_uuid=={};account_uuid=={}".format( + project_uuid, account_uuid + ), + } + res, err = client.blueprint.brownfield_vms(params) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + if not res["metadata"]["total_matches"]: + LOG.error( + "No aws brownfield vms found on account(uuid='{}') and project(uuid='{}')".format( + account_uuid, project_uuid + ) + ) + sys.exit(-1) + + res_vm_data = None + for entity in res["entities"]: + e_resources = entity["status"]["resources"] + e_name = e_resources["instance_name"] + e_id = e_resources["instance_id"] + e_address = e_resources["address"] + e_address_list = e_resources["public_ip_address"] + + if match_vm_data( + vm_name=e_name, + vm_address_list=e_address_list, + vm_id=e_id, + instance_name=instance_name, + instance_address=ip_address, + instance_id=instance_id, + ): + if res_vm_data: + # If there is an existing vm with provided configuration + LOG.error( + "Multiple vms with same name ({}) found".format(instance_name) + ) + sys.exit(-1) + + res_vm_data = { + "instance_name": e_name, + "instance_id": e_id, + "address": ip_address or e_address, + } + + # If vm not found raise error + if not res_vm_data: + LOG.error( + "No aws brownfield vm with details (name='{}', address='{}', id='{}') found on account(uuid='{}') and project(uuid='{}')".format( + instance_name, ip_address, instance_id, account_uuid, project_uuid + ) + ) + sys.exit(-1) + + return res_vm_data + + +def get_azure_bf_vm_data( + project_uuid, account_uuid, instance_name=None, ip_address=[], instance_id=None +): + """Return azure vm data matched 
with provided instacne details""" + + client = get_api_client() + + if instance_name: + filter = "instance_name=={};project_uuid=={};account_uuid=={}".format( + instance_name, project_uuid, account_uuid + ) + else: + filter = "project_uuid=={};account_uuid=={}".format(project_uuid, account_uuid) + params = {"length": 1000, "offset": 0, "filter": filter} + res, err = client.blueprint.brownfield_vms(params) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + if not res["metadata"]["total_matches"]: + LOG.error( + "No azure brownfield vms found on account(uuid='{}') and project(uuid='{}')".format( + account_uuid, project_uuid + ) + ) + sys.exit(-1) + + res_vm_data = None + for entity in res["entities"]: + e_resources = entity["status"]["resources"] + e_name = e_resources["instance_name"] + e_id = e_resources["instance_id"] + e_address = e_resources["address"] + e_address_list = e_resources["public_ip_address"] + e_private_address = e_resources["private_ip_address"] + if (not e_address_list) and e_private_address: + e_address = [e_private_address] + e_address_list = e_private_address + + if match_vm_data( + vm_name=e_name, + vm_address_list=e_address_list, + vm_id=e_id, + instance_name=instance_name, + instance_address=ip_address, + instance_id=instance_id, + ): + if res_vm_data: + # If there is an existing vm with provided configuration + LOG.error( + "Multiple vms with same name ({}) found".format(instance_name) + ) + sys.exit(-1) + + res_vm_data = { + "instance_name": e_name, + "instance_id": e_id, + "address": ip_address or e_address, + "platform_data": {"resource_group": e_resources["resource_group"]}, + } + + # If vm not found raise error + if not res_vm_data: + LOG.error( + "No azure brownfield vm with details (name='{}', address='{}', id='{}') found on account(uuid='{}') and project(uuid='{}')".format( + instance_name, ip_address, instance_id, account_uuid, project_uuid + ) + ) + sys.exit(-1) + + return res_vm_data + + +def get_vmware_bf_vm_data( + project_uuid, account_uuid, instance_name=None, ip_address=[], instance_id=None +): + """Return vmware vm data matched with provided instacne details""" + + client = get_api_client() + + params = { + "length": 250, + "offset": 0, + "filter": "project_uuid=={};account_uuid=={}".format( + project_uuid, account_uuid + ), + } + res, err = client.blueprint.brownfield_vms(params) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + if not res["metadata"]["total_matches"]: + LOG.error( + "No vmware brownfield vms found on account(uuid='{}') and project(uuid='{}')".format( + account_uuid, project_uuid + ) + ) + sys.exit(-1) + + res_vm_data = None + for entity in res["entities"]: + e_resources = entity["status"]["resources"] + e_name = e_resources["instance_name"] + e_id = e_resources["instance_id"] + e_address = e_resources["address"] + e_address_list = e_resources["guest.ipAddress"] + + if match_vm_data( + vm_name=e_name, + vm_address_list=e_address_list, + vm_id=e_id, + instance_name=instance_name, + instance_address=ip_address, + instance_id=instance_id, + ): + if res_vm_data: + # If there is an existing vm with provided configuration + LOG.error( + "Multiple vms with same name ({}) found".format(instance_name) + ) + sys.exit(-1) + + res_vm_data = { + "instance_name": e_name, + "instance_id": e_id, + "address": ip_address or e_address, + } + + # If vm not found raise error + if not res_vm_data: + LOG.error( + "No vmware brownfield vm with details 
(name='{}', address='{}', id='{}') found on account(uuid='{}') and project(uuid='{}')".format( + instance_name, ip_address, instance_id, account_uuid, project_uuid + ) + ) + sys.exit(-1) + + return res_vm_data + + +def get_gcp_bf_vm_data( + project_uuid, account_uuid, instance_name=None, ip_address=[], instance_id=None +): + """Return gcp vm data matched with provided instacne details""" + + client = get_api_client() + + params = { + "length": 250, + "offset": 0, + "filter": "project_uuid=={};account_uuid=={}".format( + project_uuid, account_uuid + ), + } + res, err = client.blueprint.brownfield_vms(params) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + if not res["metadata"]["total_matches"]: + LOG.error( + "No gcp brownfield vms found on account(uuid='{}') and project(uuid='{}')".format( + account_uuid, project_uuid + ) + ) + sys.exit(-1) + + res_vm_data = None + for entity in res["entities"]: + e_resources = entity["status"]["resources"] + e_name = e_resources["instance_name"] + e_id = e_resources["id"] + e_address = e_resources["address"] + e_address_list = e_resources["natIP"] + + if match_vm_data( + vm_name=e_name, + vm_address_list=e_address_list, + vm_id=e_id, + instance_name=instance_name, + instance_address=ip_address, + instance_id=instance_id, + ): + if res_vm_data: + # If there is an existing vm with provided configuration + LOG.error( + "Multiple vms with same name ({}) found".format(instance_name) + ) + sys.exit(-1) + + res_vm_data = { + "instance_name": e_name, + "instance_id": e_id, + "address": ip_address or e_address, + } + + # If vm not found raise error + if not res_vm_data: + LOG.error( + "No gcp brownfield vm with details (name='{}', address='{}', id='{}') found on account(uuid='{}') and project(uuid='{}')".format( + instance_name, ip_address, instance_id, account_uuid, project_uuid + ) + ) + sys.exit(-1) + + return res_vm_data + + +# Brownfield Vm + + +class BrownfiedVmType(EntityType): + __schema_name__ = "BrownfieldVm" + __openapi_type__ = "app_brownfield_vm" + + def get_profile_environment(cls): + """ + returns the env configuration if present at brownfield vm's profile + """ + + environment = {} + cls_profile = common_helper._walk_to_parent_with_given_type(cls, "ProfileType") + environment = getattr(cls_profile, "environment", {}) + if environment: + LOG.debug( + "Found environment {} associated to app-profile {}".format( + environment.get("name"), cls_profile + ) + ) + else: + LOG.debug( + "No environment associated to the app-profile {}".format(cls_profile) + ) + + if environment: + environment = Cache.get_entity_data_using_uuid( + entity_type=CACHE.ENTITY.ENVIRONMENT, uuid=environment["uuid"] + ) + + return environment + + def get_substrate(cls): + """return substrate attached to brownfield vm's deployment""" + + cls_deployment = common_helper._walk_to_parent_with_given_type( + cls, "BrownfieldDeploymentType" + ) + + if cls_deployment and getattr(cls_deployment, "substrate", None): + return cls_deployment.substrate.__self__ + + return None + + def get_account_uuid(cls): + """returns the account_uuid configured for given brwonfield vm""" + + project_cache_data = common_helper.get_cur_context_project() + environment_cache_data = cls.get_profile_environment() + cls_substrate = cls.get_substrate() + + provider_type = cls.provider + + # account_uuid is attached to brownfield instances if a + # blueprint is launched with runtime brownfield deployments + account_uuid = getattr(cls, "account_uuid", "") + if 
cls_substrate: + account = getattr(cls_substrate, "account", dict()) or dict() + if account: + account_uuid = account["uuid"] + + if not account_uuid: + if environment_cache_data: + accounts = environment_cache_data["accounts_data"].get( + PROVIDER_ACCOUNT_TYPE_MAP[provider_type], [] + ) + if not accounts: + LOG.error( + "No {} account regsitered in environment".format(provider_type) + ) + sys.exit(-1) + + else: + accounts = project_cache_data["accounts_data"].get( + PROVIDER_ACCOUNT_TYPE_MAP[provider_type], [] + ) + if not accounts: + LOG.error( + "No {} account regsitered in environment".format(provider_type) + ) + sys.exit(-1) + + account_uuid = accounts[0] + + return account_uuid + + def compile(cls): + + cdict = super().compile() + provider_type = cdict.pop("provider") + + project_cache_data = common_helper.get_cur_context_project() + account_uuid = cls.get_account_uuid() + + project_uuid = project_cache_data.get("uuid") + + if provider_type == "AHV_VM": + cdict = get_ahv_bf_vm_data( + project_uuid=project_uuid, + account_uuid=account_uuid, + instance_name=cdict["instance_name"], + ip_address=cdict["address"], + instance_id=cdict["instance_id"], + ) + + elif provider_type == "AWS_VM": + cdict = get_aws_bf_vm_data( + project_uuid=project_uuid, + account_uuid=account_uuid, + instance_name=cdict["instance_name"], + ip_address=cdict["address"], + instance_id=cdict["instance_id"], + ) + + elif provider_type == "AZURE_VM": + cdict = get_azure_bf_vm_data( + project_uuid=project_uuid, + account_uuid=account_uuid, + instance_name=cdict["instance_name"], + ip_address=cdict["address"], + instance_id=cdict["instance_id"], + ) + + elif provider_type == "VMWARE_VM": + cdict = get_vmware_bf_vm_data( + project_uuid=project_uuid, + account_uuid=account_uuid, + instance_name=cdict["instance_name"], + ip_address=cdict["address"], + instance_id=cdict["instance_id"], + ) + + elif provider_type == "GCP_VM": + cdict = get_gcp_bf_vm_data( + project_uuid=project_uuid, + account_uuid=account_uuid, + instance_name=cdict["instance_name"], + ip_address=cdict["address"], + instance_id=cdict["instance_id"], + ) + + else: + LOG.error( + "Support for {} provider's brownfield vm not available".format( + provider_type + ) + ) + sys.exit(-1) + + return cdict + + +class BrownfieldVmValidator(PropertyValidator, openapi_type="app_brownfield_vm"): + __default__ = None + __kind__ = BrownfiedVmType + + +def brownfield_vm(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return BrownfiedVmType(name, bases, kwargs) + + +# Brownfield Deployment + + +class BrownfieldDeploymentType(DeploymentType): + __schema_name__ = "BrownfieldDeployment" + __openapi_type__ = "app_brownfield_deployment" + + def get_ref(cls, kind=None): + """Note: app_blueprint_deployment kind to be used for pod deployment""" + return super().get_ref(kind=DeploymentType.__openapi_type__) + + def compile(cls): + cdict = super().compile() + + # Constants from UI + cdict["min_replicas"] = "1" + cdict["max_replicas"] = "1" + return cdict + + +class BrownfieldDeploymentValidator( + PropertyValidator, openapi_type="app_brownfield_deployment" +): + __default__ = None + __kind__ = BrownfieldDeploymentType + + +def brownfield_deployment(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return BrownfieldDeploymentType(name, bases, kwargs) + + +BrownfieldDeployment = brownfield_deployment() + + +class Brownfield: + Deployment = BrownfieldDeployment + + class Vm: + def __new__(cls, instance_name=None, ip_address=[], instance_id=None): + 
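# Sketch of how a brownfield deployment might reference existing VMs through
# the Brownfield helper being defined here (illustrative only; class,
# attribute and instance names follow the public brownfield examples and are
# hypothetical placeholders).
#
#   from calm.dsl.builtins import Brownfield as BF
#
#   class ExistingVMs(BF.Deployment):
#       """Reuse VMs already present on the registered AHV account"""
#       instances = [
#           BF.Vm.Ahv(instance_name="web-1"),        # matched by name
#           BF.Vm.Ahv(ip_address=["10.0.0.5"]),      # matched by IP address
#       ]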
"""Vms are searched using these ways: + 1. If instance_id is given will search using that + 2. Else Search using ip_address if given + 3. Else Search using name + """ + + kwargs = { + "instance_name": instance_name, + "address": ip_address, + "instance_id": instance_id, + "provider": "AHV_VM", + } + return brownfield_vm(**kwargs) + + class Ahv: + def __new__(cls, instance_name=None, ip_address=[], instance_id=None): + """Vms are searched using these ways: + 1. If instance_id is given will search using that + 2. Else Search using ip_address if given + 3. Else Search using name + """ + + kwargs = { + "instance_name": instance_name, + "address": ip_address, + "instance_id": instance_id, + "provider": "AHV_VM", + } + return brownfield_vm(**kwargs) + + class Aws: + def __new__(cls, instance_name=None, ip_address=[], instance_id=None): + """Vms are searched using these ways: + 1. If instance_id is given will search using that + 2. Else Search using ip_address if given + 3. Else Search using name + """ + + kwargs = { + "instance_name": instance_name, + "address": ip_address, + "instance_id": instance_id, + "provider": "AWS_VM", + } + return brownfield_vm(**kwargs) + + class Azure: + def __new__(cls, instance_name=None, ip_address=[], instance_id=None): + """Vms are searched using these ways: + 1. If instance_id is given will search using that + 2. Else Search using ip_address if given + 3. Else Search using name + """ + + kwargs = { + "instance_name": instance_name, + "address": ip_address, + "instance_id": instance_id, + "provider": "AZURE_VM", + } + return brownfield_vm(**kwargs) + + class Gcp: + def __new__(cls, instance_name=None, ip_address=[], instance_id=None): + """Vms are searched using these ways: + 1. If instance_id is given will search using that + 2. Else Search using ip_address if given + 3. Else Search using name + """ + + kwargs = { + "instance_name": instance_name, + "address": ip_address, + "instance_id": instance_id, + "provider": "GCP_VM", + } + return brownfield_vm(**kwargs) + + class Vmware: + def __new__(cls, instance_name=None, ip_address=[], instance_id=None): + """Vms are searched using these ways: + 1. If instance_id is given will search using that + 2. Else Search using ip_address if given + 3. 
Else Search using name + """ + + kwargs = { + "instance_name": instance_name, + "address": ip_address, + "instance_id": instance_id, + "provider": "VMWARE_VM", + } + return brownfield_vm(**kwargs) diff --git a/framework/calm/dsl/builtins/models/calm_ref.py b/framework/calm/dsl/builtins/models/calm_ref.py new file mode 100644 index 0000000..9001c45 --- /dev/null +++ b/framework/calm/dsl/builtins/models/calm_ref.py @@ -0,0 +1,502 @@ +import sys +import uuid + +from calm.dsl.db.table_config import AhvSubnetsCache + +from .entity import Entity, EntityType +from .validator import PropertyValidator +from .helper import common as common_helper + +from .ahv_vm_cluster import AhvCluster +from .ahv_vm_vpc import AhvVpc + +from calm.dsl.store import Cache +from calm.dsl.constants import CACHE +from calm.dsl.api.handle import get_api_client +from calm.dsl.log import get_logging_handle + + +LOG = get_logging_handle(__name__) + + +# CalmRef + + +class CalmRefType(EntityType): + """Metaclass for calm references""" + + __schema_name__ = "CalmRef" + __openapi_type__ = "app_calm_ref" + + def compile(cls): + """compiles the calm_ref object""" + + ref_cls = getattr(cls, "__ref_cls__") + user_attrs = cls.get_user_attrs() + return ref_cls.compile(cls, **user_attrs) + + def __getitem__(cls, key): + """return the vale in compiled class payload""" + data = cls.compile() + return data[key] + + +class CalmRefValidator(PropertyValidator, openapi_type="app_calm_ref"): + __default__ = None + __kind__ = CalmRefType + + +def _calm_ref(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return CalmRefType(name, bases, kwargs) + + +class Ref: + def __new__(cls, *args, **kwargs): + raise TypeError("'{}' is not callable".format(cls.__name__)) + + class Subnet: + def __new__(cls, **kwargs): + kwargs["__ref_cls__"] = cls + return _calm_ref(**kwargs) + + def compile(cls, name, **kwargs): + + cluster = kwargs.get("cluster") + vpc = kwargs.get("vpc") + account_uuid = kwargs.get("account_uuid") + subnet_cache_data = None + try: + provider_obj = cls.__parent__ + subnet_account = provider_obj.account_reference.get_dict() + account_uuid = subnet_account.get("uuid") + + except Exception: + pass + + LOG.debug("Searching for subnet with name: {}".format(name)) + subnet_cache_data = AhvSubnetsCache.get_entity_data( + name, cluster=cluster, vpc=vpc, account_uuid=account_uuid + ) + + if not subnet_cache_data: + raise Exception( + "AHV Subnet {} not found. Please run: calm update cache".format( + name + ) + ) + + return {"kind": "subnet", "name": name, "uuid": subnet_cache_data["uuid"]} + + class User: + def __new__(cls, name, **kwargs): + + directory = kwargs.get("directory") or "" + display_name = kwargs.get("display_name") or "" + user_cache_data = Cache.get_entity_data( + entity_type=CACHE.ENTITY.USER, + name=name, + directory=directory, + display_name=display_name, + ) + + if not user_cache_data: + raise Exception( + "User {} not found. Please run: calm update cache".format(name) + ) + + return {"kind": "user", "name": name, "uuid": user_cache_data["uuid"]} + + class Group: + def __new__(cls, name, **kwargs): + + directory = kwargs.get("directory") or "" + display_name = kwargs.get("display_name") or "" + user_group_cache_data = Cache.get_entity_data( + entity_type=CACHE.ENTITY.USER_GROUP, + name=name, + directory=directory, + display_name=display_name, + ) + + if not user_group_cache_data: + raise Exception( + "User Group {} not found. 
Please run: calm update cache".format( + name + ) + ) + + return { + "kind": "user_group", + "name": name, + "uuid": user_group_cache_data["uuid"], + } + + class Account: + def __new__(cls, name, **kwargs): + + kwargs["__ref_cls__"] = cls + kwargs["name"] = name + return _calm_ref(**kwargs) + + def compile(cls, name, **kwargs): + + provider_type = kwargs.get("provider_type") or "" + account_cache_data = Cache.get_entity_data( + entity_type=CACHE.ENTITY.ACCOUNT, + name=name, + provider_type=provider_type, + ) + + if not account_cache_data: + raise Exception( + "Account {} not found. Please run: calm update cache".format(name) + ) + + return {"kind": "account", "name": name, "uuid": account_cache_data["uuid"]} + + class Role: + def __new__(cls, name, **kwargs): + + role_cache_data = Cache.get_entity_data( + entity_type=CACHE.ENTITY.ROLE, name=name + ) + if not role_cache_data: + raise Exception( + "Role {} not found. Please run: calm update cache".format(name) + ) + return {"kind": "role", "name": name, "uuid": role_cache_data["uuid"]} + + class Project: + def __new__(cls, name, **kwargs): + + project_cache_data = Cache.get_entity_data( + entity_type=CACHE.ENTITY.PROJECT, name=name + ) + if not project_cache_data: + raise Exception( + "Project {} not found. Please run: calm update cache".format(name) + ) + return {"kind": "project", "name": name, "uuid": project_cache_data["uuid"]} + + class Environment: + def __new__(cls, **kwargs): + + kwargs["__ref_cls__"] = cls + return _calm_ref(**kwargs) + + def compile(cls, name, **kwargs): + """cls = CalmRef object""" + + project_cache_data = common_helper.get_cur_context_project() + project_name = project_cache_data.get("name") + project_uuid = project_cache_data.get("uuid") + environment_cache_data = Cache.get_entity_data( + entity_type="environment", name=name, project_uuid=project_uuid + ) + if not environment_cache_data: + LOG.error( + "Environment '{}' not found in project '{}'. Please run: calm update cache".format( + name, project_name + ) + ) + sys.exit(-1) + + return { + "kind": "environment", + "name": name, + "uuid": environment_cache_data["uuid"], + } + + class DirectoryService: + def __new__(cls, name, **kwargs): + + ds_cache_data = Cache.get_entity_data( + entity_type=CACHE.ENTITY.DIRECTORY_SERVICE, name=name + ) + if not ds_cache_data: + raise Exception( + "Directory Service {} not found. Please run: calm update cache".format( + name + ) + ) + return { + "kind": "directory_service", + "name": name, + "uuid": ds_cache_data["uuid"], + } + + class Vm: + def __new__(cls, **kwargs): + + kwargs["__ref_cls__"] = cls + return _calm_ref(**kwargs) + + def compile(cls, name="", **kwargs): + """cls = CalmRef object""" + + client = get_api_client() + account_uuid = "" + try: + account_ref = cls.__parent__.attrs.get("account_reference", {}) + account_uuid = account_ref.get("uuid", "") + except Exception as exp: + pass + + vm_uuid = kwargs.get("uuid", "") + + if name: + params = {"filter": "name=={}".format(name), "length": 250} + res, err = client.account.vms_list(account_uuid, params) + if err: + LOG.error(err) + sys.exit(-1) + + res = res.json() + if res["metadata"]["total_matches"] == 0: + LOG.error("No vm with name '{}' found".format(name)) + sys.exit(-1) + + elif res["metadata"]["total_matches"] > 1 and not vm_uuid: + LOG.error( + "Multiple vms with same name found. 
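# Quick reference for the Ref helpers defined in this file (illustrative,
# assuming Ref is exported from calm.dsl.builtins; entity names are
# placeholders and must exist in the local cache, see `calm update cache`).
#
#   from calm.dsl.builtins import Ref
#
#   project_ref = Ref.Project("default")           # resolved immediately from cache
#   account_ref = Ref.Account("NTNX_LOCAL_AZ")     # optional provider_type narrows the lookup
#   env_ref     = Ref.Environment(name="dev-env")  # resolved against the current project at compile time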
Please provide vm uuid" + ) + sys.exit(-1) + + elif not vm_uuid: + vm_uuid = res["entities"][0]["status"]["uuid"] + + # TODO add valdiations on suppiled uuid + vm_ref = {"uuid": vm_uuid, "kind": "vm"} + + # name is required parameter, else api will fail + vm_ref["name"] = ( + name if name else "_VM_NAME_{}".format(str(uuid.uuid4())[:10]) + ) + + return vm_ref + + class RecoveryPoint: + def __new__(cls, **kwargs): + + kwargs["__ref_cls__"] = cls + return _calm_ref(**kwargs) + + def compile(cls, name=None, **kwargs): + + cls_substrate = common_helper._walk_to_parent_with_given_type( + cls, "SubstrateType" + ) + account_uuid = ( + cls_substrate.get_referenced_account_uuid() if cls_substrate else "" + ) + account_uuid = account_uuid or kwargs.get("account_uuid", "") + if not account_uuid: + LOG.error("Account uuid not found") + sys.exit("Account not found for vm recovery point") + + vrs_uuid = kwargs.get("uuid", "") + payload = {"filter": "account_uuid=={}".format(account_uuid)} + if vrs_uuid: + payload["filter"] += ";uuid=={}".format(vrs_uuid) + else: + payload["filter"] += ";name=={}".format(name) + + client = get_api_client() + vrc_map = client.vm_recovery_point.get_name_uuid_map(payload) + + if not vrc_map: + log_msg = "No recovery point found with " + ( + "uuid='{}'".format(vrs_uuid) + if vrs_uuid + else "name='{}'".format(name) + ) + LOG.error(log_msg) + sys.exit("No recovery point found") + + # there will be single key + vrc_name = list(vrc_map.keys())[0] + vrc_uuid = vrc_map[vrc_name] + + if isinstance(vrc_uuid, list): + LOG.error( + "Multiple recovery points found with name='{}'. Please provide uuid.".format( + vrc_name + ) + ) + LOG.debug("Found recovery point uuids: {}".format(vrc_uuid)) + sys.exit("Multiple recovery points found") + + return { + "kind": "vm_recovery_point", + "name": vrc_name, + "uuid": vrc_uuid, + } + + class Cluster: + def __new__(cls, name=None, account_name=None, **kwargs): + + kwargs["__ref_cls__"] = cls + kwargs["account_name"] = account_name + kwargs["name"] = name + return _calm_ref(**kwargs) + + def compile(cls, name=None, **kwargs): + + if name.startswith("@@{") and name.endswith("}@@"): + return {"kind": "cluster", "uuid": name} + account_name = kwargs.get("account_name", None) + if account_name: + cache_acc_data = Cache.get_entity_data( + CACHE.ENTITY.ACCOUNT, account_name + ) + if not cache_acc_data: + LOG.error( + "Failed to find account with name: {}".format(account_name) + ) + sys.exit("Account name={} not found.".format(account_name)) + + # We found the account + cache_cluster_data = Cache.get_entity_data( + CACHE.ENTITY.AHV_CLUSTER, name, account_uuid=cache_acc_data["uuid"] + ) + if not cache_cluster_data: + LOG.error( + "Failed to find cluster with name: {}, account: {}".format( + name, account_name + ) + ) + sys.exit("Cluster name={} not found".format(name)) + return { + "kind": "cluster", + "name": cache_cluster_data["name"], + "uuid": cache_cluster_data["uuid"], + } + else: + cdict = AhvCluster(name).compile() + return { + "kind": "cluster", + "name": cdict["name"], + "uuid": cdict["uuid"], + } + + class Vpc: + def __new__(cls, name=None, account_name=None, **kwargs): + + kwargs["__ref_cls__"] = cls + kwargs["account_name"] = account_name + kwargs["name"] = name + return _calm_ref(**kwargs) + + def compile(cls, name=None, **kwargs): + + account_name = kwargs.get("account_name", "") + if account_name: + + cache_acc_data = Cache.get_entity_data( + CACHE.ENTITY.ACCOUNT, account_name + ) + if not cache_acc_data: + LOG.error( + "Failed to find 
account with name: {}".format(account_name) + ) + sys.exit("Account name={} not found.".format(account_name)) + + # We found the account + cache_vpc_data = Cache.get_entity_data( + CACHE.ENTITY.AHV_VPC, name, account_uuid=cache_acc_data["uuid"] + ) + if not cache_vpc_data: + LOG.error( + "Failed to find vpc with name: {}, account: {}".format( + name, account_name + ) + ) + sys.exit("VPC name={} not found".format(name)) + return { + "kind": "vpc", + "name": cache_vpc_data["name"], + "uuid": cache_vpc_data["uuid"], + } + else: + cdict = AhvVpc(name).compile() + + return { + "kind": "vpc", + "name": cdict["name"], + "uuid": cdict["uuid"], + } + + class Resource_Type: + def __new__(cls, name, **kwargs): + kwargs["__ref_cls__"] = cls + kwargs["name"] = name + return _calm_ref(**kwargs) + + def compile(cls, name, **kwargs): + + client = get_api_client() + + if name: + params = {"filter": "name=={}".format(name), "length": 250} + res, err = client.resource_types.list(params) + if err: + LOG.error(err) + sys.exit(-1) + + res = res.json() + if res["metadata"]["total_matches"] == 0: + LOG.error("No vm with name '{}' found".format(name)) + sys.exit(-1) + + elif res["metadata"]["total_matches"] > 1: + LOG.error( + "Multiple resource type with same name found. " + "Please provide resource type uuid" + ) + sys.exit(-1) + + resource_type_uuid = res["entities"][0]["status"]["uuid"] + else: + LOG.error( + "Resource type name not passed, " "pls pass resource type name" + ) + sys.exit(-1) + + resource_type_ref = { + "uuid": resource_type_uuid, + "name": name, + "kind": "resource_type", + } + + return resource_type_ref + + class Tunnel: + def __new__(cls, name, **kwargs): + kwargs["__ref_cls__"] = cls + kwargs["name"] = name + return _calm_ref(**kwargs) + + def compile(cls, name, **kwargs): + tunnel_uuid = "" + if name: + cache_vpc_data = Cache.get_entity_data( + CACHE.ENTITY.AHV_VPC, None, tunnel_name=name + ) + if not cache_vpc_data: + LOG.error("Failed to find Tunnel with name: {}".format(name)) + sys.exit("Tunnel name={} not found".format(name)) + tunnel_uuid = cache_vpc_data.get("tunnel_uuid") + LOG.debug("Tunnel UUID: {}".format(tunnel_uuid)) + else: + LOG.error("Tunnel name not passed, " "pls pass Tunnel name") + sys.exit(-1) + + tunnel_ref = { + "uuid": tunnel_uuid, + "name": name, + "kind": "tunnel", + } + + return tunnel_ref diff --git a/framework/calm/dsl/builtins/models/client_attrs.py b/framework/calm/dsl/builtins/models/client_attrs.py new file mode 100644 index 0000000..2dec2ee --- /dev/null +++ b/framework/calm/dsl/builtins/models/client_attrs.py @@ -0,0 +1,38 @@ +import copy +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) +DSL_METADATA_MAP = { + "Service": {}, + "Package": {}, + "Deployment": {}, + "Profile": {}, + "Substrate": {}, +} +# TODO Check for credential + + +def update_dsl_metadata_map(entity_type, entity_name, entity_obj): + global DSL_METADATA_MAP + if entity_type not in DSL_METADATA_MAP: + return + + DSL_METADATA_MAP[entity_type][entity_name] = entity_obj + + +def get_dsl_metadata_map(context=[]): + global DSL_METADATA_MAP + + metadata = copy.deepcopy(DSL_METADATA_MAP) + for c in context: + if c in metadata: + metadata = metadata[c] + else: + return + + return metadata + + +def init_dsl_metadata_map(metadata): + global DSL_METADATA_MAP + DSL_METADATA_MAP = metadata diff --git a/framework/calm/dsl/builtins/models/config_attrs.py b/framework/calm/dsl/builtins/models/config_attrs.py new file mode 100644 index 0000000..ab57a85 --- /dev/null +++ 
b/framework/calm/dsl/builtins/models/config_attrs.py @@ -0,0 +1,89 @@ +import re +import sys + +from .entity import EntityType, Entity +from .validator import PropertyValidator +from .task import CalmTask, create_call_config, dag +from .ref import ref +from .action import action, _action_create +from .runbook import runbook_create +from calm.dsl.log import get_logging_handle + + +LOG = get_logging_handle(__name__) + + +class AhvDiskRuleset(EntityType): + __schema_name__ = "AhvDiskRuleset" + __openapi_type__ = "ahv_disk_rule" + + +class AhvDiskRulesetValidator(PropertyValidator, openapi_type="ahv_disk_rule"): + __default__ = None + __kind__ = AhvDiskRuleset + + +def ahv_disk_ruleset(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return AhvDiskRuleset(name, bases, kwargs) + + +AhvDiskRulesetField = ahv_disk_ruleset() + + +class AhvNicRuleset(EntityType): + __schema_name__ = "AhvNicRuleset" + __openapi_type__ = "ahv_nic_rule" + + +class AhvNicRulesetValidator(PropertyValidator, openapi_type="ahv_nic_rule"): + __default__ = None + __kind__ = AhvNicRuleset + + +def ahv_nic_ruleset(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return AhvNicRuleset(name, bases, kwargs) + + +AhvNicRulesetField = ahv_nic_ruleset() + + +class PatchDataField(EntityType): + __schema_name__ = "PatchDataField" + __openapi_type__ = "patch_data_field" + + +class PatchDataFieldValidator(PropertyValidator, openapi_type="patch_data_field"): + __default__ = None + __kind__ = PatchDataField + + +def patch_data_field(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return PatchDataField(name, bases, kwargs) + + +AhvPatchDataField = patch_data_field() + + +class ConfigAttrs(EntityType): + __schema_name__ = "ConfigAttrs" + __openapi_type__ = "config_attrs" + + +class ConfigAttrsValidator(PropertyValidator, openapi_type="config_attrs"): + __default__ = None + __kind__ = ConfigAttrs + + +def config_attrs(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return ConfigAttrs(name, bases, kwargs) + + +AhvUpdateConfigAttrs = config_attrs() diff --git a/framework/calm/dsl/builtins/models/config_spec.py b/framework/calm/dsl/builtins/models/config_spec.py new file mode 100644 index 0000000..f61c49d --- /dev/null +++ b/framework/calm/dsl/builtins/models/config_spec.py @@ -0,0 +1,299 @@ +import inspect +import json +import uuid +import os +import sys + +from .entity import EntityType, Entity +from .ref import ref +from .utils import read_file +from .validator import PropertyValidator +from .variable import CalmVariable +from calm.dsl.api import get_resource_api, get_api_client +from calm.dsl.config import get_context +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +class UpdateConfig: + def __new__( + cls, + name, + target=None, + patch_attrs=None, + ): + return patch_config_create(name, target, patch_attrs) + + +class ConfigSpecType(EntityType): + __schema_name__ = "ConfigSpec" + __openapi_type__ = "app_config_spec" + + def get_ref(cls, kind=None): + """Note: app_blueprint_deployment kind to be used for pod deployment""" + return super().get_ref(kind=ConfigSpecType.__openapi_type__) + + def compile(cls): + cdict = super().compile() + if "patch_attrs" not in cdict or len(cdict["patch_attrs"]) == 0: + cdict.pop("patch_attrs", None) + return cdict + attrs = cdict.pop("patch_attrs")[0] + categories_data = [] + categories = attrs.categories + for op_category in categories: + for op in op_category["val"]: + val = {} + 
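# Worked illustration of the category flattening performed by this loop
# (example values are hypothetical): each {"operation", "val": [...]} ruleset
# is expanded into one {"operation", "value"} entry per category string.
#
#   in : [{"operation": "add", "val": ["AppFamily:Demo", "AppType:Web"]}]
#   out: [{"operation": "add", "value": "AppFamily:Demo"},
#         {"operation": "add", "value": "AppType:Web"}]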
val["operation"] = op_category["operation"] + val["value"] = op + categories_data.append(val) + memory = attrs.memory + if memory.min_value: + memory.min_value = memory.min_value * 1024 + if memory.max_value: + memory.max_value = memory.max_value * 1024 + if memory.value: + memory.value = str(int(float(memory.value) * 1024)) + target = cdict["attrs_list"][0]["target_any_local_reference"] + disk_data = [] + disks = attrs.disks + adapter_name_index_map = {} + for disk in disks: + if disk.disk_operation in ["delete", "modify"]: + val = target.__self__.substrate.__self__.provider_spec.resources.disks[ + disk.index + ].compile() + elif disk.disk_operation in ["add"]: + val = disk.disk_value.compile() + val["operation"] = disk.disk_operation + + dtype = val["device_properties"]["disk_address"]["adapter_type"] + if disk.operation != "add": + if dtype not in adapter_name_index_map: + adapter_name_index_map[dtype] = 0 + else: + adapter_name_index_map[dtype] += 1 + val["device_properties"]["disk_address"][ + "device_index" + ] = adapter_name_index_map[dtype] + if disk.operation == "": + disk.operation = "equal" + if disk.value and disk.value != "0": + val["disk_size_mib"] = {} + val["disk_size_mib"]["editable"] = disk.editable + val["disk_size_mib"]["operation"] = disk.operation + val["disk_size_mib"]["value"] = str(int(float(disk.value) * 1024)) + else: + prev = val["disk_size_mib"] + if not isinstance(prev, dict): + val["disk_size_mib"] = {} + val["disk_size_mib"]["editable"] = disk.editable + val["disk_size_mib"]["operation"] = disk.operation + val["disk_size_mib"]["value"] = str(prev) + if disk.min_value: + val["disk_size_mib"]["min_value"] = disk.min_value * 1024 + if disk.max_value: + val["disk_size_mib"]["max_value"] = disk.max_value * 1024 + val.pop("bootable", None) + disk_data.append(val) + nic_data = [] + nics = attrs.nics + counter = 1 + for nic in nics: + if nic.operation in ["delete", "modify"]: + val = target.__self__.substrate.__self__.provider_spec.resources.nics[ + int(nic.index) + ].compile() + elif nic.operation in ["add"]: + val = nic.nic_value.compile() + nic.index = "A{}".format(counter) + counter += 1 + val["operation"] = nic.operation + val["editable"] = nic.editable + val["identifier"] = str(nic.index) + nic_data.append(val) + + data = { + "type": "nutanix", + "nic_delete_allowed": attrs.nic_delete, + "categories_delete_allowed": attrs.categories_delete, + "categories_add_allowed": attrs.categories_add, + "disk_delete_allowed": attrs.disk_delete, + "num_sockets_ruleset": attrs.numsocket.get_all_attrs(), + "memory_size_mib_ruleset": memory.get_all_attrs(), + "num_vcpus_per_socket_ruleset": attrs.vcpu.get_all_attrs(), + "pre_defined_disk_list": disk_data, + "pre_defined_nic_list": nic_data, + "pre_defined_categories": categories_data, + } + cdict["attrs_list"][0]["data"] = data + return cdict + + +class PatchConfigSpecType(ConfigSpecType): + pass + + +class SnapshotConfigSpecType(ConfigSpecType): + pass + + +class RestoreConfigSpecType(ConfigSpecType): + pass + + +class ConfigSpecValidator(PropertyValidator, openapi_type="app_config_spec"): + __default__ = None + __kind__ = ConfigSpecType + + +CONFIG_SPEC_TYPE_MAP = { + "snapshot": SnapshotConfigSpecType, + "restore": RestoreConfigSpecType, + "patch": PatchConfigSpecType, +} + + +def _config_spec(config_type="snapshot", **kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + config = CONFIG_SPEC_TYPE_MAP[config_type](name, bases, kwargs) + return config + + +def _config_create(config_type="snapshot", **kwargs): + 
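# builds the concrete config class for the requested type, e.g. (illustrative) + # _config_create(config_type="patch", name="MyPatch") returns a class created via PatchConfigSpecType + 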
name = kwargs.get("name", kwargs.get("__name__", None)) + bases = (_config_spec(config_type),) + config = CONFIG_SPEC_TYPE_MAP[config_type](name, bases, kwargs) + return config + + +def patch_config_create( + name, + target=None, + patch_attrs=None, + description="", +): + attrs = { + "target_any_local_reference": target, + "data": {}, + "uuid": str(uuid.uuid4()), + } + kwargs = { + "name": name, + "description": description, + "attrs_list": [attrs], + "patch_attrs": [patch_attrs], + "type": "PATCH", + } + return _config_create(config_type="patch", **kwargs) + + +def snapshot_config_create( + name, + target=None, + snapshot_type="CRASH_CONSISTENT", + num_of_replicas="ONE", + config_references=[], + snapshot_location_type="LOCAL", + policy=None, + description="", +): + rule_ref = {} + if policy: + rule = policy.pop("rule_uuid", None) + client = get_api_client() + res, err = client.app_protection_policy.read(id=policy.get("uuid")) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit("Unable to retrieve protection policy details") + + res = res.json() + protection_rule_list = res["status"]["resources"]["app_protection_rule_list"] + + rule_ref["kind"] = "app_protection_rule" + if not rule: + if protection_rule_list and isinstance(protection_rule_list, list): + rule_ref["uuid"] = protection_rule_list[0]["uuid"] + rule_ref["name"] = protection_rule_list[0]["name"] + if protection_rule_list[0].get( + "remote_snapshot_retention_policy", None + ): + snapshot_location_type = "REMOTE" + else: + for pr in protection_rule_list: + if pr.get("uuid") == rule: + rule_ref["uuid"] = rule + rule_ref["name"] = pr.get("name") + if pr.get("remote_snapshot_retention_policy", None): + snapshot_location_type = "REMOTE" + + if "uuid" not in rule_ref: + LOG.error( + "No Protection Rule {} found under Protection Policy {}".format( + rule, res["metadata"]["name"] + ) + ) + sys.exit("Invalid protection rule") + + if config_references: + for config_ref in config_references: + config_ref.__self__.attrs_list[0][ + "snapshot_location_type" + ] = snapshot_location_type + + attrs = { + "target_any_local_reference": target, + "snapshot_location_type": snapshot_location_type, + "num_of_replicas": num_of_replicas, + } + if policy: + attrs["app_protection_policy_reference"] = policy + attrs["app_protection_rule_reference"] = rule_ref + snapshot_name = CalmVariable.Simple( + name, name="snapshot_name", runtime=True, is_mandatory=True + ) + snapshot_type = CalmVariable.Simple( + snapshot_type, name="snapshot_type", runtime=True, is_mandatory=True + ) + kwargs = { + "name": name, + "description": description, + "attrs_list": [attrs], + "type": "", # Set at profile level during compile + "variables": [snapshot_name, snapshot_type], + "config_references": config_references, + } + return _config_create(config_type="snapshot", **kwargs) + + +def restore_config_create( + name, + target, + snapshot_location_type="LOCAL", + delete_vm_post_restore=False, + description="", +): + attrs = { + "target_any_local_reference": target, + "delete_vm_post_restore": delete_vm_post_restore, + "snapshot_location_type": snapshot_location_type, + } + snapshot_uuids = CalmVariable.Simple( + "", name="snapshot_uuids", runtime=True, is_mandatory=True + ) + delete_vm_post_restore = CalmVariable.Simple( + str(delete_vm_post_restore).lower(), + name="delete_vm_post_restore", + runtime=True, + is_mandatory=True, + ) + kwargs = { + "name": name, + "description": description, + "attrs_list": [attrs], + "type": "", # Set at profile level 
based on target + "variables": [snapshot_uuids, delete_vm_post_restore], + } + return _config_create(config_type="restore", **kwargs) diff --git a/framework/calm/dsl/builtins/models/constants.py b/framework/calm/dsl/builtins/models/constants.py new file mode 100644 index 0000000..2548864 --- /dev/null +++ b/framework/calm/dsl/builtins/models/constants.py @@ -0,0 +1,31 @@ +class TASK_INPUT: + class TYPE: + TEXT = "text" + PASSWORD = "password" + CHECKBOX = "checkbox" + SELECT = "select" + SELECTMULTIPLE = "selectmultiple" + DATE = "date" + TIME = "time" + DATETIME = "datetime" + + VALID_TYPES = [ + TYPE.TEXT, + TYPE.PASSWORD, + TYPE.CHECKBOX, + TYPE.SELECT, + TYPE.SELECTMULTIPLE, + TYPE.DATE, + TYPE.TIME, + TYPE.DATETIME, + ] + + +class SYSTEM_ACTIONS: + CREATE = "create" + START = "start" + RESTART = "restart" + UPDATE = "update" + STOP = "stop" + DELETE = "delete" + SOFT_DELETE = "soft_delete" diff --git a/framework/calm/dsl/builtins/models/credential.py b/framework/calm/dsl/builtins/models/credential.py new file mode 100644 index 0000000..9baf0cc --- /dev/null +++ b/framework/calm/dsl/builtins/models/credential.py @@ -0,0 +1,164 @@ +import sys +from copy import deepcopy + +from distutils.version import LooseVersion as LV + +from .entity import EntityType, Entity +from .validator import PropertyValidator +from .utils import read_file +from calm.dsl.builtins import Ref +from calm.dsl.api.handle import get_api_client +from calm.dsl.log import get_logging_handle +from calm.dsl.store import Version + +LOG = get_logging_handle(__name__) + + +# Credential + + +class CredentialType(EntityType): + __schema_name__ = "Credential" + __openapi_type__ = "app_credential" + + def compile(cls): + cdict = super().compile() + cdict.pop("default", None) + if cdict["type"] == "PASSWORD": + cdict.pop("passphrase", None) + return cdict + + +class CredentialValidator(PropertyValidator, openapi_type="app_credential"): + __default__ = None + __kind__ = CredentialType + + +def _credential(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return CredentialType(name, bases, kwargs) + + +Credential = _credential() + + +def basic_cred( + username, + password="", + name="default", + default=False, + type="PASSWORD", + filename=None, + editables=dict(), +): + + if filename: + password = read_file(filename, depth=2) + + secret = {"attrs": {"is_secret_modified": True}, "value": password} + + kwargs = {} + kwargs["type"] = type + kwargs["username"] = username + kwargs["secret"] = secret + kwargs["name"] = name + kwargs["default"] = default + if editables: + kwargs["editables"] = editables + + return _credential(**kwargs) + + +def secret_cred( + username, + name="default", + secret="default", + type="PASSWORD", + default=False, + editables=dict(), +): + + # This secret value will be replaced when user is creating a blueprint + secret = {"attrs": {"is_secret_modified": True}, "value": "", "secret": secret} + + kwargs = {} + kwargs["type"] = type + kwargs["username"] = username + kwargs["secret"] = secret + kwargs["name"] = name + kwargs["default"] = default + if editables: + kwargs["editables"] = editables + + return _credential(**kwargs) + + +def dynamic_cred( + username, + account, + resource_type=None, + variable_dict={}, + name="default", + default=False, + type="PASSWORD", + editables=dict(), +): + client = get_api_client() + secret = {"attrs": {"is_secret_modified": True}, "value": "@@{secret}@@"} + + # Below line is required as account ref returns uuid in account ref dict + # which is not required 
for dynamic cred cases + + kwargs = {} + kwargs["type"] = type + kwargs["username"] = username + kwargs["secret"] = secret + kwargs["name"] = name + kwargs["default"] = default + kwargs["cred_class"] = "dynamic" + kwargs["account"] = account + + if not resource_type: + resource_type = Ref.Resource_Type(account.name) + + if variable_dict: + resource_type_uuid = resource_type.compile()["uuid"] + res, err = client.resource_types.read(id=resource_type_uuid) + if err: + LOG.error(err) + sys.exit(-1) + + resource_type_payload = res.json() + + cred_attr_list = ( + resource_type_payload.get("spec", {}) + .get("resources", {}) + .get("schema_list", {}) + ) + + if not cred_attr_list: + LOG.error("No Cred-Variables found in account") + sys.exit(-1) + + variable_list = list() + for cred_attr in cred_attr_list: + cred_attr_copy = deepcopy(cred_attr) + var_name = cred_attr["name"] + if var_name in variable_dict: + cred_attr_copy["value"] = variable_dict.pop(var_name) + + cred_attr_copy.pop("uuid", None) + variable_list.append(cred_attr_copy) + + if variable_dict: + LOG.error( + "Variables '{}' not found in account cred-attrs".format( + list(variable_dict.keys()) + ) + ) + sys.exit("Unknown variables found in credential") + + kwargs["variable_list"] = variable_list + kwargs["resource_type"] = resource_type + + if editables: + kwargs["editables"] = editables + + return _credential(**kwargs) diff --git a/framework/calm/dsl/builtins/models/deployment.py b/framework/calm/dsl/builtins/models/deployment.py new file mode 100644 index 0000000..5d2471e --- /dev/null +++ b/framework/calm/dsl/builtins/models/deployment.py @@ -0,0 +1,51 @@ +import sys + +from .entity import EntityType, Entity +from .validator import PropertyValidator +from calm.dsl.log import get_logging_handle + + +LOG = get_logging_handle(__name__) + + +# Deployment + + +class DeploymentType(EntityType): + __schema_name__ = "Deployment" + __openapi_type__ = "app_blueprint_deployment" + + def get_task_target(cls): + return cls.get_ref() + + @classmethod + def pre_decompile(mcls, cdict, context, prefix=""): + cdict = super().pre_decompile(cdict, context, prefix=prefix) + + if "__name__" in cdict: + cdict["__name__"] = "{}{}".format(prefix, cdict["__name__"]) + + return cdict + + @classmethod + def decompile(mcls, cdict, context=[], prefix=""): + + if cdict["type"] == "K8S_DEPLOYMENT": + LOG.error("Decompilation support for pod deployments is not available.") + sys.exit(-1) + + return super().decompile(cdict, context=context, prefix=prefix) + + +class DeploymentValidator(PropertyValidator, openapi_type="app_blueprint_deployment"): + __default__ = None + __kind__ = DeploymentType + + +def deployment(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return DeploymentType(name, bases, kwargs) + + +Deployment = deployment() diff --git a/framework/calm/dsl/builtins/models/descriptor.py b/framework/calm/dsl/builtins/models/descriptor.py new file mode 100644 index 0000000..cf21a77 --- /dev/null +++ b/framework/calm/dsl/builtins/models/descriptor.py @@ -0,0 +1,21 @@ +from .entity import EntityType, Entity +from .validator import PropertyValidator + + +class DescriptorType(EntityType): + __schema_name__ = "Descriptor" + __openapi_type__ = "app_descriptor" + + +class DescriptorValidator(PropertyValidator, openapi_type="app_descriptor"): + __default__ = None + __kind__ = DescriptorType + + +def _descriptor(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return DescriptorType(name, bases, kwargs) + + +Descriptor = _descriptor() diff --git 
a/framework/calm/dsl/builtins/models/endpoint.py b/framework/calm/dsl/builtins/models/endpoint.py new file mode 100644 index 0000000..cb46a65 --- /dev/null +++ b/framework/calm/dsl/builtins/models/endpoint.py @@ -0,0 +1,323 @@ +import enum +import uuid +import sys +from distutils.version import LooseVersion as LV + +from .entity import EntityType, Entity, EntityTypeBase +from .validator import DictValidator, PropertyValidator +from .credential import CredentialType +from calm.dsl.store import Cache +from calm.dsl.constants import CACHE +from calm.dsl.store import Version +from calm.dsl.log import get_logging_handle + + +LOG = get_logging_handle(__name__) + +# Endpoint + + +class EndpointType(EntityType): + __schema_name__ = "Endpoint" + __openapi_type__ = "app_endpoint" + + def compile(cls): + cdict = super().compile() + if (cdict.get("provider_type", "")) == "": + cdict.pop("provider_type", "") + if (cdict.get("value_type", "")) == "": + cdict.pop("value_type", "") + + CALM_VERSION = Version.get_version("Calm") + if LV(CALM_VERSION) < LV("3.2.0"): + value_type = cdict.pop("value_type") + cdict["attrs"]["value_type"] = value_type + + else: + value_type = cdict.get("value_type", "IP") + if value_type == "VM": + account = cdict["attrs"]["account_reference"] + account_name = account["name"] + account_data = Cache.get_entity_data( + entity_type=CACHE.ENTITY.ACCOUNT, name=account_name + ) + if not account_data: + LOG.error("Account {} not found".format(account_name)) + sys.exit(-1) + + provider_type = account_data["provider_type"] + if provider_type not in ["nutanix_pc", "vmware"]: + LOG.error( + "Provider {} not supported for endpoints".format(provider_type) + ) + sys.exit(-1) + + cdict["provider_type"] = provider_type.upper() + + tunnel = cdict.get("tunnel_reference", None) + if tunnel is not None: + if value_type not in ["IP", "HTTP"]: + LOG.error("Tunnel is supported only with IP and HTTP endpoints") + sys.exit(-1) + cdict["tunnel_reference"] = tunnel.compile() + else: + cdict.pop("tunnel_reference") + return cdict + + def post_compile(cls, cdict): + cdict = super().post_compile(cdict) + + # Setting the parent to attrs + attrs = cdict.get("attrs", {}) + + for _, v in attrs.items(): + if isinstance(v, list): + for ve in v: + if issubclass(type(ve), EntityTypeBase): + ve.__parent__ = cls + elif issubclass(type(v), EntityTypeBase): + v.__parent__ = cls + + return cdict + + def __call__(*args, **kwargs): + pass + + +class EndpointValidator(PropertyValidator, openapi_type="app_endpoint"): + __default__ = None + __kind__ = EndpointType + + +def _endpoint(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return EndpointType(name, bases, kwargs) + + +Endpoint = _endpoint() + + +def _endpoint_create(**kwargs): + name = kwargs.get("name", kwargs.get("__name__", None)) + if name is None: + name = getattr(EndpointType, "__schema_name__") + "_" + str(uuid.uuid4())[:8] + kwargs["name"] = name + bases = (Endpoint,) + return EndpointType(name, bases, kwargs) + + +def _http_endpoint( + url, + name=None, + retries=0, + retry_interval=10, + timeout=120, + verify=False, + auth=None, + tunnel=None, +): + kwargs = { + "name": name, + "type": "HTTP", + "value_type": "IP", + "attrs": { + "urls": [url] if isinstance(url, str) else url, + "retry_count": retries + 1, + "retry_interval": retry_interval, + "connection_timeout": timeout, + "tls_verify": verify, + }, + } + if auth: + kwargs["attrs"]["authentication"] = auth + else: + kwargs["attrs"]["authentication"] = {"auth_type": "none"} + if tunnel 
is not None: + kwargs["tunnel_reference"] = tunnel + return _endpoint_create(**kwargs) + + +def _os_endpoint( + value_type, + value_list=[], + vms=[], + name=None, + ep_type="Linux", + port=22, + connection_protocol=None, + cred=None, + subnet=None, + filter=None, + account=None, + tunnel=None, +): + kwargs = { + "name": name, + "type": ep_type, + "value_type": value_type, + "attrs": {"values": value_list, "port": port}, + } + + if value_type == "VM": + if not account: + LOG.error("Account is compulsory for vm endpoints") + sys.exit(-1) + + # If filter string is given, filter type will be set to dynamic + filter_type = "dynamic" if filter else "static" + + kwargs["attrs"]["vm_references"] = vms + kwargs["attrs"]["subnet"] = subnet + kwargs["attrs"]["filter_type"] = filter_type + kwargs["attrs"]["account_reference"] = account + if filter_type == "dynamic": + kwargs["attrs"]["filter"] = filter + + if connection_protocol: + kwargs["attrs"]["connection_protocol"] = connection_protocol + if cred is not None and isinstance(cred, CredentialType): + kwargs["attrs"]["credential_definition_list"] = [cred] + kwargs["attrs"]["login_credential_reference"] = cred.get_ref() + if tunnel: + kwargs["tunnel_reference"] = tunnel + + return _endpoint_create(**kwargs) + + +def linux_endpoint_ip( + value, name=None, port=22, os_type="Linux", cred=None, tunnel=None +): + return _os_endpoint( + "IP", value, ep_type="Linux", name=name, port=port, cred=cred, tunnel=tunnel + ) + + +def windows_endpoint_ip( + value, name=None, connection_protocol="HTTP", port=None, cred=None, tunnel=None +): + connection_protocol = connection_protocol.lower() + if connection_protocol not in ["http", "https"]: + raise TypeError( + "Connection Protocol ({}) should be HTTP/HTTPS".format(connection_protocol) + ) + + if port is None: + if connection_protocol == "http": + port = 5985 + else: + port = 5986 + return _os_endpoint( + "IP", + value, + ep_type="Windows", + connection_protocol=connection_protocol, + name=name, + port=port, + cred=cred, + tunnel=tunnel, + ) + + +def linux_endpoint_vm( + vms=[], + filter=None, + name=None, + port=22, + subnet="", + cred=None, + account=None, +): + return _os_endpoint( + "VM", + [], + vms=vms, + name=name, + ep_type="Linux", + filter=filter, + port=port, + subnet=subnet, + cred=cred, + account=account, + ) + + +def windows_endpoint_vm( + vms=[], + name=None, + filter=None, + connection_protocol="HTTP", + port=None, + cred=None, + subnet="", + account=None, +): + + connection_protocol = connection_protocol.lower() + if connection_protocol not in ["http", "https"]: + raise TypeError( + "Connection Protocol ({}) should be HTTP/HTTPS".format(connection_protocol) + ) + + if port is None: + if connection_protocol == "http": + port = 5985 + else: + port = 5986 + return _os_endpoint( + "VM", + [], + vms=vms, + ep_type="Windows", + connection_protocol=connection_protocol, + name=name, + port=port, + cred=cred, + filter=filter, + subnet=subnet, + account=account, + ) + + +def _basic_auth(username, password): + secret = {"attrs": {"is_secret_modified": True}, "value": password} + auth = {} + auth["type"] = "basic" + auth["username"] = username + auth["password"] = secret + return auth + + +def existing_endpoint(name): + kwargs = {"name": name, "attrs": {}} + bases = (Endpoint,) + return EndpointType(name, bases, kwargs) + + +class CalmEndpoint: + def __new__(cls, *args, **kwargs): + raise TypeError("'{}' is not callable".format(cls.__name__)) + + use_existing = existing_endpoint + + class Linux: + def 
__new__(cls, *args, **kwargs): + raise TypeError("'{}' is not callable".format(cls.__name__)) + + ip = linux_endpoint_ip + vm = linux_endpoint_vm + + class Windows: + def __new__(cls, *args, **kwargs): + raise TypeError("'{}' is not callable".format(cls.__name__)) + + ip = windows_endpoint_ip + vm = windows_endpoint_vm + + class HTTP: + def __new__(cls, *args, **kwargs): + return _http_endpoint(*args, **kwargs) + + class Auth: + def __new__(cls, *args, **kwargs): + return _basic_auth(*args, **kwargs) diff --git a/framework/calm/dsl/builtins/models/endpoint_payload.py b/framework/calm/dsl/builtins/models/endpoint_payload.py new file mode 100644 index 0000000..73a701e --- /dev/null +++ b/framework/calm/dsl/builtins/models/endpoint_payload.py @@ -0,0 +1,52 @@ +from .entity import EntityType, Entity +from .validator import PropertyValidator +from .endpoint import EndpointType + + +# Endpoint Payload + + +class EndpointPayloadType(EntityType): + __schema_name__ = "EndpointPayload" + __openapi_type__ = "endpoint_payload" + + +class EndpointPayloadValidator(PropertyValidator, openapi_type="endpoint_payload"): + __default__ = None + __kind__ = EndpointPayloadType + + +def _endpoint_payload(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return EndpointPayloadType(name, bases, kwargs) + + +EndpointPayload = _endpoint_payload() + + +def create_endpoint_payload(UserEndpoint): + + err = {"error": "", "code": -1} + + if UserEndpoint is None: + err["error"] = "Given endpoint is empty." + return None, err + + if not isinstance(UserEndpoint, EndpointType): + err["error"] = "Given endpoint is not of type Endpoint" + return None, err + + spec = { + "name": UserEndpoint.__name__, + "description": UserEndpoint.__doc__ or "", + "resources": UserEndpoint, + } + + metadata = {"spec_version": 1, "kind": "endpoint", "name": UserEndpoint.__name__} + + UserEndpointPayload = _endpoint_payload() + UserEndpointPayload.metadata = metadata + UserEndpointPayload.spec = spec + + return UserEndpointPayload, None diff --git a/framework/calm/dsl/builtins/models/entity.py b/framework/calm/dsl/builtins/models/entity.py new file mode 100644 index 0000000..230772c --- /dev/null +++ b/framework/calm/dsl/builtins/models/entity.py @@ -0,0 +1,651 @@ +from collections import OrderedDict +import json +from json import JSONEncoder, JSONDecoder +import sys +import inspect +from types import MappingProxyType +import uuid +import copy +import keyword + +from ruamel.yaml import YAML, resolver, SafeRepresenter +from calm.dsl.tools import StrictDraft7Validator +from calm.dsl.log import get_logging_handle +from .schema import get_schema_details +from .utils import get_valid_identifier +from .client_attrs import update_dsl_metadata_map, get_dsl_metadata_map + +LOG = get_logging_handle(__name__) + + +class EntityDict(OrderedDict): + @staticmethod + def pre_validate(vdict, name, value): + """hook to change values before validation, typecast, etc""" + return value + + @classmethod + def _validate_attr(cls, vdict, name, value): + """validates name-value pair via __validator_dict__ of entity""" + + value = cls.pre_validate(vdict, name, value) + + if name.startswith("__") and name.endswith("__"): + return value + + try: + if name not in vdict: + raise TypeError("Unknown attribute {} given".format(name)) + ValidatorType, is_array = vdict[name] + if getattr(ValidatorType, "__is_object__", False): + return ValidatorType.validate(value, is_array) + + except TypeError: + # Check if value is a variable/action + types = 
EntityTypeBase.get_entity_types() + VariableType = types.get("Variable", None) + if not VariableType: + raise TypeError("Variable type not defined") + DescriptorType = types.get("Descriptor", None) + if not DescriptorType: + raise TypeError("Descriptor type not defined") + if not ( + ("variables" in vdict and isinstance(value, (VariableType,))) + or ("actions" in vdict and isinstance(type(value), DescriptorType)) + or ("runbook" in vdict and isinstance(type(value), DescriptorType)) + ): + LOG.debug("Validating object: {}".format(vdict)) + raise + + # Validate and set variable/action/runbook + # get validator for variables/action/runbook + if isinstance(value, VariableType): + ValidatorType, _ = vdict["variables"] + # Set name attribute in variable + setattr(value, "name", name) + + elif isinstance(type(value), DescriptorType): + ValidatorType = None + # Set action_name attribute in action object + setattr(value, "action_name", name) + is_array = False + + if ValidatorType is not None: + ValidatorType.validate(value, is_array) + return value + + def __init__(self, validators=dict()): + self.validators = validators + + def _validate(self, name, value): + vdict = self.validators + if vdict: + return self._validate_attr(vdict, name, value) + return value + + def __setitem__(self, name, value): + + # Validate attribute + value = self._validate(name, value) + + # Set attribute + super().__setitem__(name, value) + + +class EntityTypeBase(type): + + subclasses = {} + + @classmethod + def get_entity_types(cls): + return cls.subclasses + + def __init_subclass__(cls, **kwargs): + super().__init_subclass__(**kwargs) + + if not hasattr(cls, "__schema_name__"): + raise TypeError("Entity type does not have a schema name") + + schema_name = getattr(cls, "__schema_name__") + cls.subclasses[schema_name] = cls + + # Handle base case (Entity) + if not schema_name: + return + + # Set properties on metaclass by fetching from schema + (schema_props, validators, defaults, display_map) = get_schema_details( + schema_name + ) + + # Set validator dict on metaclass for each prop. + # To be used during __setattr__() to validate props. + # Look at validate() for details. + setattr(cls, "__validator_dict__", MappingProxyType(validators)) + + # Set defaults which will be used during serialization. + # Look at json_dumps() for details + setattr(cls, "__default_attrs__", MappingProxyType(defaults)) + + # Attach schema properties to metaclass + setattr(cls, "__schema_props__", MappingProxyType(schema_props)) + + # Attach display map for compile/decompile + setattr(cls, "__display_map__", MappingProxyType(display_map)) + + +class EntityType(EntityTypeBase): + + __schema_name__ = None + __openapi_type__ = None + __prepare_dict__ = EntityDict + + @classmethod + def validate_dict(cls, entity_dict): + schema = {"type": "object", "properties": cls.__schema_props__} + + validator = StrictDraft7Validator(schema) + validator.validate(entity_dict) + + @classmethod + def to_yaml(mcls, representer, node): + yaml_tag = resolver.BaseResolver.DEFAULT_MAPPING_TAG + return representer.represent_mapping(yaml_tag, node.compile()) + + @classmethod + def __prepare__(mcls, name, bases): + + schema_name = mcls.__schema_name__ + + # Handle base case (Entity) + if not schema_name: + return mcls.__prepare_dict__() + + validators = getattr(mcls, "__validator_dict__") + + # Class creation would happen using EntityDict() instead of dict(). + # This is done to add validations to class attrs during class creation. 
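+ # (an invalid attribute value therefore fails while the class body is being executed, not later at compile time)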
+ # Look at __setitem__ in EntityDict + return mcls.__prepare_dict__(validators) + + def __new__(mcls, name, bases, kwargs): + + if not isinstance(kwargs, mcls.__prepare_dict__): + entitydict = mcls.__prepare__(name, bases) + for k, v in kwargs.items(): + entitydict[k] = v + else: + entitydict = kwargs + + schema_name = getattr(mcls, "__schema_name__") + + if not name: + # Generate unique name + name = "_" + schema_name + str(uuid.uuid4())[:8] + elif mcls.__schema_name__ not in ["Task", "Credential"]: + if name == schema_name: + LOG.error("'{}' is a reserved name for this entity".format(name)) + sys.exit(-1) + + elif keyword.iskeyword(name): + LOG.error("'{}' is a reserved python keyword".format(name)) + sys.exit(-1) + + cls = super().__new__(mcls, name, bases, entitydict) + + openapi_type = getattr(mcls, "__openapi_type__") + setattr(cls, "__kind__", openapi_type) + + for k, v in cls.get_default_attrs().items(): + # Check if attr was set during class creation + # else - set default value + if not hasattr(cls, k): + setattr(cls, k, v) + + return cls + + @classmethod + def validate(mcls, name, value): + + if hasattr(mcls, "__validator_dict__"): + vdict = mcls.__validator_dict__ + entity_dict = mcls.__prepare_dict__ + return entity_dict._validate_attr(vdict, name, value) + + return value + + def __setattr__(cls, name, value): + + # Validate attribute + value = cls.validate(name, value) + + # Set attribute + super().__setattr__(name, value) + + def __str__(cls): + return cls.__name__ + + def __repr__(cls): + return cls.__name__ + + def get_user_attrs(cls): + types = EntityTypeBase.get_entity_types() + ActionType = types.get("Action", None) + RunbookType = types.get("Runbook", None) + VariableType = types.get("Variable", None) + DescriptorType = types.get("Descriptor", None) + user_attrs = {} + for name, value in cls.__dict__.items(): + if ( + name.startswith("__") + and name.endswith("__") + and not isinstance(value, (VariableType, ActionType, RunbookType)) + and not isinstance(type(value), DescriptorType) + ) or name == "__parent__": + continue + user_attrs[name] = getattr(cls, name, value) + + return user_attrs + + @classmethod + def get_default_attrs(mcls): + ret = {} + default_attrs = getattr(mcls, "__default_attrs__", {}) or {} + + for key, value in default_attrs.items(): + ret[key] = value() + + # return a deepcopy, this dict or it's contents should NEVER be modified + return ret + + @classmethod + def update_attrs(mcls, attrs): + + if not hasattr(mcls, "__validator_dict__"): + return + + vdict = getattr(mcls, "__validator_dict__") + if ( + "variables" not in vdict + and "actions" not in vdict + and "runbook" not in vdict + ): + return + + # Variables and actions have [] as defaults. 
+ # As this list can be modified/extended here, + # make a copy of variables and actions + if "variables" in vdict: + attrs["variables"] = list(attrs.get("variables", [])) + if "actions" in vdict: + attrs["actions"] = list(attrs.get("actions", [])) + + types = EntityTypeBase.get_entity_types() + ActionType = types.get("Action", None) + VariableType = types.get("Variable", None) + DescriptorType = types.get("Descriptor", None) + RunbookType = types.get("Runbook", None) + + # Update list of variables with given class-level variables + del_keys = [] + for key, value in attrs.items(): + if key not in vdict: + if isinstance(value, ActionType): + attr_name = "actions" + elif isinstance(value, VariableType): + attr_name = "variables" + elif isinstance(value, RunbookType): + attr_name = "runbook" + attrs[attr_name] = value + del_keys.append(key) + continue + elif isinstance(value.__class__, DescriptorType): + exception = getattr(value, "__exception__", None) + if exception: + raise exception + else: + raise TypeError( + "Field {} has value of type {} ".format(key, type(value)) + + "but it is not handled for this entity" + ) + attrs[attr_name].append(value) + del_keys.append(key) + + # Delete attrs + for k in del_keys: + attrs.pop(k) + + def get_all_attrs(cls): + + ncls_ns = cls.get_default_attrs() + for klass in reversed(cls.mro()): + if hasattr(klass, "get_user_attrs") and callable( + getattr(klass, "get_user_attrs") + ): + ncls_ns = {**ncls_ns, **klass.__dict__} + + ncls = type(cls)(cls.__name__, cls.__bases__, ncls_ns) + + return ncls.get_user_attrs() + + def get_not_required_if_none_attrs(cls): + not_required_attrs = [] + schema_props = cls.__schema_props__ + + for prop, value in schema_props.items(): + if value.get("x-calm-dsl-not-required-if-none", False): + not_required_attrs.append(prop) + + return not_required_attrs + + def clone(cls): + """returns the clone (deepcopy) of the original class""" + + ncls_ns = cls.get_default_attrs() + for klass in reversed(cls.mro()): + if hasattr(klass, "get_user_attrs") and callable( + getattr(klass, "get_user_attrs") + ): + ncls_ns = {**ncls_ns, **klass.__dict__} + + for k, v in ncls_ns.items(): + if isinstance(v, list): + nv = [] + for _k in v: + if hasattr(_k, "clone") and callable(getattr(_k, "clone")): + nv.append(_k.clone()) + else: + nv.append(_k) + ncls_ns[k] = nv + + elif hasattr(v, "clone") and callable(getattr(v, "clone")): + ncls_ns[k] = v.clone() + + ncls = type(cls)(cls.__name__, cls.__bases__, ncls_ns) + return ncls + + def pre_compile(cls): + """Hook to construct dsl metadata map""" + if not hasattr(cls, "__schema_name__"): + return + + entity_type = cls.__schema_name__ + entity_obj = {} + + dsl_name = cls.__name__ + ui_name = getattr(cls, "name", "") or dsl_name + + entity_obj = {"dsl_name": dsl_name, "Action": {}} + types = EntityTypeBase.get_entity_types() + ActionType = types.get("Action", None) + + # Fetching actions data inside entity + for ek, ev in cls.__dict__.items(): + if ek == "__parent__": # TODO fix this mess + continue + e_obj = getattr(cls, ek) + if isinstance(e_obj, ActionType): + user_func = ev.user_func + SYSTEM = getattr(cls, "ALLOWED_SYSTEM_ACTIONS", {}) + FRAGMENT = getattr(cls, "ALLOWED_FRAGMENT_ACTIONS", {}) + func_name = user_func.__name__.lower() + if func_name not in SYSTEM and func_name not in FRAGMENT: + # Store naming map for non-system actions + sig = inspect.signature(user_func) + gui_display_name = sig.parameters.get("name", None) + + if gui_display_name and gui_display_name.default != ev.action_name: + 
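# remember the UI display name -> DSL action name mapping so decompile can restore the original identifier + 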
entity_obj["Action"][gui_display_name.default] = { + "dsl_name": ev.action_name + } + + update_dsl_metadata_map(entity_type, entity_name=ui_name, entity_obj=entity_obj) + + def compile(cls): + + cls.pre_compile() + attrs = cls.get_all_attrs() + cls.update_attrs(attrs) + + # convert keys to api schema + cdict = {} + display_map = getattr(type(cls), "__display_map__") + for k, v in attrs.items(): + if getattr(v, "__is_object__", False): + cdict.setdefault(display_map[k], v.compile(cls)) + cdict.setdefault(display_map[k], v) + + # Add name & description if present + if "name" in cdict and cdict["name"] == "": + cdict["name"] = cls.__name__ + + if "description" in cdict and cdict["description"] == "": + cdict["description"] = cls.__doc__ if cls.__doc__ else "" + + # Add extra info for roundtrip + # TODO - remove during serialization before sending to server + # cdict['__kind__'] = cls.__kind__ + not_required_if_none_attrs = cls.get_not_required_if_none_attrs() + for attr in not_required_if_none_attrs: + if not cdict.get(attr): + cdict.pop(attr) + + return cdict + + def post_compile(cls, cdict): + """method sets some properties to dict generated after compile""" + + for _, v in cdict.items(): + if isinstance(v, list): + for ve in v: + if issubclass(type(ve), EntityTypeBase): + ve.__parent__ = cls + elif issubclass(type(v), EntityTypeBase): + v.__parent__ = cls + + return cdict + + def generate_payload(cls): + """generates the payload(dict) for any entity""" + + cls.pre_compile() + cdict = cls.compile() + return cls.post_compile(cdict) + + @classmethod + def pre_decompile(mcls, cdict, context, prefix=""): + """Hook to modify cdict based on dsl metadata""" + + ui_name = cdict.get("name", None) + metadata = get_dsl_metadata_map(context) or {} + dsl_name = metadata.get("dsl_name", ui_name) + + # Impose validation for valid identifier + dsl_name = get_valid_identifier(dsl_name) + cdict["__name__"] = dsl_name + + # Adding description + cdict["__doc__"] = cdict.get("description", "") + + # Remove NULL and empty string data + attrs = {} + for k, v in cdict.items(): + if v is not None and v != "": + attrs[k] = v + + return attrs + + @classmethod + def decompile(mcls, cdict, context=[], prefix=""): + + # Pre decompile step to get class names in blueprint file + schema_name = getattr(mcls, "__schema_name__", None) + ui_name = cdict.get("name", None) + + cur_context = copy.deepcopy(context) + # TODO clear this mess. Store context of entities as per order in blueprint + if schema_name == "Deployment": + # As cur_context will contain Profile details. 
So reinitiate context + cur_context = [schema_name, ui_name] + + elif schema_name and ui_name and schema_name != "Blueprint": + cur_context.extend([schema_name, ui_name]) + + cdict = mcls.pre_decompile(cdict, context=cur_context, prefix=prefix) + + # Convert attribute names to x-calm-dsl-display-name, if given + attrs = {} + display_map = getattr(mcls, "__display_map__") + display_map = {v: k for k, v in display_map.items()} + + user_attrs = {} + for k, v in cdict.items(): + # Case for __name__ and __doc__ attributes of class + if k.startswith("__") and k.endswith("__"): + attrs.setdefault(k, v) + continue + + elif k not in display_map: + LOG.warning("Additional Property ({}) found".format(k)) + continue + + user_attrs.setdefault(display_map[k], v) + + validator_dict = getattr(mcls, "__validator_dict__") + for k, v in user_attrs.items(): + validator, is_array = validator_dict[k] + + # Getting the metaclass for creation of class + if getattr(validator, "__is_object__", False): + entity_type = validator + + else: + entity_type = validator.get_kind() + if getattr(entity_type, "__schema_name__", "") == "ProviderSpec": + # Case already handled in Substrate.pre_decompile + continue + + # No decompilation is needed for entity_type = str, dict, int etc. + if hasattr(entity_type, "decompile"): + if is_array: + new_value = [] + for val in v: + new_value.append( + entity_type.decompile( + val, context=cur_context, prefix=prefix + ) + ) + else: + new_value = entity_type.decompile( + v, context=cur_context, prefix=prefix + ) + + user_attrs[k] = new_value + + # validate the new data + validator.validate(user_attrs[k], is_array) + + # Merging dsl_attrs("__name__", "__doc__" etc.) and user_attrs + attrs.update(user_attrs) + name = attrs.get("__name__", ui_name) + + if mcls.__schema_name__ not in ["Task", "Credential"]: + if name == mcls.__schema_name__: + LOG.error( + "'{}' is a reserved name for this entity. Please use '--prefix/-p' cli option to provide prefix for entity's name.".format( + name + ) + ) + sys.exit(-1) + + elif keyword.iskeyword(name): + LOG.error( + "'{}' is a reserved python keyword. 
Please use '--prefix/-p' cli option to provide prefix for entity's name.".format( + name + ) + ) + sys.exit(-1) + + return mcls(name, (Entity,), attrs) + + def json_dumps(cls, pprint=False, sort_keys=False): + + dump = json.dumps( + cls, + cls=EntityJSONEncoder, + sort_keys=sort_keys, + indent=4 if pprint else None, + separators=(",", ": ") if pprint else (",", ":"), + ) + + # Add newline for pretty print + return dump + "\n" if pprint else dump + + def json_loads(cls, data): + return json.loads(data, cls=EntityJSONDecoder) + + def yaml_dump(cls, stream=sys.stdout): + class MyRepresenter(SafeRepresenter): + def ignore_aliases(self, data): + return True + + yaml = YAML(typ="safe") + yaml.default_flow_style = False + yaml.Representer = MyRepresenter + + types = EntityTypeBase.get_entity_types() + + for _, t in types.items(): + yaml.register_class(t) + + yaml.indent(mapping=2, sequence=4, offset=2) + yaml.dump(cls, stream=stream) + + def get_ref(cls, kind=None): + types = EntityTypeBase.get_entity_types() + ref = types.get("Ref") + if not ref: + return + attrs = { + "name": getattr(cls, "name", "") or cls.__name__, + "kind": kind or getattr(cls, "__kind__"), + } + _cls = ref(None, (Entity,), attrs) + _cls.__self__ = cls + return _cls + + def get_dict(cls): + return json.loads(cls.json_dumps()) + + +class Entity(metaclass=EntityType): + pass + + +class EntityJSONEncoder(JSONEncoder): + def default(self, cls): + + if not hasattr(cls, "__kind__"): + return super().default(cls) + + # Add single function(wrapper) that can contain pre-post checks + return cls.generate_payload() + + +class EntityJSONDecoder(JSONDecoder): + def __init__(self, *args, **kwargs): + super().__init__(object_hook=self.object_hook, *args, **kwargs) + + def object_hook(self, attrs): + + if "__kind__" not in attrs: + return attrs + + kind = attrs["__kind__"] + types = EntityTypeBase.get_entity_types() + + Type = types.get(kind, None) + if not Type: + raise TypeError("Unknown entity type {} given".format(kind)) + + return Type.decompile(attrs) diff --git a/framework/calm/dsl/builtins/models/environment.py b/framework/calm/dsl/builtins/models/environment.py new file mode 100644 index 0000000..5765a02 --- /dev/null +++ b/framework/calm/dsl/builtins/models/environment.py @@ -0,0 +1,171 @@ +import sys + +from .entity import EntityType +from .validator import PropertyValidator +from .helper import common as common_helper +from .calm_ref import Ref +from calm.dsl.log import get_logging_handle +from calm.dsl.store import Cache +from calm.dsl.constants import CACHE +from calm.dsl.constants import PROVIDER_ACCOUNT_TYPE_MAP + +LOG = get_logging_handle(__name__) + + +# Project + + +class EnvironmentType(EntityType): + __schema_name__ = "Environment" + __openapi_type__ = "environment" # TODO use mentioned in calm api schemas + + def compile(cls): + cdict = super().compile() + + substrates = cdict.get("substrate_definition_list", []) + + # ensure that infra_inclusion_list only contains items whitelisted in the project + project_cache_data = common_helper.get_cur_context_project() + project_name = project_cache_data.get("name") + + # environment_infra_list + environment_infra_list = [] + + infra = cdict.get("infra_inclusion_list", []) + for provider_obj in infra: + provider_data = provider_obj.get_dict() + + # Check if given account is filtered in project + infra_account = provider_data["account_reference"] + infra_account_name = infra_account["name"] + infra_account_uuid = infra_account["uuid"] + infra_type = provider_obj.type + if 
infra_account["uuid"] not in project_cache_data["accounts_data"].get( + infra_type, [] + ): + LOG.error( + "Environment uses {} account '{}' which is not added to project {}.".format( + infra_type, infra_account_name, project_name + ) + ) + sys.exit(-1) + + if infra_type != "nutanix_pc": + provider_data.pop("subnet_reference_list", None) + provider_data.pop("external_network_list", None) + provider_data.pop("default_subnet_reference", None) + + else: + provider_data["subnet_references"] = provider_data.get( + "subnet_reference_list", [] + ) + provider_data.get("external_network_list", []) + provider_data.pop("subnet_reference_list", None) + provider_data.pop("external_network_list", None) + provider_data["cluster_references"] = provider_data.get( + "cluster_reference_list", [] + ) + provider_data.pop("cluster_reference_list", None) + provider_data["vpc_references"] = provider_data.get( + "vpc_reference_list", [] + ) + provider_data.pop("vpc_reference_list", None) + + for cluster in provider_data["cluster_references"]: + if cluster["uuid"] not in project_cache_data[ + "whitelisted_clusters" + ].get(infra_account_uuid, []): + LOG.error( + "Environment uses cluster {} for nutanix_pc account {} which is not added to " + "project {}.".format( + cluster["name"], infra_account_name, project_name + ) + ) + sys.exit(-1) + + for vpc in provider_data["vpc_references"]: + if vpc["uuid"] not in project_cache_data["whitelisted_vpcs"].get( + infra_account_uuid, [] + ): + LOG.error( + "Environment uses vpc {} for nutanix_pc account {} which is not added to " + "project {}.".format( + vpc["name"], infra_account_name, project_name + ) + ) + sys.exit(-1) + + for sub in provider_data["subnet_references"]: + if sub["uuid"] not in project_cache_data["whitelisted_subnets"].get( + infra_account_uuid, [] + ): + LOG.error( + "Environment uses subnet {} for nutanix_pc account {} which is not added to " + "project {}.".format( + sub["name"], infra_account_name, project_name + ) + ) + sys.exit(-1) + + environment_infra_list.append(provider_data) + + # environment infra added in 3.2.0 + if environment_infra_list: + cdict["infra_inclusion_list"] = environment_infra_list + + # NOTE Only one substrate per (provider_type, os_type) tuple can exist + sub_set = set() + for sub in substrates: + _sub_tuple = (sub.provider_type, sub.os_type) + if _sub_tuple in sub_set: + LOG.error( + "Multiple substrates of provider_type '{}' for os type '{}' in an environment are not allowed.".format( + sub.provider_type, sub.os_type + ) + ) + sys.exit(-1) + + else: + sub_set.add(_sub_tuple) + + return cdict + + def post_compile(cls, cdict): + cdict = super().post_compile(cdict) + + # Substrate should use account defined in the environment only + inv_dict = {v: k for k, v in PROVIDER_ACCOUNT_TYPE_MAP.items()} + infra_type_account_map = {} + infra = cdict.get("infra_inclusion_list", []) + for row in infra: + account_ref = row["account_reference"] + account_uuid = account_ref.get("uuid") + + account_cache_data = Cache.get_entity_data_using_uuid( + entity_type=CACHE.ENTITY.ACCOUNT, uuid=account_uuid + ) + account_name = account_cache_data.get("name") + provider_type = account_cache_data.get("provider_type") + + infra_type_account_map[inv_dict[provider_type]] = Ref.Account(account_name) + + if infra_type_account_map: + substrates = cdict.get("substrate_definition_list", []) + for sub in substrates: + provider_type = getattr(sub, "provider_type") + sub.account = infra_type_account_map[provider_type] + + return cdict + + +class 
EnvironmentValidator(PropertyValidator, openapi_type="environment"): + __default__ = None + __kind__ = EnvironmentType + + +def environment(**kwargs): + name = kwargs.get("name", None) + bases = () + return EnvironmentType(name, bases, kwargs) + + +Environment = environment() diff --git a/framework/calm/dsl/builtins/models/environment_payload.py b/framework/calm/dsl/builtins/models/environment_payload.py new file mode 100644 index 0000000..00d6442 --- /dev/null +++ b/framework/calm/dsl/builtins/models/environment_payload.py @@ -0,0 +1,96 @@ +import sys +import uuid +from distutils.version import LooseVersion as LV + +from .entity import EntityType, Entity +from .validator import PropertyValidator +from .environment import EnvironmentType +from calm.dsl.config import get_context +from calm.dsl.log import get_logging_handle +from calm.dsl.store import Cache, Version +from calm.dsl.constants import CACHE + +LOG = get_logging_handle(__name__) +# Blueprint Payload + + +class EnvironmentPayloadType(EntityType): + __schema_name__ = "EnvironmentPayload" + __openapi_type__ = "app_environment_payload" + + +class EnvironmentPayloadValidator( + PropertyValidator, openapi_type="app_environment_payload" +): + __default__ = None + __kind__ = EnvironmentPayloadType + + +def _environment_payload(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return EnvironmentPayloadType(name, bases, kwargs) + + +EnvironmentPayload = _environment_payload() + + +def create_environment_payload(UserEnvironment, metadata=dict()): + """ + Creates environment payload + Args: + UserEnvironment(object): Environment object + metadata (dict) : Metadata for environment + Returns: + response(tuple): tuple consisting of environment payload object and error + """ + + err = {"error": "", "code": -1} + + if UserEnvironment is None: + err["error"] = "Given environment is empty." 
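+ # returning (None, err) signals a validation failure to the caller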
+ return None, err + + if not isinstance(UserEnvironment, EnvironmentType): + err["error"] = "Given environment is not of type Environment" + return None, err + + spec = { + "name": UserEnvironment.__name__, + "description": UserEnvironment.__doc__ or "", + "resources": UserEnvironment, + } + + env_project = metadata.get("project_reference", {}).get("name", "") + if not env_project: + ContextObj = get_context() + project_config = ContextObj.get_project_config() + env_project = project_config["name"] + + project_cache_data = Cache.get_entity_data( + entity_type=CACHE.ENTITY.PROJECT, name=env_project + ) + if not project_cache_data: + LOG.error("Project {} not found.".format(env_project)) + sys.exit("Project {} not found.".format(env_project)) + + metadata_payload = { + "spec_version": 1, + "kind": "environment", + "name": UserEnvironment.__name__, + "uuid": str(uuid.uuid4()), + } + + calm_version = Version.get_version("Calm") + if LV(calm_version) >= LV("3.2.0"): + metadata_payload["project_reference"] = { + "kind": "project", + "name": project_cache_data["name"], + "uuid": project_cache_data["uuid"], + } + + UserEnvironmentPayload = _environment_payload() + UserEnvironmentPayload.metadata = metadata_payload + UserEnvironmentPayload.spec = spec + + return UserEnvironmentPayload, None diff --git a/framework/calm/dsl/builtins/models/helper/__init__.py b/framework/calm/dsl/builtins/models/helper/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/framework/calm/dsl/builtins/models/helper/common.py b/framework/calm/dsl/builtins/models/helper/common.py new file mode 100644 index 0000000..8557fbf --- /dev/null +++ b/framework/calm/dsl/builtins/models/helper/common.py @@ -0,0 +1,327 @@ +import sys +import json +from ..metadata_payload import get_metadata_obj +from calm.dsl.api import get_api_client +from calm.dsl.store import Cache +from calm.dsl.config import get_context +from calm.dsl.log import get_logging_handle +from calm.dsl.constants import CACHE + +LOG = get_logging_handle(__name__) + + +def _walk_to_parent_with_given_type(cls, parent_type): + """traverse parent reference for the given class until reach a class that matches the given type""" + try: + if cls.__class__.__name__ == parent_type: + return cls + + return _walk_to_parent_with_given_type(cls.__parent__, parent_type) + except (AttributeError, TypeError): + LOG.debug("cls {} has no parent reference".format(cls)) + return None + + +def get_project_with_pc_account(): + """get project from metadata/config along with whitelisted accounts and subnets""" + + project_cache_data = get_cur_context_project() + project_name = project_cache_data["name"] + project_pc_accounts = project_cache_data.get("accounts_data", {}).get( + "nutanix_pc", [] + ) + if not project_pc_accounts: + LOG.error("No nutanix account registered to project {}".format(project_name)) + sys.exit(-1) + + accounts_data = {} + for ntnx_acc_uuid in project_pc_accounts: + accounts_data[ntnx_acc_uuid] = { + "subnet_uuids": project_cache_data["whitelisted_subnets"].get(ntnx_acc_uuid) + or [], + "vpc_uuids": project_cache_data["whitelisted_vpcs"].get(ntnx_acc_uuid) + or [], + "cluster_uuids": project_cache_data["whitelisted_clusters"].get( + ntnx_acc_uuid + ) + or [], + } + + return ( + dict(uuid=project_cache_data.get("uuid", ""), name=project_name), + accounts_data, + ) + + +def get_cur_context_project(): + """ + Returns project in current context i.e. 
from metadata/config + fallback in this order: metadata(defined in dsl file) -> config + """ + metadata_obj = get_metadata_obj() + project_ref = metadata_obj.get("project_reference") or dict() + + # If project not found in metadata, it will take project from config + context = get_context() + project_config = context.get_project_config() + project_name = project_ref.get("name") or project_config["name"] + + project_cache_data = Cache.get_entity_data( + entity_type=CACHE.ENTITY.PROJECT, name=project_name + ) + if not project_cache_data: + LOG.error( + "Project {} not found. Please run: calm update cache".format(project_name) + ) + sys.exit(-1) + + return project_cache_data + + +def get_project(name=None, project_uuid=""): + + if not (name or project_uuid): + LOG.error("One of name or uuid must be provided") + sys.exit(-1) + + client = get_api_client() + if not project_uuid: + params = {"filter": "name=={}".format(name)} + + LOG.info("Searching for the project {}".format(name)) + res, err = client.project.list(params=params) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + response = res.json() + entities = response.get("entities", None) + project = None + if entities: + if len(entities) != 1: + raise Exception("More than one project found - {}".format(entities)) + + LOG.info("Project {} found ".format(name)) + project = entities[0] + else: + raise Exception("No project found with name {}".format(name)) + + project_uuid = project["metadata"]["uuid"] + LOG.info("Fetching details of project {}".format(name)) + + else: + LOG.info("Fetching details of project (uuid='{}')".format(project_uuid)) + + res, err = client.project.read(project_uuid) # for getting additional fields + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + project = res.json() + return project + + +def get_vmware_account_from_datacenter(datacenter="Sabine59-DC"): + """ + Returns the name of the VMware account attached to the given datacenter. + Default datacenter = Sabine59-DC + """ + + client = get_api_client() + res, err = client.account.list(params={"filter": "type==vmware;state==VERIFIED"}) + if err: + LOG.error(err) + sys.exit(-1) + + res = res.json() + vmw_account_name = "" + for entity in res["entities"]: + if entity["status"]["resources"]["data"].get("datacenter", "") == datacenter: + vmw_account_name = entity["status"]["name"] + + return vmw_account_name + + +def is_macro(var): + """returns true if given var is macro""" + return var.startswith("@@{") and var.endswith("}@@") + + +def get_pe_account_uuid_using_pc_account_uuid_and_subnet_uuid( + pc_account_uuid, subnet_uuid +): + """ + returns pe account uuid using pc account uuid and subnet_uuid + """ + + subnet_cache_data = Cache.get_entity_data_using_uuid( + entity_type=CACHE.ENTITY.AHV_SUBNET, + uuid=subnet_uuid, + account_uuid=pc_account_uuid, + ) + if not subnet_cache_data: + LOG.error( + "AHV Subnet (uuid='{}') not found. 
Please check subnet or update cache".format( + subnet_uuid + ) + ) + sys.exit("Ahv Subnet {} not found".format(subnet_uuid)) + + # As for nutanix accounts, cluster name is account name + LOG.debug("Subnet cache data: {}".format(subnet_cache_data)) + subnet_cluster_name = subnet_cache_data["cluster_name"] + + pc_account_cache = Cache.get_entity_data_using_uuid( + entity_type=CACHE.ENTITY.ACCOUNT, uuid=pc_account_uuid + ) + pc_clusters = pc_account_cache["data"].get("clusters", {}) + pc_clusters_rev = {v: k for k, v in pc_clusters.items()} + + return pc_clusters_rev.get(subnet_cluster_name, "") + + +def get_pe_account_uuid_using_pc_account_uuid_and_nic_data( + pc_account_uuid, subnet_name, cluster_name +): + """ + returns pe account uuid using pc account uuid and subnet_name and cluster_name + """ + + subnet_cache_data = Cache.get_entity_data( + entity_type=CACHE.ENTITY.AHV_SUBNET, + name=subnet_name, + cluster=cluster_name, + account_uuid=pc_account_uuid, + ) + + if not subnet_cache_data: + LOG.error( + "Ahv Subnet (name = '{}') not found in registered Nutanix PC account (uuid = '{}') ".format( + subnet_name, pc_account_uuid + ) + ) + sys.exit("AHV Subnet {} not found".format(subnet_name)) + + # As for nutanix accounts, cluster name is account name + subnet_cluster_name = subnet_cache_data["cluster_name"] + + pc_account_cache = Cache.get_entity_data_using_uuid( + entity_type=CACHE.ENTITY.ACCOUNT, uuid=pc_account_uuid + ) + pc_clusters = pc_account_cache["data"].get("clusters", {}) + pc_clusters_rev = {v: k for k, v in pc_clusters.items()} + + return pc_clusters_rev.get(subnet_cluster_name, "") + + +def get_pe_account_using_pc_account_uuid_and_cluster_name( + pc_account_uuid, cluster_name +): + """ + returns pe account uuid using pc account uuid and cluster_name + """ + cluster_cache_data = Cache.get_entity_data( + entity_type=CACHE.ENTITY.AHV_CLUSTER, + name=cluster_name, + account_uuid=pc_account_uuid, + ) + return cluster_cache_data.get("pe_account_uuid", "") + + +def get_network_group(name=None, tunnel_uuid=None): + + if not (name): + LOG.error(" name must be provided") + sys.exit(-1) + + nested_attributes = [ + "tunnel_name", + "tunnel_vm_name", + "tunnel_status", + "app_uuid", + "app_status", + ] + + client = get_api_client() + network_group_uuid = None + network_group = None + if not network_group_uuid: + params = {} + filter_query = "" + if name: + params = {"filter": "name=={}".format(name)} + elif tunnel_uuid: + params = {"filter": "tunnel_reference=={}".format(tunnel_uuid)} + params["nested_attributes"] = nested_attributes + + LOG.info("Searching for the network group {}".format(name)) + res, err = client.network_group.list(params=params) + if err: + LOG.exception("[{}] - {}".format(err["code"], err["error"])) + + response = res.json() + entities = response.get("entities", None) + network_group = None + if entities: + if len(entities) != 1: + LOG.exception("More than one Network Group found - {}".format(entities)) + + LOG.info("Network Group {} found ".format(name)) + network_group = entities[0] + else: + LOG.exception("No Network Group found with name {} found".format(name)) + + return network_group + + +def get_network_group_by_tunnel_name(name): + + if not (name): + LOG.error(" name must be provided") + sys.exit(-1) + + nested_attributes = [ + "tunnel_name", + "tunnel_vm_name", + "tunnel_status", + "app_uuid", + "app_status", + ] + + client = get_api_client() + + network_group = {} + + params = {"filter": "name=={}".format(name)} + res, err = client.tunnel.list(params=params) + 
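# resolve the tunnel first; its uuid is used below to filter network groups via the tunnel_reference filter + 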
if err: + LOG.exception("[{}] - {}".format(err["code"], err["error"])) + response = res.json() + LOG.debug("Tunnel response: {}".format(response)) + + tunnels = response.get("entities", []) + if not tunnels: + LOG.exception("No Tunnel found with name: {}".format(name)) + + tunnel_uuid = tunnels[0].get("metadata", {}).get("uuid") + + if tunnel_uuid: + params = {"filter": "tunnel_reference=={}".format(tunnel_uuid)} + params["nested_attributes"] = nested_attributes + + LOG.info("Searching for the network group using tunnel name: {}".format(name)) + res, err = client.network_group.list(params=params) + if err: + LOG.exception("[{}] - {}".format(err["code"], err["error"])) + + response = res.json() + entities = response.get("entities", None) + network_group = None + if entities: + if len(entities) != 1: + LOG.exception("More than one Network Group found - {}".format(entities)) + + LOG.info("Network Group {} found ".format(name)) + network_group = entities[0] + else: + LOG.exception("No Network Group found with name {} found".format(name)) + + return network_group diff --git a/framework/calm/dsl/builtins/models/job.py b/framework/calm/dsl/builtins/models/job.py new file mode 100644 index 0000000..a53ad0c --- /dev/null +++ b/framework/calm/dsl/builtins/models/job.py @@ -0,0 +1,448 @@ +import json +import sys +import click +from datetime import datetime + +try: + from zoneinfo import ZoneInfo +except ImportError: + from backports.zoneinfo import ZoneInfo + +from .entity import EntityType, Entity +from .validator import PropertyValidator + +from calm.dsl.api import get_api_client +from calm.dsl.log import get_logging_handle +from .constants import SYSTEM_ACTIONS + +LOG = get_logging_handle(__name__) + + +# # Job Schedule Info +class JobSchedule(EntityType): + __schema_name__ = "JobScheduleInfo" + __openapi_type__ = "job_schedule_info" + + def compile(cls): + cdict = super().compile() + if cdict["execution_time"] != "": + cdict.pop("schedule", None) + cdict.pop("expiry_time", None) + cdict.pop("start_time", None) + else: + cdict.pop("execution_time", None) + return cdict + + +class JobScheduleValidator(PropertyValidator, openapi_type="job_schedule_info"): + __default__ = None + __kind__ = JobSchedule + + +def _jobschedule_payload(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return JobSchedule(name, bases, kwargs) + + +JobScheduleInfo = _jobschedule_payload() + +# Job Executable +class JobExecutable(EntityType): + __schema_name__ = "JobExecutable" + __openapi_type__ = "executable_resources" + + def compile(cls): + cdict = super().compile() + if cdict.get("action").get("type") == "RUNBOOK_RUN": + cdict["action"]["spec"].pop("uuid", None) + + return cdict + + +class JobExecuableValidator(PropertyValidator, openapi_type="executable_resources"): + __default__ = None + __kind__ = JobExecutable + + +def _jobexecutable_payload(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return JobExecutable(name, bases, kwargs) + + +JobExec = _jobexecutable_payload() + + +def _create_job_executable_payload( + entity_type, entity_uuid, action_type, payload, action_uuid=None +): + + payload = { + "entity": { + "type": entity_type, + "uuid": entity_uuid, + }, + "action": { + "type": action_type, + "spec": {"payload": str(json.dumps(payload))}, + }, + } + + if ( + action_type == "APP_ACTION_RUN" + or action_type == "APP_ACTION_DELETE" + or action_type == "APP_ACTION_SOFT_DELETE" + ): + payload["action"]["spec"]["uuid"] = action_uuid + + return _jobexecutable_payload(**payload) + + +# 
def _create_job_executable_payload_for_app_action( +# entity_type, entity_uuid, action_type, action_uuid, payload +# ): +# +# payload = { +# "entity": { +# "type": entity_type, +# "uuid": entity_uuid, +# }, +# "action": { +# "type": action_type, +# "spec": { +# "uuid": action_uuid, +# "payload": str(json.dumps(payload)) +# }, +# }, +# } +# +# return _jobexecutable_payload(**payload) + + +# create payload for One Time job +def _create_one_time_job_schedule_payload(execution_time, time_zone): + + payload = {"execution_time": str(execution_time), "time_zone": time_zone} + + return _jobschedule_payload(**payload) + + +# create payload for recurring job +def _create_recurring_job_schedule_payload( + schedule, expiry_time, start_time, time_zone +): + + payload = { + "schedule": schedule, + "expiry_time": str(expiry_time), + "start_time": str(start_time), + "time_zone": str(time_zone), + } + + return _jobschedule_payload(**payload) + + +def highlight_text(text, **kwargs): + """Highlight text in our standard format""" + return click.style("{}".format(text), fg="blue", bold=False, **kwargs) + + +# Job +class JobType(EntityType): + __schema_name__ = "Job" + __openapi_type__ = "scheduler_job" + + def compile(cls): + cdict = super().compile() + cdict["state"] = "ACTIVE" + cdict["skip_concurrent_execution"] = False + + # Setting value of type based on execution_time present in schedule_info or not + if cdict["schedule_info"].execution_time != "": + cdict["type"] = "ONE-TIME" + else: + cdict["type"] = "RECURRING" + + return cdict + + +class JobValidator(PropertyValidator, openapi_type="scheduler_job"): + __default__ = None + __kind__ = JobType + + +def _job_payload(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return JobType(name, bases, kwargs) + + +Job = _job_payload() + + +# Interfaces exposed to users + + +def patch_runbook_runtime_editables(client, runbook): + + from calm.dsl.cli import runbooks + + args = [] + variable_list = runbook["spec"]["resources"]["runbook"].get("variable_list", []) + for variable in variable_list: + if variable.get("editables", {}).get("value", False): + options = variable.get("options", {}) + choices = options.get("choices", []) + if choices: + click.echo("Choose from given choices: ") + for choice in choices: + click.echo("\t{}".format(highlight_text(repr(choice)))) + + default_val = variable.get("value", "") + is_secret = variable.get("type") == "SECRET" + + new_val = click.prompt( + "Value for variable '{}' [{}]".format( + variable["name"], + highlight_text(default_val if not is_secret else "*****"), + ), + default=default_val, + show_default=False, + hide_input=is_secret, + type=click.Choice(choices) if choices else type(default_val), + show_choices=False, + ) + if new_val: + args.append( + { + "name": variable.get("name"), + "value": type(variable.get("value", ""))(new_val), + } + ) + + for arg in args: + for variable in variable_list: + if variable["name"] == arg["name"]: + variable["value"] = arg["value"] + + payload = {"spec": {"args": variable_list}} + + default_target = ( + runbook["spec"]["resources"] + .get("default_target_reference", {}) + .get("name", None) + ) + target = input( + "Endpoint target for the Runbook Run (default target={}): ".format( + default_target + ) + ) + if target == "": + target = default_target + if target: + endpoint = runbooks.get_endpoint(client, target) + endpoint_id = endpoint.get("metadata", {}).get("uuid", "") + payload["spec"]["default_target_reference"] = { + "kind": "app_endpoint", + "uuid": endpoint_id, + 
"name": target, + } + return payload + + +def exec_runbook(runbook_name, patch_editables=True): + # Get runbook uuid from name + + from calm.dsl.cli import runbooks + + client = get_api_client() + runbook = runbooks.get_runbook(client, runbook_name, all=True) + res, err = client.runbook.read(runbook["metadata"]["uuid"]) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(err["error"]) + + runbook = res.json() + runbook_uuid = runbook["metadata"]["uuid"] + if not patch_editables: + payload = {} + else: + payload = patch_runbook_runtime_editables(client, runbook) + return _create_job_executable_payload( + "runbook", runbook_uuid, "RUNBOOK_RUN", payload, None + ) + + +def exec_app_action( + app_name, action_name, patch_editables=True, runtime_params_file=False +): + from calm.dsl.cli import apps + + # Get app uuid from name + client = get_api_client() + app = apps._get_app(client, app_name, all=True) + res, err = client.application.read(app["metadata"]["uuid"]) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(err["error"]) + + app = res.json() + app_spec = app["spec"] + app_id = app["metadata"]["uuid"] + + calm_action_name = "action_" + action_name.lower() + action_payload = next( + ( + action + for action in app_spec["resources"]["action_list"] + if action["name"] == calm_action_name or action["name"] == action_name + ), + None, + ) + if not action_payload: + LOG.error("No action found matching name {}".format(action_name)) + sys.exit(-1) + + action_args = apps.get_action_runtime_args( + app_uuid=app_id, + action_payload=action_payload, + patch_editables=patch_editables, + runtime_params_file=runtime_params_file, + ) + + action_id = action_payload["uuid"] + + if action_name.lower() == SYSTEM_ACTIONS.CREATE: + click.echo( + "The Create Action is triggered automatically when you deploy a blueprint. It cannot be run separately." 
+ ) + return + if action_name.lower() == SYSTEM_ACTIONS.DELETE: + return _create_job_executable_payload( + "app", app_id, "APP_ACTION_DELETE", app, action_id + ) + if action_name.lower() == SYSTEM_ACTIONS.SOFT_DELETE: + return _create_job_executable_payload( + "app", app_id, "APP_ACTION_SOFT_DELETE", app, action_id + ) + + # Hit action run api (with metadata and minimal spec: [args, target_kind, target_uuid]) + status = app.pop("status") + config_list = status["resources"]["snapshot_config_list"] + config_list.extend(status["resources"]["restore_config_list"]) + for task in action_payload["runbook"]["task_definition_list"]: + if task["type"] == "CALL_CONFIG": + config = next( + config + for config in config_list + if config["uuid"] == task["attrs"]["config_spec_reference"]["uuid"] + ) + if config["type"] == "AHV_SNAPSHOT": + action_args.append(apps.get_snapshot_name_arg(config, task["uuid"])) + elif config["type"] == "AHV_RESTORE": + substrate_id = next( + ( + dep["substrate_configuration"]["uuid"] + for dep in status["resources"]["deployment_list"] + if dep["uuid"] + == config["attrs_list"][0]["target_any_local_reference"]["uuid"] + ), + None, + ) + api_filter = "" + if substrate_id: + api_filter = "substrate_reference==" + substrate_id + res, err = client.application.get_recovery_groups(app_id, api_filter) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + action_args.append( + apps.get_recovery_point_group_arg( + config, task["uuid"], res.json()["entities"] + ) + ) + + app["spec"] = { + "args": action_args, + "target_kind": "Application", + "target_uuid": app_id, + } + + return _create_job_executable_payload( + "app", app_id, "APP_ACTION_RUN", app, action_id + ) + + +def set_one_time_schedule_info(start_time, time_zone="UTC"): + # Get User timezone + user_tz = ZoneInfo(time_zone) + # Convert datetime string to datetime object + datetime_obj = datetime.strptime(start_time, "%Y-%m-%d %H:%M:%S") + # Append timezone to datetime object + datetime_obj_with_tz = datetime( + datetime_obj.year, + datetime_obj.month, + datetime_obj.day, + datetime_obj.hour, + datetime_obj.minute, + datetime_obj.second, + tzinfo=user_tz, + ) + # Convert to Epoch + seconds_since_epoch = int(datetime_obj_with_tz.timestamp()) + + return _create_one_time_job_schedule_payload(seconds_since_epoch, time_zone) + + +def set_recurring_schedule_info(schedule, start_time, expiry_time, time_zone="UTC"): + # Get User timezone + user_tz = ZoneInfo(time_zone) + # Convert datetime string to datetime object + datetime_obj = datetime.strptime(start_time, "%Y-%m-%d %H:%M:%S") + # Append timezone to datetime object + datetime_obj_with_tz = datetime( + datetime_obj.year, + datetime_obj.month, + datetime_obj.day, + datetime_obj.hour, + datetime_obj.minute, + datetime_obj.second, + tzinfo=user_tz, + ) + # Convert to Epoch + seconds_since_epoch_start_time = int(datetime_obj_with_tz.timestamp()) + + datetime_obj = datetime.strptime(expiry_time, "%Y-%m-%d %H:%M:%S") + datetime_obj_with_tz = datetime( + datetime_obj.year, + datetime_obj.month, + datetime_obj.day, + datetime_obj.hour, + datetime_obj.minute, + datetime_obj.second, + tzinfo=user_tz, + ) + seconds_since_epoch_expiry_time = int(datetime_obj_with_tz.timestamp()) + + return _create_recurring_job_schedule_payload( + schedule, + seconds_since_epoch_expiry_time, + seconds_since_epoch_start_time, + time_zone, + ) + + +class JobScheduler: + class Exec: + def __new__(cls, *args, **kwargs): + raise TypeError("'{}' is not callable".format(cls.__name__)) + + 
runbook = exec_runbook + app_action = exec_app_action + + class ScheduleInfo: + def __new__(cls, *args, **kwargs): + raise TypeError("'{}' is not callable".format(cls.__name__)) + + oneTime = set_one_time_schedule_info + recurring = set_recurring_schedule_info diff --git a/framework/calm/dsl/builtins/models/metadata.py b/framework/calm/dsl/builtins/models/metadata.py new file mode 100644 index 0000000..6f73ec1 --- /dev/null +++ b/framework/calm/dsl/builtins/models/metadata.py @@ -0,0 +1,33 @@ +from .entity import EntityType, Entity +from .validator import PropertyValidator + + +# Metadata + + +class MetadataType(EntityType): + __schema_name__ = "Metadata" + __openapi_type__ = "app_metadata" + + def compile(cls): + cdict = super().compile() + if not cdict.get("owner_reference", {}): + cdict.pop("owner_reference", None) + if not cdict.get("categories", {}): + cdict.pop("categories", None) + if not cdict.get("project_reference", {}): + cdict.pop("project_reference", None) + return cdict + + +class MetadataValidator(PropertyValidator, openapi_type="app_metadata"): + __default__ = None + __kind__ = MetadataType + + +def _metadata(**kwargs): + bases = (Entity,) + return MetadataType(None, bases, kwargs) + + +Metadata = _metadata() diff --git a/framework/calm/dsl/builtins/models/metadata_payload.py b/framework/calm/dsl/builtins/models/metadata_payload.py new file mode 100644 index 0000000..0a7b458 --- /dev/null +++ b/framework/calm/dsl/builtins/models/metadata_payload.py @@ -0,0 +1,57 @@ +# NOTE This module is not added to `builtins.__init__` bczit is only for dsl internal logics not for making blueprints +# Below helpers are used in both `calm/dsl/cli/` and `calm/dsl/builtins/` +# Import its helpers using `from calm.dsl.builtins.models.metadata_payload import *` + +from .metadata import Metadata +from calm.dsl.tools import get_module_from_file + +_MetadataPayload = dict() + + +# TODO change it to class +def get_metadata_module_from_file(dsl_file): + """Returns module given a file (.py)""" + + return get_module_from_file("calm.dsl.user_metadata", dsl_file) + + +def get_metadata_class_from_module(user_module): + """Returns project class given a module""" + + UserMetadata = None + for item in dir(user_module): + obj = getattr(user_module, item) + if isinstance(obj, type(Metadata)): + if obj.__bases__[0] == Metadata: + UserMetadata = obj + + return UserMetadata + + +def get_metadata_payload(dsl_file): + """ + returns the metadata payload from the dsl_file + """ + + global _MetadataPayload + user_metadata_module = get_metadata_module_from_file(dsl_file) + UserMetadata = get_metadata_class_from_module(user_metadata_module) + + payload = {} + if UserMetadata: + payload = UserMetadata.get_dict() + + # updating global object + _MetadataPayload = payload + return payload + + +def reset_metadata_obj(): + """resets metadata object""" + + global _MetadataPayload + _MetadataPayload = dict() + + +def get_metadata_obj(): + return _MetadataPayload diff --git a/framework/calm/dsl/builtins/models/network_group_tunnel.py b/framework/calm/dsl/builtins/models/network_group_tunnel.py new file mode 100644 index 0000000..006766e --- /dev/null +++ b/framework/calm/dsl/builtins/models/network_group_tunnel.py @@ -0,0 +1,78 @@ +from distutils.version import LooseVersion as LV +import uuid + +from .entity import EntityType, Entity +from .validator import PropertyValidator +from calm.dsl.log import get_logging_handle +from calm.dsl.store import Version + +LOG = get_logging_handle(__name__) + + +# NetworkGroupTunnel + + +class 
NetworkGroupTunnelType(EntityType): + __schema_name__ = "NetworkGroupTunnel" + __openapi_type__ = "network_group_tunnel" + + def compile(cls): + cdict = super().compile() + + platform_vpcs = cdict.pop("platform_vpc_uuid_list", []) + vpc_uuids = [] + vpc_name = None + vpc_dicts = [] + for vpc in platform_vpcs: + vpc_dict = vpc.compile() + vpc_uuid = vpc_dict.get("uuid", None) + if vpc_uuid: + vpc_uuids.append(vpc_dict.get("uuid", None)) + vpc_dicts.append(vpc_dict) + cdict["platform_vpc_uuid_list"] = vpc_uuids + + if len(vpc_dicts) > 0: + vpc_name = vpc_dicts[0].get("name") + + account_ref = cdict.get("account_reference", None) + account_uuid = None + if account_ref and isinstance(account_ref, EntityType): + account_dict = account_ref.get_dict() + account_uuid = account_dict.get("uuid", None) + + tunnel_vm_spec_obj = cdict.pop("tunnel_vm_spec", None) + tunnel_name = cdict.pop("tunnel_reference", "") + user_tunnel_vm_name = "" + if tunnel_vm_spec_obj: + tunnel_vm_spec_dict = tunnel_vm_spec_obj.compile( + vpc=vpc_name, account_uuid=account_uuid + ) + cdict["tunnel_vm_spec"] = tunnel_vm_spec_dict + user_tunnel_vm_name = tunnel_vm_spec_dict.get("vm_name", None) + + if not user_tunnel_vm_name: + tunnel_vm_spec_dict["vm_name"] = tunnel_name + "_VM" + + tunnel_reference = { + "kind": "tunnel", + "name": tunnel_name, + "uuid": str(uuid.uuid4()), + } + cdict["tunnel_reference"] = tunnel_reference + return cdict + + +class NetworkGroupTunnelValidator( + PropertyValidator, openapi_type="network_group_tunnel" +): + __default__ = None + __kind__ = NetworkGroupTunnelType + + +def network_group_tunnel(**kwargs): + name = kwargs.get("name", None) + bases = () + return NetworkGroupTunnelType(name, bases, kwargs) + + +NetworkGroupTunnel = network_group_tunnel() diff --git a/framework/calm/dsl/builtins/models/network_group_tunnel_payload.py b/framework/calm/dsl/builtins/models/network_group_tunnel_payload.py new file mode 100644 index 0000000..4d4efcd --- /dev/null +++ b/framework/calm/dsl/builtins/models/network_group_tunnel_payload.py @@ -0,0 +1,64 @@ +from calm.dsl.api import tunnel +from .entity import EntityType, Entity +from .validator import PropertyValidator +from .network_group_tunnel import NetworkGroupTunnelType +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + +# Blueprint Payload + + +class NetworkGroupTunnelPayloadType(EntityType): + __schema_name__ = "NetworkGroupTunnelPayload" + __openapi_type__ = "app_network_group_tunnel_payload" + + +class NetworkGroupTunnelPayloadValidator( + PropertyValidator, openapi_type="app_network_group_tunnel_payload" +): + __default__ = None + __kind__ = NetworkGroupTunnelPayloadType + + +def _network_group_tunnel_payload(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return NetworkGroupTunnelPayloadType(name, bases, kwargs) + + +NetworkGroupTunnelPayload = _network_group_tunnel_payload() + + +def create_network_group_tunnel_payload(UserNetworkGroupTunnel): + + err = {"error": "", "code": -1} + + if UserNetworkGroupTunnel is None: + err["error"] = "Given network group tunnel is empty." 
+ return None, err + + if not isinstance(UserNetworkGroupTunnel, NetworkGroupTunnelType): + err["error"] = "Given network group tunnel is not of type NetworkGroupTunnel" + return None, err + + UserNetworkGroupTunnelPayload = _network_group_tunnel_payload() + + tunnel_name = UserNetworkGroupTunnel.__name__ + UserNetworkGroupTunnel.tunnel_name = tunnel_name + spec = { + "name": tunnel_name + "_ng", + "description": UserNetworkGroupTunnel.__doc__ or "", + "resources": UserNetworkGroupTunnel, + } + + metadata = { + "spec_version": 1, + "kind": "network_group_tunnel", + "name": tunnel_name + "_ng", + } + + UserNetworkGroupTunnelPayload.metadata = metadata + UserNetworkGroupTunnelPayload.spec = spec + + return UserNetworkGroupTunnelPayload, None diff --git a/framework/calm/dsl/builtins/models/network_group_tunnel_vm_payload.py b/framework/calm/dsl/builtins/models/network_group_tunnel_vm_payload.py new file mode 100644 index 0000000..6d1e7fc --- /dev/null +++ b/framework/calm/dsl/builtins/models/network_group_tunnel_vm_payload.py @@ -0,0 +1,68 @@ +from calm.dsl.api import network_group, tunnel +from calm.dsl.builtins.models.helper.common import get_network_group_by_tunnel_name +from .entity import EntityType, Entity +from .validator import PropertyValidator +from .network_group_tunnel_vm_spec import NetworkGroupTunnelVMSpecType +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + +# Blueprint Payload + + +class NetworkGroupTunnelVMPayloadType(EntityType): + __schema_name__ = "NetworkGroupTunnelVMPayload" + __openapi_type__ = "app_network_group_tunnel_vm_payload" + + +class NetworkGroupTunnelVMPayloadValidator( + PropertyValidator, openapi_type="app_network_group_tunnel_vm_payload" +): + __default__ = None + __kind__ = NetworkGroupTunnelVMPayloadType + + +def _network_group_tunnel_vm_payload(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return NetworkGroupTunnelVMPayloadType(name, bases, kwargs) + + +NetworkGroupTunnelPayload = _network_group_tunnel_vm_payload() + + +def create_network_group_tunnel_vm_payload( + UserNetworkGroupTunnelVM, network_group_tunnel_name +): + + err = {"error": "", "code": -1} + + if UserNetworkGroupTunnelVM is None: + err["error"] = "Given network group tunnel is empty." 
+ return None, err + + if not isinstance(UserNetworkGroupTunnelVM, NetworkGroupTunnelVMSpecType): + err["error"] = "Given network group tunnel VM spec is not of type NetworkGroupTunnelVMSpec" + return None, err + + UserNetworkGroupTunnelVMPayload = _network_group_tunnel_vm_payload() + + spec = { + "name": network_group_tunnel_name, + "description": UserNetworkGroupTunnelVM.__doc__ or "", + "resources": UserNetworkGroupTunnelVM, + } + + if UserNetworkGroupTunnelVM.vm_name == "": + UserNetworkGroupTunnelVM.vm_name = network_group_tunnel_name + "_VM" + + metadata = { + "spec_version": 1, + "kind": "network_group_tunnel_vm", + "name": network_group_tunnel_name, + } + + UserNetworkGroupTunnelVMPayload.metadata = metadata + UserNetworkGroupTunnelVMPayload.spec = spec + + return UserNetworkGroupTunnelVMPayload, None diff --git a/framework/calm/dsl/builtins/models/network_group_tunnel_vm_spec.py b/framework/calm/dsl/builtins/models/network_group_tunnel_vm_spec.py new file mode 100644 index 0000000..8407b1f --- /dev/null +++ b/framework/calm/dsl/builtins/models/network_group_tunnel_vm_spec.py @@ -0,0 +1,68 @@ +from distutils.version import LooseVersion as LV + +from calm.dsl.db.table_config import AhvClustersCache, AhvSubnetsCache +from .validator import PropertyValidator +from .entity import EntityType, Entity +from calm.dsl.log import get_logging_handle +from calm.dsl.store import Version + +LOG = get_logging_handle(__name__) + + +# NetworkGroupTunnelVMSpec + + +class NetworkGroupTunnelVMSpecType(EntityType): + __schema_name__ = "NetworkGroupTunnelVMSpec" + __openapi_type__ = "network_group_tunnel_vm_spec" + + def compile(cls, **kwargs): + cdict = super().compile() + + cluster = cdict.pop("cluster", None) + account_uuid = None + + vpc = kwargs.get("vpc", None) + + if kwargs.get("account_uuid"): + LOG.debug("Found account_uuid passed from parent compile") + account_uuid = kwargs.get("account_uuid") + + if cluster: + cluster_dict = AhvClustersCache.get_entity_data( + cluster, account_uuid=account_uuid + ) + cdict["cluster_uuid"] = cluster_dict.get("uuid") + + subnet = cdict.pop("subnet", None) + if subnet: + subnet_dict = AhvSubnetsCache.get_entity_data( + subnet, account_uuid=account_uuid, vpc=vpc + ) + cdict["subnet_uuid"] = subnet_dict.get("uuid") + + return cdict + + +class NetworkGroupTunnelVMSpecValidator( + PropertyValidator, openapi_type="network_group_tunnel_vm_spec" +): + __default__ = {} + __kind__ = NetworkGroupTunnelVMSpecType + + +def network_group_tunnel_vm_spec(**kwargs): + name = kwargs.get("name", None) + bases = () + return NetworkGroupTunnelVMSpecType(name, bases, kwargs) + + +NetworkGroupTunnelVMSpec = network_group_tunnel_vm_spec() + + +def ahv_network_group_tunnel_vm_spec(cluster, subnet): + vm_spec_dict = {} + vm_spec_dict["cluster"] = cluster + vm_spec_dict["subnet"] = subnet + vm_spec_dict["type"] = "AHV" + return network_group_tunnel_vm_spec(**vm_spec_dict) diff --git a/framework/calm/dsl/builtins/models/node_visitor.py b/framework/calm/dsl/builtins/models/node_visitor.py new file mode 100644 index 0000000..13c52c6 --- /dev/null +++ b/framework/calm/dsl/builtins/models/node_visitor.py @@ -0,0 +1,336 @@ +import ast +import uuid + +from .task import meta +from .entity import EntityType +from .task import CalmTask, RunbookTask, TaskType +from .variable import CalmVariable, RunbookVariable, VariableType + + +def handle_meta_create(node, func_globals, prefix=None): + """ + helper for parsing tasks and creating the meta task + """ + + node_visitor = GetCallNodes(func_globals, is_runbook=True, 
is_metatask_context=True) + try: + node_visitor.visit(node) + except Exception as ex: + raise ex + tasks, variables, task_list = node_visitor.get_objects() + + child_tasks = [] + for child_task in task_list: + if not isinstance(child_task, list): + child_task = [child_task] + child_tasks.extend(child_task) + + # First create the meta + if prefix is None: + prefix = str(uuid.uuid4())[-10:] + user_meta = meta(name=prefix + "_meta_task", child_tasks=child_tasks) + + return user_meta, tasks, variables + + +class GetCallNodes(ast.NodeVisitor): + + # TODO: Need to add validations for unsupported nodes. + def __init__( + self, func_globals, target=None, is_runbook=False, is_metatask_context=False + ): + self.task_list = [] + self.all_tasks = [] + self.variables = {} + self.target = target or None + self._globals = func_globals or {}.copy() + + # flag to check if this runbook is in context of RaaS, as decision, while, parallel tasks are supported only in RaaS + self.is_runbook = is_runbook + + # flag to check if tasks are in context of metatask + self.is_metatask = is_metatask_context + + def get_objects(self): + return self.all_tasks, self.variables, self.task_list + + def visit_Call(self, node, return_task=False): + sub_node = node.func + while not isinstance(sub_node, ast.Name): + sub_node = sub_node.value + py_object = eval(compile(ast.Expression(sub_node), "", "eval"), self._globals) + if py_object in (CalmTask, RunbookTask) or isinstance(py_object, EntityType): + task = eval(compile(ast.Expression(node), "", "eval"), self._globals) + if task is not None and isinstance(task, TaskType): + if self.target is not None and not task.target_any_local_reference: + task.target_any_local_reference = self.target + if return_task: + return task + self.task_list.append(task) + self.all_tasks.append(task) + return + return self.generic_visit(node) + + def visit_Assign(self, node): + if not isinstance(node.value, ast.Call): + return self.generic_visit(node) + sub_node = node.value.func + while not isinstance(sub_node, ast.Name): + sub_node = sub_node.value + if ( + eval(compile(ast.Expression(sub_node), "", "eval"), self._globals) + == CalmVariable + or eval(compile(ast.Expression(sub_node), "", "eval"), self._globals) + == RunbookVariable + ): + if len(node.targets) > 1: + raise ValueError( + "not enough values to unpack (expected {}, got 1)".format( + len(node.targets) + ) + ) + variable_name = node.targets[0].id + if variable_name in self.variables.keys(): + raise NameError("duplicate variable name {}".format(variable_name)) + variable = eval( + compile(ast.Expression(node.value), "", "eval"), self._globals + ) + if isinstance(variable, VariableType): + variable.name = variable_name + self.variables[variable_name] = variable + return + return self.generic_visit(node) + + def visit_With(self, node): + if len(node.items) > 1: + raise ValueError( + "Only a single context is supported in 'with' statements inside the action." + ) + context = eval( + compile(ast.Expression(node.items[0].context_expr), "", "eval"), + self._globals, + ) + if ( + not self.is_runbook + and hasattr(context, "__calm_type__") + and context.__calm_type__ == "parallel" + ): + parallel_tasks = [] + for statement in node.body: + if not isinstance(statement.value, ast.Call): + raise ValueError( + "Only calls to 'CalmTask' methods supported inside parallel context." 
+ ) + task = self.visit_Call(statement.value, return_task=True) + if task: + parallel_tasks.append(task) + self.all_tasks.append(task) + self.task_list.append(parallel_tasks) + + # for parallel tasks in runbooks + elif ( + self.is_runbook + and hasattr(context, "__calm_type__") + and context.__calm_type__ == "parallel" + ): + if self.is_metatask: + raise ValueError( + "parallel is not supported in runbooks under decision or loop task context." + ) + if not node.items[0].optional_vars: + raise ValueError( + "Parallel task must be used in the format `with parallel as p`" + ) + _globals = self._globals.copy() + var = node.items[0].optional_vars.id + _globals.update({var: "var"}) + + parallel_tasks = [] + + for statement in node.body: + if not isinstance(statement, ast.With) or len(statement.items) > 1: + raise ValueError( + "Only a single context is supported in 'with' statements inside the parallel." + ) + statement_context = statement.items[0].context_expr + if ( + len(statement_context.args) != 1 + or not isinstance(statement_context.args[0], ast.Name) + or statement_context.args[0].id != var + ): + raise ValueError( + "Incorrect argument is passed in 'branch()', use 'with branch({})'".format( + var + ) + ) + statementContext = eval( + compile(ast.Expression(statement_context), "", "eval"), _globals + ) + if ( + hasattr(statementContext, "__calm_type__") + and statementContext.__calm_type__ == "branch" + ): + statementBody = ast.FunctionDef( + body=statement.body, col_offset=statement.col_offset + ) + _node_visitor = GetCallNodes(self._globals, is_runbook=True) + try: + _node_visitor.visit(statementBody) + except Exception as ex: + raise ex + tasks, variables, task_list = _node_visitor.get_objects() + if len(task_list) == 0: + raise ValueError( + "At least one task is required under parallel branch" + ) + parallel_tasks.append(task_list) + self.all_tasks.extend(tasks) + self.variables.update(variables) + else: + raise ValueError( + "Only with branch() contexts are supported under parallel context." + ) + + if len(parallel_tasks) > 0: + self.task_list.append(parallel_tasks) + + # for decision tasks + elif ( + self.is_runbook + and isinstance(context, TaskType) + and context.type == "DECISION" + ): + if not node.items[0].optional_vars: + raise ValueError( + "Decision task must be used in the format `with Task.Decision() as val`" + ) + var = node.items[0].optional_vars.id + success_path = None + failure_path = None + for statement in node.body: + + if ( + isinstance(statement, ast.If) + and isinstance(statement.test, ast.Compare) + and statement.test.left.value.id == var + and statement.test.left.attr == "exit_code" + and isinstance(statement.test.ops[0], ast.Eq) + and isinstance(statement.test.comparators[0], ast.Num) + ): + + if ( + len(statement.test.comparators) != 1 + or not isinstance(statement.test.comparators[0], ast.Num) + or statement.test.comparators[0].n not in [0, 1] + ): + raise ValueError( + "Decision task only supports exit_code 0 and 1." 
+ ) + + if statement.orelse: + raise ValueError( + "elif or else are not supported in 'if {}.exit_code == 0/1'".format( + var + ) + ) + + if statement.test.comparators[0].n == 0: + if success_path: + raise ValueError( + "'True' flow is defined more than once in {} task.".format( + context.name + ) + ) + success_path, tasks, variables = handle_meta_create( + statement, self._globals, prefix=context.name + "_success" + ) + self.all_tasks.extend([success_path] + tasks) + self.variables.update(variables) + + elif statement.test.comparators[0].n == 1: + if failure_path: + raise ValueError( + "'False' flow is defined more than once in {} task.".format( + context.name + ) + ) + failure_path, tasks, variables = handle_meta_create( + statement, self._globals, prefix=context.name + "_failure" + ) + self.all_tasks.extend([failure_path] + tasks) + self.variables.update(variables) + + elif ( + isinstance(statement, ast.If) + and isinstance(statement.test, ast.Attribute) + and statement.test.value.id == var + and statement.test.attr == "ok" + ): + + if success_path: + raise ValueError( + "'True' flow is defined more than once in {} task.".format( + context.name + ) + ) + ifBody = ast.FunctionDef( + body=statement.body, col_offset=node.col_offset + ) + success_path, tasks, variables = handle_meta_create( + ifBody, self._globals, prefix=context.name + "_success" + ) + self.all_tasks.extend([success_path] + tasks) + self.variables.update(variables) + + if statement.orelse: + if failure_path: + raise ValueError( + "'False' flow is defined more than once in {} task.".format( + context.name + ) + ) + elseBody = ast.FunctionDef( + body=statement.orelse, col_offset=node.col_offset + ) + failure_path, tasks, variables = handle_meta_create( + elseBody, self._globals, prefix=context.name + "_failure" + ) + self.all_tasks.extend([failure_path] + tasks) + self.variables.update(variables) + + else: + raise ValueError( + "Only 'if {}.exit_code == 0/1' or 'if {}.ok' statements are supported in decision context".format( + var, var + ) + ) + + if not success_path or not failure_path: + raise ValueError( + "Both 'True' and 'False' flows are required for decision task." + ) + + context.attrs["success_child_reference"] = success_path.get_ref() + context.attrs["failure_child_reference"] = failure_path.get_ref() + self.all_tasks.append(context) + self.task_list.append(context) + + # for while tasks + elif ( + self.is_runbook + and isinstance(context, TaskType) + and context.type == "WHILE_LOOP" + ): + whileBody = ast.FunctionDef(body=node.body, col_offset=node.col_offset) + meta_task, tasks, variables = handle_meta_create( + whileBody, self._globals, prefix=context.name + "_loop" + ) + self.all_tasks.extend([meta_task] + tasks) + self.variables.update(variables) + context.child_tasks_local_reference_list.append(meta_task.get_ref()) + self.all_tasks.append(context) + self.task_list.append(context) + else: + raise ValueError( + "Unsupported context used in 'with' statement inside the action." 
+ ) diff --git a/framework/calm/dsl/builtins/models/object_type.py b/framework/calm/dsl/builtins/models/object_type.py new file mode 100644 index 0000000..b8835b7 --- /dev/null +++ b/framework/calm/dsl/builtins/models/object_type.py @@ -0,0 +1,142 @@ +import copy + +from .validator import PropertyValidator +from .entity import EntityDict +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +class ObjectDict(EntityDict): + __is_object__ = True + + def __init__(self, validators, defaults, display_map): + self.validators = validators + self.defaults = defaults + self.display_map = display_map + self.__items_set__ = False + super().__init__(validators) + + def get_default(self, is_array): + return ( + self.__class__(self.validators, self.defaults, self.display_map) + if not is_array + else list + ) + + def __call__(self): + return self.__class__(self.validators, self.defaults, self.display_map) + + def __setitem__(self, name, value): + self.__items_set__ = True + super().__setitem__(name, value) + + def get_dict(self): + ret = {} + if not self.__items_set__: + return ret + for key, value in self.defaults.items(): + value = self.get(key, value()) + if getattr(value, "__is_object__", False): + ret[key] = value.get_dict() + else: + ret[key] = value + return ret + + def compile(self, cls): + ret = {} + if not self.__items_set__: + return ret + for key, value in self.defaults.items(): + value = self.get(key, value()) + if getattr(value, "__is_object__", False): + ret[self.display_map[key]] = value.compile(self) + else: + ret[self.display_map[key]] = value + return ret + + def pre_decompile(mcls, cdict, context, prefix=""): + + # Remove NULL and empty string data + attrs = {} + for k, v in cdict.items(): + if v is not None and v != "": + attrs[k] = v + + return attrs + + def decompile(cls, cdict, context=[], prefix=""): + + if not cdict: + return cdict + + cdict = cls.pre_decompile(cdict, context=context, prefix=prefix) + attrs = {} + display_map = copy.deepcopy(cls.display_map) + display_map = {v: k for k, v in display_map.items()} + + # reversing display map values + for k, v in cdict.items(): + if k not in display_map: + LOG.warning("Additional Property ({}) found".format(k)) + continue + + attrs.setdefault(display_map[k], v) + + # recursive decompile + validator_dict = cls.validators + for k, v in attrs.items(): + validator, is_array = validator_dict[k] + + if getattr(validator, "__is_object__", False): + # Case for recursive Object Dict + entity_type = validator + + else: + entity_type = validator.get_kind() + + # No decompilation is needed for entity_type = str, dict, int etc. 
+ if hasattr(entity_type, "decompile"): + if is_array: + new_value = [] + for val in v: + new_value.append(entity_type.decompile(val, prefix=prefix)) + + else: + new_value = entity_type.decompile(v, prefix=prefix) + + attrs[k] = new_value + + # validate the new data + validator.validate(attrs[k], is_array) + + return attrs + + def _validate_item(self, value): + if not isinstance(value, dict): + raise TypeError("{} is not of type {}".format(value, "dict")) + new_value = self.__class__(self.validators, self.defaults, self.display_map) + for k, v in value.items(): + new_value[k] = v + return new_value + + def validate(self, value, is_array): + if not is_array: + if isinstance(value, type(None)): + return + return self._validate_item(value) + + else: + if not isinstance(value, list): + raise TypeError("{} is not of type {}".format(value, "list")) + + res_value = [] + for entity in value: + new_value = self._validate_item(entity) + res_value.append(new_value) + return res_value + + +class ObjectValidator(PropertyValidator, openapi_type="object"): + __default__ = ObjectDict + __kind__ = ObjectDict diff --git a/framework/calm/dsl/builtins/models/package.py b/framework/calm/dsl/builtins/models/package.py new file mode 100644 index 0000000..4364758 --- /dev/null +++ b/framework/calm/dsl/builtins/models/package.py @@ -0,0 +1,194 @@ +import sys + +from .entity import EntityType, Entity, EntityTypeBase +from .validator import PropertyValidator + +from .task import dag +from .action import runbook_create, _action_create +from .runbook import RunbookType +from calm.dsl.log import get_logging_handle + + +LOG = get_logging_handle(__name__) +# Package + + +class PackageType(EntityType): + __schema_name__ = "Package" + __openapi_type__ = "app_package" + + ALLOWED_SYSTEM_ACTIONS = { + "__install__": "action_install", + "__uninstall__": "action_uninstall", + } + + def compile(cls): + + cdict = {} + + # As downloadable images have no type attribute + # So just return it's compiled dict + if getattr(cls, "__kind__") == "app_vm_disk_package": + return super().compile() + + if getattr(cls, "type") == "K8S_IMAGE": + cdict = super().compile() + cdict["options"] = {} + + elif getattr(cls, "type") == "CUSTOM": + + def make_empty_runbook(action_name): + suffix = getattr(cls, "name", "") or cls.__name__ + user_dag = dag( + name="DAG_Task_for_Package_{}_{}".format(suffix, action_name), + target=cls.get_task_target(), + ) + return runbook_create( + name="Runbook_for_Package_{}_{}".format(suffix, action_name), + main_task_local_reference=user_dag.get_ref(), + tasks=[user_dag], + ) + + install_runbook = ( + getattr(getattr(cls, "__install__", None), "runbook", None) or None + ) + + # delattr(cls, "__install__") + if not install_runbook: + install_runbook = make_empty_runbook("action_install") + + uninstall_runbook = ( + getattr(getattr(cls, "__uninstall__", None), "runbook", None) or None + ) + + # delattr(cls, "__uninstall__") + if not uninstall_runbook: + uninstall_runbook = make_empty_runbook("action_uninstall") + + cdict = super().compile() + + # Remove image_spec field created during compile step + cdict.pop("image_spec", None) + cdict["options"] = { + "install_runbook": install_runbook, + "uninstall_runbook": uninstall_runbook, + } + # No actions are allowed other than __install__ and __uninstall__ + cdict.pop("action_list", None) + + elif getattr(cls, "type") == "SUBSTRATE_IMAGE": + cdict = super().compile() + if cdict.get("options"): + cdict["options"].pop("install_runbook", None) + 
cdict["options"].pop("uninstall_runbook", None) + cdict.pop("image_spec", None) + return cdict + + else: + ptype = getattr(cls, "type") + LOG.debug( + "Supported Package Types: ['SUBSTRATE_IMAGE', 'CUSTOM', 'K8S_IMAGE']" + ) + raise Exception("Un-supported package type {}".format(ptype)) + + return cdict + + @classmethod + def pre_decompile(mcls, cdict, context, prefix=""): + cdict = super().pre_decompile(cdict, context, prefix=prefix) + + if "__name__" in cdict: + cdict["__name__"] = "{}{}".format(prefix, cdict["__name__"]) + + return cdict + + @classmethod + def decompile(mcls, cdict, context=[], prefix=""): + + package_type = cdict.get("type", "") or "CUSTOM" + if package_type == "CUSTOM" or package_type == "DEB": + cls = super().decompile(cdict, context=context, prefix=prefix) + options = cls.options + delattr(cls, "options") + + option_data = mcls.__validator_dict__["options"][0].decompile( + options, prefix=prefix + ) + + install_runbook = option_data["install_runbook"] + uninstall_runbook = option_data["uninstall_runbook"] + + install_tasks = install_runbook["task_definition_list"] + if len(install_tasks) > 1: + cls.__install__ = _action_create( + **{ + "name": "action_install", + "critical": True, + "type": "system", + "runbook": RunbookType.decompile( + install_runbook, prefix=prefix + ), + } + ) + + uninstall_tasks = uninstall_runbook["task_definition_list"] + if len(uninstall_tasks) > 1: + cls.__uninstall__ = _action_create( + **{ + "name": "action_uninstall", + "critical": True, + "type": "system", + "runbook": RunbookType.decompile( + uninstall_runbook, prefix=prefix + ), + } + ) + + elif package_type == "SUBSTRATE_IMAGE": + disk_pkg_data = { + "name": cdict["name"], + "description": cdict["description"], + "options": cdict["options"], + } + types = EntityTypeBase.get_entity_types() + VmDiskPackageType = types.get("VmDiskPackage", None) + if not VmDiskPackageType: + raise ModuleNotFoundError("VmDiskPackage Module not found.") + + cls = VmDiskPackageType.decompile(disk_pkg_data, prefix=prefix) + + elif package_type == "K8S_IMAGE": + LOG.error("Decompilation support for pod deployments is not available.") + sys.exit(-1) + + else: + LOG.debug( + "Supported Package Types: ['SUBSTRATE_IMAGE', 'CUSTOM', 'K8S_IMAGE']" + ) + LOG.error("Un-supported package type {}".format(package_type)) + sys.exit(-1) + + return cls + + def get_task_target(cls): + + # Target for package actions is the service, keeping this consistent between UI and DSL. 
+ # Refer: https://jira.nutanix.com/browse/CALM-9182 + services = getattr(cls, "services", []) + if services: + return services[0] + raise ValueError("package do not have any service referenced") + + +class PackageValidator(PropertyValidator, openapi_type="app_package"): + __default__ = None + __kind__ = PackageType + + +def package(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return PackageType(name, bases, kwargs) + + +Package = package() diff --git a/framework/calm/dsl/builtins/models/patch_field.py b/framework/calm/dsl/builtins/models/patch_field.py new file mode 100644 index 0000000..ca5d803 --- /dev/null +++ b/framework/calm/dsl/builtins/models/patch_field.py @@ -0,0 +1,146 @@ +import sys + +from calm.dsl.log import get_logging_handle +from .entity import EntityType, Entity +from .config_attrs import patch_data_field, ahv_nic_ruleset, ahv_disk_ruleset +from .validator import PropertyValidator + +LOG = get_logging_handle(__name__) + + +class PatchField: + def __new__(cls, *args, **kwargs): + raise TypeError("'{}' is not callable".format(cls.__name__)) + + class Ahv: + def __new__(cls, *args, **kwargs): + raise TypeError("'{}' is not callable".format(cls.__name__)) + + class vcpu: + def __new__( + cls, value="0", operation="equal", max_val=0, min_val=0, editable=False + ): + return _data_field_create(value, operation, max_val, min_val, editable) + + class memory: + def __new__( + cls, value="0", operation="equal", max_val=0, min_val=0, editable=False + ): + return _data_field_create(value, operation, max_val, min_val, editable) + + class numsocket: + def __new__( + cls, value="0", operation="equal", max_val=0, min_val=0, editable=False + ): + return _data_field_create(value, operation, max_val, min_val, editable) + + class Nics: + def __new__(cls, *args, **kwargs): + raise TypeError("'{}' is not callable".format(cls.__name__)) + + class add: + def __new__(cls, nic_value, editable=False): + return _nic_create(nic_value, editable) + + class delete: + def __new__(cls, index=0, editable=False): + return _nic_operation(index, editable, "delete") + + class Disks: + def __new__(cls, *args, **kwargs): + raise TypeError("'{}' is not callable".format(cls.__name__)) + + class add: + def __new__(cls, disk_value, editable=False): + return _disk_create(disk_value, editable) + + class delete: + def __new__(cls, index=0): + return _disk_operation(index, False, "delete") + + class modify: + def __new__( + cls, + index=0, + editable=False, + value="0", + operation="equal", + max_val=0, + min_val=0, + ): + return _disk_operation( + index, editable, "modify", operation, value, min_val, max_val + ) + + class Category: + def __new__(cls, *args, **kwargs): + raise TypeError("'{}' is not callable".format(cls.__name__)) + + class add: + def __new__(cls, data): + val = [] + for k in data: + val.append("{}:{}".format(k, data[k])) + return {"operation": "add", "val": val} + + class delete: + def __new__(cls, data): + val = [] + for k in data: + val.append("{}:{}".format(k, data[k])) + return {"operation": "delete", "val": val} + + +def _data_field_create(value, operation, max_val, min_val, editable): + kwargs = { + "value": value, + "operation": operation, + "max_value": max_val, + "min_value": min_val, + "editable": editable, + } + + return patch_data_field(**kwargs) + + +def _nic_create(nic_value, editable): + kwargs = { + "nic_value": nic_value, + "editable": editable, + "operation": "add", + } + return ahv_nic_ruleset(**kwargs) + + +def _nic_operation(index, editable, operation): + 
kwargs = { + "index": str(index), + "editable": editable, + "operation": operation, + } + return ahv_nic_ruleset(**kwargs) + + +def _disk_operation( + index, editable, disk_operation, operation="", value=0, min_val=0, max_val=0 +): + kwargs = { + "index": index, + "editable": editable, + "operation": operation, + "disk_operation": disk_operation, + "value": str(value), + "max_value": max_val, + "min_value": min_val, + } + return ahv_disk_ruleset(**kwargs) + + +def _disk_create(disk_value, editable): + kwargs = { + "disk_value": disk_value, + "editable": editable, + "disk_operation": "add", + "operation": "equal", + } + return ahv_disk_ruleset(**kwargs) diff --git a/framework/calm/dsl/builtins/models/pod_deployment.py b/framework/calm/dsl/builtins/models/pod_deployment.py new file mode 100644 index 0000000..43d4d71 --- /dev/null +++ b/framework/calm/dsl/builtins/models/pod_deployment.py @@ -0,0 +1,186 @@ +from .entity import Entity +from .validator import PropertyValidator +from .provider_spec import provider_spec as get_provider_spec +from .deployment import DeploymentType +from .published_service import published_service +from .service import service +from .package import package +from .substrate import substrate +from .ref import ref +from .deployment import deployment +from .action import action +from inspect import signature +from calm.dsl.log import get_logging_handle + + +LOG = get_logging_handle(__name__) +# PODDeployment + +# Note parent class of PODDeploymentType is DeploymentType +# As deployments in profile class need to be of same type +# For macros of container, Use: +# "{}_{}_{}".format(dep.name, container_name, "PublishedService"), + + +class PODDeploymentType(DeploymentType): + __schema_name__ = "PODDeployment" + __openapi_type__ = "app_pod_deployment" + + def get_ref(cls, kind=None): + """Note: app_blueprint_deployment kind to be used for pod deployment""" + return super().get_ref(kind=DeploymentType.__openapi_type__) + + def extract_deployment(cls, is_simple_deployment=False): + """extract service, packages etc. from service and deployment spec""" + + service_definition_list = [] + package_definition_list = [] + substrate_definition_list = [] + published_service_definition_list = [] + deployment_definition_list = [] + + pub_service_name = cls.__name__ + "Published_Service" + if "apiVersion" in cls.service_spec: + del cls.service_spec["apiVersion"] + if "kind" in cls.service_spec: + del cls.service_spec["kind"] + ps_options = {"type": "PROVISION_K8S_SERVICE"} + ps_options = {**ps_options, **cls.service_spec} + + ps = published_service(name=pub_service_name, options=ps_options) + published_service_definition_list.append(ps) + + containers_list = cls.deployment_spec["spec"]["template"]["spec"].pop( + "containers", None + ) + + if not is_simple_deployment: + # In simple deployment there will be no explicit contianers + if len(containers_list) != len(cls.containers): + LOG.debug( + "No. of container services provided in entity {}: {}, while no. of containers provided in deployment spec: {}".format( + cls, len(cls.containers), len(containers_list) + ) + ) + raise Exception( + "No. 
of container services does not match k8s deployment spec" + ) + + container_action_map = {} + + for key, value in cls.__dict__.items(): + if isinstance(value, action): + sig = signature(value.user_func) + sig_paramter = sig.parameters.get("container_name", None) + if not sig_paramter: + raise Exception( + "container name not supplied action '{}' in deployment '{}'".format( + key, cls.__name__ + ) + ) + + container_name = sig_paramter.default + if container_action_map.get(container_name, None): + container_action_map[container_name].append((key, value)) + else: + container_action_map[container_name] = [(key, value)] + + package_references = [] + for ind, container in enumerate(containers_list): + img = container.pop("image", "") + img_pull_policy = container.pop("imagePullPolicy", None) + + container_name = container["name"].replace("-", "") + + if not is_simple_deployment: + s = cls.containers[ind] + s.container_spec = container + + else: + s = service( + name="{}_{}_{}".format(cls.__name__, container_name, "Service"), + container_spec=container, + ) + + if container_action_map.get(container_name, None): + for service_action in container_action_map[container_name]: + (name, func) = service_action + setattr(s, name, func) + container_action_map.pop(container_name, None) + + if img_pull_policy: + image_spec = {"image": img, "imagePullPolicy": img_pull_policy} + + else: + image_spec = {"image": img} + + p = package( + name="{}_{}_{}".format(cls.__name__, container_name, "Package"), + image_spec=image_spec, + type="K8S_IMAGE", + ) + p.services = [ref(s)] + package_references.append(ref(p)) + + # Storing services and packages to serivce list + service_definition_list.append(s) + package_definition_list.append(p) + + # If not existing container's name is provided in action, raise an Exception\ + if container_action_map: + raise Exception( + "Unknown containers : {} provided in action".format( + list(container_action_map.keys()) + ) + ) + + sub_provider_spec = cls.deployment_spec["spec"].pop("template", {}) + sub = substrate( + name="{}_{}_{}".format(cls.__name__, container_name, "Substrate"), + provider_type="K8S_POD", + provider_spec=get_provider_spec(sub_provider_spec), + ) + + substrate_definition_list.append(sub) + + dep_options = {"type": "PROVISION_K8S_DEPLOYMENT"} + if "apiVersion" in cls.deployment_spec: + del cls.deployment_spec["apiVersion"] + if "kind" in cls.deployment_spec: + del cls.deployment_spec["kind"] + dep_options = {**dep_options, **(cls.deployment_spec)} + d = deployment( + name=cls.__name__, # Dependecies depends on this name + options=dep_options, + type="K8S_DEPLOYMENT", + max_replicas="100", + ) + + d.published_services = [ref(ps)] + d.packages = package_references + d.substrate = ref(sub) + d.dependencies = getattr(cls, "dependencies") + + deployment_definition_list.append(d) + + return { + "service_definition_list": service_definition_list, + "package_definition_list": package_definition_list, + "substrate_definition_list": substrate_definition_list, + "published_service_definition_list": published_service_definition_list, + "deployment_definition_list": deployment_definition_list, + } + + +class PODDeploymentValidator(PropertyValidator, openapi_type="app_pod_deployment"): + __default__ = None + __kind__ = PODDeploymentType + + +def pod_deployment(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return PODDeploymentType(name, bases, kwargs) + + +PODDeployment = pod_deployment() diff --git a/framework/calm/dsl/builtins/models/port.py 
b/framework/calm/dsl/builtins/models/port.py new file mode 100644 index 0000000..f9b28fc --- /dev/null +++ b/framework/calm/dsl/builtins/models/port.py @@ -0,0 +1,24 @@ +from .entity import EntityType, Entity +from .validator import PropertyValidator + + +# Port + + +class PortType(EntityType): + __schema_name__ = "Port" + __openapi_type__ = "app_port" + + +class PortValidator(PropertyValidator, openapi_type="app_port"): + __default__ = None + __kind__ = PortType + + +def port(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return PortType(name, bases, kwargs) + + +Port = port() diff --git a/framework/calm/dsl/builtins/models/profile.py b/framework/calm/dsl/builtins/models/profile.py new file mode 100644 index 0000000..a328bab --- /dev/null +++ b/framework/calm/dsl/builtins/models/profile.py @@ -0,0 +1,211 @@ +import re +import sys + +from .entity import EntityType, Entity +from .validator import PropertyValidator +from .task import CalmTask, create_call_config, dag +from .ref import ref +from .action import action, _action_create +from .runbook import runbook_create +from .config_spec import SnapshotConfigSpecType, RestoreConfigSpecType +from calm.dsl.log import get_logging_handle +from .config_spec import PatchConfigSpecType + + +LOG = get_logging_handle(__name__) + +# Profile + + +class ProfileType(EntityType): + __schema_name__ = "Profile" + __openapi_type__ = "app_profile" + + def get_task_target(cls): + return + + @classmethod + def pre_decompile(mcls, cdict, context, prefix=""): + cdict = super().pre_decompile(cdict, context, prefix=prefix) + + if "__name__" in cdict: + cdict["__name__"] = "{}{}".format(prefix, cdict["__name__"]) + + # TODO add support for decompilation of profile environment + cdict.pop("environment_reference_list", None) + + return cdict + + def compile(cls): + cdict = super().compile() + # description attribute in profile gives bp launch error: https://jira.nutanix.com/browse/CALM-19380 + cdict.pop("description", None) + + config_type_map = { + "restore": "AHV_RESTORE", + "snapshot": "AHV_SNAPSHOT", + "patch": "PATCH", + } + config_action_prefix_map = {"restore": "Restore_", "snapshot": "Snapshot_"} + action_names = list(map(lambda x: x.name, cdict["action_list"])) + + def make_runbook(config, target, action_name): + call_config_task = create_call_config( + target, config, "Call_Config_Task_{}".format(action_name) + ) + dag_task = dag( + "DAG_Task_{}".format(action_name), child_tasks=[call_config_task] + ) + return runbook_create( + name=action_name + "_runbook", + main_task_local_reference=dag_task.get_ref(), + tasks=[dag_task, call_config_task], + ) + + def set_config_type_based_on_target(config, config_type): + # Set the target to first deployment incase target for the config is not specified + + # deployment = config.attrs_list[0].target_any_local_reference.__self__ + # if deployment.substrate.__self__.provider_type == "AHV_VM": + # config.type = config_type_map[config_type] + # else: + # raise Exception( + if config.attrs_list[0]["target_any_local_reference"] is None: + config.attrs_list[0]["target_any_local_reference"] = ref( + cdict["deployment_create_list"][0] + ) + deployment = config.attrs_list[0]["target_any_local_reference"].__self__ + if deployment.substrate.__self__.provider_type == "AHV_VM": + config.type = config_type_map[config_type] + else: + LOG.error( + "Config is not supported for {} provider. 
Please try again after changing the provider".format( + deployment.substrate.__self__.provider_type + ) + ) + sys.exit( + "{} doesn't support {} config".format( + deployment.substrate.__self__.provider_type, config_type + ) + ) + return config + + def create_config_action_if_not_present(action_name, config): + if action_name not in action_names: + return _action_create( + **{ + "name": action_name, + "description": "", + "critical": True, + "type": "user", + "runbook": make_runbook( + ref(config), + config.attrs_list[0]["target_any_local_reference"], + action_name, + ), + } + ) + + def get_config_action_name(config, config_type): + suffix = config.name + if suffix.startswith(config_type.title() + "_Config"): + suffix = config.name.split(config_type.title() + "_Config")[1] + return config_action_prefix_map[config_type] + re.sub( + r"[^A-Za-z0-9-_]+", "_", suffix + ) + + if cdict.get("restore_config_list") and not cdict.get("snapshot_config_list"): + LOG.error( + "No SnapshotConfig found. Cannot use RestoreConfig without a SnapshotConfig." + ) + sys.exit("Missing snapshot configs") + + if cdict.get("snapshot_config_list") and not cdict.get("restore_config_list"): + LOG.error( + "No RestoreConfig found. Please add/associate a RestoreConfig with the SnapshotConfig(s)." + ) + sys.exit("Missing restore configs") + for config in cdict.get("patch_list", []): + if not isinstance(config, PatchConfigSpecType): + LOG.error( + "{} is not an object of PatchConfig. patch_config is an array of PatchConfig objects".format( + config + ) + ) + sys.exit("{} is not an instance of PatchConfig".format(config)) + config = set_config_type_based_on_target(config, "patch") + + for config in cdict.get("restore_config_list", []): + if not isinstance(config, RestoreConfigSpecType): + LOG.error( + "{} is not an object of RestoreConfig. restore_configs is an array of AppProtection.RestoreConfig objects".format( + config + ) + ) + sys.exit("{} is not an instance of RestoreConfig".format(config)) + config = set_config_type_based_on_target(config, "restore") + a_name = get_config_action_name(config, "restore") + config_action = create_config_action_if_not_present(a_name, config) + if config_action: + cdict["action_list"].append(config_action) + + for config in cdict.get("snapshot_config_list", []): + if not isinstance(config, SnapshotConfigSpecType): + LOG.error( + "{} is not an object of SnapshotConfig. 
snapshot_configs is an array of AppProtection.SnapshotConfig objects".format( + config + ) + ) + sys.exit("{} is not an instance of SnapshotConfig".format(config)) + config = set_config_type_based_on_target(config, "snapshot") + if not config.config_references: + config.config_references = [ref(cdict["restore_config_list"][0])] + a_name = get_config_action_name(config, "snapshot") + config_action = create_config_action_if_not_present(a_name, config) + if config_action: + cdict["action_list"].append(config_action) + + # Set app_protection_policy, app_protection_rule references in corresponding restore config's attrs_list[0] + app_protection_policy_ref = config.attrs_list[0].get( + "app_protection_policy_reference", None + ) + app_protection_rule_ref = config.attrs_list[0].get( + "app_protection_rule_reference", None + ) + if app_protection_policy_ref and app_protection_rule_ref: + for restore_config_ref in config.config_references: + restore_config = restore_config_ref.__self__ + restore_config.attrs_list[0][ + "app_protection_policy_reference" + ] = app_protection_policy_ref + restore_config.attrs_list[0][ + "app_protection_rule_reference" + ] = app_protection_rule_ref + + environments = cdict.pop("environment_reference_list", []) + if len(environments) > 1: + LOG.error("Multiple environments are not allowed in a profile.") + sys.exit(-1) + + # Compile env first + environments = [_e.get_dict() for _e in environments] + environments = [_e["uuid"] for _e in environments] + + if environments: + cdict["environment_reference_list"] = environments + + return cdict + + +class ProfileValidator(PropertyValidator, openapi_type="app_profile"): + __default__ = None + __kind__ = ProfileType + + +def profile(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return ProfileType(name, bases, kwargs) + + +Profile = profile() diff --git a/framework/calm/dsl/builtins/models/project.py b/framework/calm/dsl/builtins/models/project.py new file mode 100644 index 0000000..3c9fee8 --- /dev/null +++ b/framework/calm/dsl/builtins/models/project.py @@ -0,0 +1,180 @@ +from distutils.version import LooseVersion as LV + +from calm.dsl.providers.base import get_provider + +from .entity import EntityType +from calm.dsl.log import get_logging_handle +from calm.dsl.store import Version + +LOG = get_logging_handle(__name__) + + +# Project + + +class ProjectType(EntityType): + __schema_name__ = "Project" + __openapi_type__ = "project" + + def compile(cls): + cdict = super().compile() + + cdict["account_reference_list"] = [] + cdict["subnet_reference_list"] = [] + cdict["external_network_list"] = [] + cdict["default_subnet_reference"] = {} + cdict["cluster_reference_list"] = [] + cdict["vpc_reference_list"] = [] + + CALM_VERSION = Version.get_version("Calm") + + # Populate accounts + provider_list = cdict.pop("provider_list", []) + for provider_obj in provider_list: + provider_data = provider_obj.get_dict() + + if provider_obj.type == "nutanix_pc": + + # From 3.5.0 we support Cluster & VPC whitelisting. 
Client has to take care + # of sending the respective cluster or vpc if not specified in Project Spec + if LV(CALM_VERSION) >= LV("3.5.0"): + # Get information about all subnets of this account + AhvVmProvider = get_provider("AHV_VM") + AhvObj = AhvVmProvider.get_api_obj() + LOG.debug( + "provider_data_subnets:{}".format( + provider_data["subnet_reference_list"] + ) + ) + subnets_list = [ + subnet["uuid"] + for subnet in provider_data["subnet_reference_list"] + ] + external_subnets_list = [ + subnet["uuid"] + for subnet in provider_data["external_network_list"] + ] + subnets_list.extend(external_subnets_list) + filter_query = "" + if subnets_list: + filter_query = "_entity_id_=={}".format("|".join(subnets_list)) + account_uuid = provider_data["account_reference"]["uuid"] + subnets = AhvObj.subnets( + account_uuid=account_uuid, filter_query=filter_query + ) + subnets = subnets["entities"] + LOG.debug("Subnets are: {}".format(subnets)) + + spec_clusters = [ + cluster["uuid"] + for cluster in provider_data["cluster_reference_list"] + ] + spec_vpcs = [ + vpc["uuid"] for vpc in provider_data["vpc_reference_list"] + ] + + # Update provider_data with cluster/vpc of the subnets whose vpc/cluster have not been provided in Project Spec. + for subnet in subnets: + cluster_ref = subnet.get("status", {}).get( + "cluster_reference", {} + ) + vpc_ref = ( + subnet.get("status", {}) + .get("resources", {}) + .get("vpc_reference", {}) + ) + + if cluster_ref and cluster_ref["uuid"] not in spec_clusters: + LOG.debug( + "Cluster with uuid:{} not present in spec, adding".format( + cluster_ref["uuid"] + ) + ) + provider_data["cluster_reference_list"].append( + { + "kind": "cluster", + "name": cluster_ref.get("name", ""), + "uuid": cluster_ref["uuid"], + } + ) + spec_clusters.append(cluster_ref["uuid"]) + + elif vpc_ref and vpc_ref["uuid"] not in spec_vpcs: + LOG.debug( + "VPC with uuid:{} not present in spec, adding".format( + vpc_ref["uuid"] + ) + ) + provider_data["vpc_reference_list"].append( + { + "kind": "vpc", + "name": vpc_ref.get("name", ""), + "uuid": vpc_ref["uuid"], + } + ) + spec_vpcs.append(vpc_ref["uuid"]) + + if "subnet_reference_list" in provider_data: + cdict["subnet_reference_list"].extend( + provider_data["subnet_reference_list"] + ) + + if "external_network_list" in provider_data: + for _network in provider_data["external_network_list"]: + _network.pop("kind", None) + cdict["external_network_list"].append(_network) + + if "default_subnet_reference" in provider_data: + # From 3.2, only subnets from local account can be marked as default + if provider_data.get("subnet_reference_list") or LV( + CALM_VERSION + ) < LV("3.2.0"): + cdict["default_subnet_reference"] = provider_data[ + "default_subnet_reference" + ] + + if "cluster_reference_list" in provider_data: + cdict["cluster_reference_list"].extend( + provider_data.get("cluster_reference_list") + ) + + if "vpc_reference_list" in provider_data: + cdict["vpc_reference_list"].extend( + provider_data.get("vpc_reference_list") + ) + + if "account_reference" in provider_data: + cdict["account_reference_list"].append( + provider_data["account_reference"] + ) + + quotas = cdict.pop("quotas", None) + if quotas: + project_resources = [] + for qk, qv in quotas.items(): + if qk != "VCPUS": + qv *= 1073741824 + + project_resources.append({"limit": qv, "resource_type": qk}) + + cdict["resource_domain"] = {"resources": project_resources} + + # pop out unnecessary attibutes + cdict.pop("environment_definition_list", None) + # empty dict is not accepted for 
default_environment_reference + default_env = cdict.get("default_environment_reference") + if not default_env: + cdict.pop("default_environment_reference", None) + + if not cdict.get("default_subnet_reference"): + cdict.pop("default_subnet_reference", None) + return cdict + + +def project(**kwargs): + name = kwargs.get("name", None) + bases = () + return ProjectType(name, bases, kwargs) + + +Project = project() diff --git a/framework/calm/dsl/builtins/models/project_payload.py b/framework/calm/dsl/builtins/models/project_payload.py new file mode 100644 index 0000000..9bcc391 --- /dev/null +++ b/framework/calm/dsl/builtins/models/project_payload.py @@ -0,0 +1,52 @@ +from .entity import EntityType, Entity +from .validator import PropertyValidator +from .project import ProjectType + + +# Blueprint Payload + + +class ProjectPayloadType(EntityType): + __schema_name__ = "ProjectPayload" + __openapi_type__ = "app_project_payload" + + +class ProjectPayloadValidator(PropertyValidator, openapi_type="app_project_payload"): + __default__ = None + __kind__ = ProjectPayloadType + + +def _project_payload(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return ProjectPayloadType(name, bases, kwargs) + + +ProjectPayload = _project_payload() + + +def create_project_payload(UserProject): + + err = {"error": "", "code": -1} + + if UserProject is None: + err["error"] = "Given project is empty." + return None, err + + if not isinstance(UserProject, ProjectType): + err["error"] = "Given project is not of type Project" + return None, err + + spec = { + "name": UserProject.__name__, + "description": UserProject.__doc__ or "", + "resources": UserProject, + } + + metadata = {"spec_version": 1, "kind": "project", "name": UserProject.__name__} + + UserProjectPayload = _project_payload() + UserProjectPayload.metadata = metadata + UserProjectPayload.spec = spec + + return UserProjectPayload, None diff --git a/framework/calm/dsl/builtins/models/provider_spec.py b/framework/calm/dsl/builtins/models/provider_spec.py new file mode 100644 index 0000000..46b1ffe --- /dev/null +++ b/framework/calm/dsl/builtins/models/provider_spec.py @@ -0,0 +1,81 @@ +import os +import sys +import inspect +from ruamel import yaml +from calm.dsl.providers import get_provider +from calm.dsl.builtins import file_exists + +from .entity import EntityType +from .validator import PropertyValidator +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +class ProviderSpecType(EntityType): + __schema_name__ = "ProviderSpec" + __openapi_type__ = "app_provider_spec" + + +class _ProviderSpec(metaclass=ProviderSpecType): + def __init__(self, spec): + + self.spec = spec + + def __validate__(self, provider_type): + + Provider = get_provider(provider_type) + Provider.validate_spec(self.spec) + + return self.spec + + def __get__(self, instance, cls): + + return self.__validate__(cls.provider_type) + + +class ProviderSpecValidator(PropertyValidator, openapi_type="app_provider_spec"): + __default__ = None + __kind__ = ProviderSpecType + + +def provider_spec(spec): + return _ProviderSpec(spec) + + +def read_spec(filename, depth=1): + file_path = os.path.join( + os.path.dirname(inspect.getfile(sys._getframe(depth))), filename + ) + + if not file_exists(file_path): + LOG.debug("file {} not found at location {}".format(filename, file_path)) + raise ValueError("file {} not found".format(filename)) + + with open(file_path, "r") as f: + spec = yaml.safe_load(f.read()) + + return spec + + +def 
read_provider_spec(filename): + spec = read_spec(filename, depth=2) + return provider_spec(spec) + + +def read_ahv_spec(filename, disk_packages={}): + spec = read_spec(filename, depth=2) + if disk_packages: + Provider = get_provider("AHV_VM") + Provider.update_vm_image_config(spec, disk_packages) + + return provider_spec(spec) + + +def read_vmw_spec(filename, vm_template=None): + spec = read_spec(filename, depth=2) + if vm_template: + Provider = get_provider("VMWARE_VM") + Provider.update_vm_image_config(spec, vm_template) + + return provider_spec(spec) diff --git a/framework/calm/dsl/builtins/models/providers.py b/framework/calm/dsl/builtins/models/providers.py new file mode 100644 index 0000000..c08a31c --- /dev/null +++ b/framework/calm/dsl/builtins/models/providers.py @@ -0,0 +1,98 @@ +from .entity import Entity, EntityType +from .validator import PropertyValidator +from .calm_ref import Ref +from calm.dsl.log import get_logging_handle + + +LOG = get_logging_handle(__name__) + + +class AccountProviderType(EntityType): + __schema_name__ = "AccountProvider" + __openapi_type__ = "app_account_provider" + + +class AccountProviderValidator(PropertyValidator, openapi_type="app_account_provider"): + __default__ = None + __kind__ = AccountProviderType + + +def account_provider(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return AccountProviderType(name, bases, kwargs) + + +# TODO Add validation on account passed as parameter is of same type as class +class Provider: + def __new__(cls, *args, **kwargs): + raise TypeError("'{}' is not callable".format(cls.__name__)) + + class Ntnx: + def __new__(cls, account, subnets=[], clusters=[], vpcs=[]): + # TODO add key check for `host_pc` instead of name + host_pc = False + if account["name"] == "NTNX_LOCAL_AZ": + host_pc = True + + return account_provider( + type="nutanix_pc", + account_reference=account, + external_network_list=subnets if not host_pc else [], + subnet_reference_list=subnets if host_pc else [], + default_subnet_reference=subnets[0] if subnets else {}, + cluster_reference_list=clusters, + vpc_reference_list=vpcs, + ) + + class Local_Az: + def __new__(cls, subnets=[], clusters=[], vpcs=[]): + + # TODO add key check for `host_pc` instead of name + account_name = "NTNX_LOCAL_AZ" + account = Ref.Account(account_name) + + return account_provider( + type="nutanix_pc", + account_reference=account, + subnet_reference_list=subnets, + default_subnet_reference=subnets[0] if subnets else {}, + cluster_reference_list=clusters, + vpc_reference_list=vpcs, + ) + + class Remote_Az: + def __new__(cls, account, subnets=[], clusters=[], vpcs=[]): + + return account_provider( + type="nutanix_pc", + account_reference=account, + external_network_list=subnets, + default_subnet_reference=subnets[0] if subnets else {}, + cluster_reference_list=clusters, + vpc_reference_list=vpcs, + ) + + class Aws: + def __new__(cls, account): + return account_provider(type="aws", account_reference=account) + + class Azure: + def __new__(cls, account): + return account_provider(type="azure", account_reference=account) + + class Gcp: + def __new__(cls, account): + return account_provider(type="gcp", account_reference=account) + + class Vmware: + def __new__(cls, account): + return account_provider(type="vmware", account_reference=account) + + class K8s: + def __new__(cls, account): + return account_provider(type="k8s", account_reference=account) + + class Custom_Provider: + def __new__(cls, account): + return account_provider(type="custom_provider", 
account_reference=account) diff --git a/framework/calm/dsl/builtins/models/published_service.py b/framework/calm/dsl/builtins/models/published_service.py new file mode 100644 index 0000000..830cc32 --- /dev/null +++ b/framework/calm/dsl/builtins/models/published_service.py @@ -0,0 +1,29 @@ +from .entity import EntityType, Entity +from .validator import PropertyValidator + + +# Service + + +class PublishedServiceType(EntityType): + __schema_name__ = "PublishedService" + __openapi_type__ = "app_published_service" + + def get_task_target(cls): + return cls.get_ref() + + +class PublishedServiceValidator( + PropertyValidator, openapi_type="app_published_service" +): + __default__ = None + __kind__ = PublishedServiceType + + +def published_service(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return PublishedServiceType(name, bases, kwargs) + + +PublishedService = published_service() diff --git a/framework/calm/dsl/builtins/models/readiness_probe.py b/framework/calm/dsl/builtins/models/readiness_probe.py new file mode 100644 index 0000000..2d86adc --- /dev/null +++ b/framework/calm/dsl/builtins/models/readiness_probe.py @@ -0,0 +1,33 @@ +from .entity import EntityType, Entity +from .validator import PropertyValidator + + +# Readiness Probe + + +class ReadinessProbeType(EntityType): + __schema_name__ = "ReadinessProbe" + __openapi_type__ = "app_readiness_probe" + + def compile(cls): + cdict = super().compile() + cred = cdict.pop("login_credential_local_reference", None) + # If cred is not None, reset it again + if cred: + cdict["login_credential_local_reference"] = cred + + return cdict + + +class ReadinessProbeValidator(PropertyValidator, openapi_type="app_readiness_probe"): + __default__ = None + __kind__ = ReadinessProbeType + + +def readiness_probe(**kwargs): + name = kwargs.pop("name", None) + bases = (Entity,) + return ReadinessProbeType(name, bases, kwargs) + + +ReadinessProbe = readiness_probe() diff --git a/framework/calm/dsl/builtins/models/ref.py b/framework/calm/dsl/builtins/models/ref.py new file mode 100644 index 0000000..b20e245 --- /dev/null +++ b/framework/calm/dsl/builtins/models/ref.py @@ -0,0 +1,48 @@ +from .entity import EntityType, Entity +from .validator import PropertyValidator +from calm.dsl.store import Cache +from calm.dsl.constants import CACHE + + +# Ref + + +class RefType(EntityType): + __schema_name__ = "Ref" + __openapi_type__ = "app_ref" + + @classmethod + def pre_decompile(mcls, cdict, context, prefix=""): + cdict = super().pre_decompile(cdict, context, prefix=prefix) + + # Class name for ref objects should always be taken randomly + cdict["__name__"] = None + + return cdict + + def get_user_attrs(cls): + """returns user attrs for ref class""" + + attrs = super().get_user_attrs() + attrs.pop("__self__", None) # Not a user attr for reference object + + return attrs + + +class RefValidator(PropertyValidator, openapi_type="app_ref"): + __default__ = None + __kind__ = RefType + + +def _ref(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return RefType(name, bases, kwargs) + + +def ref(cls): + + if isinstance(cls, RefType): + return cls + + return cls.get_ref() diff --git a/framework/calm/dsl/builtins/models/runbook.py b/framework/calm/dsl/builtins/models/runbook.py new file mode 100644 index 0000000..396a3aa --- /dev/null +++ b/framework/calm/dsl/builtins/models/runbook.py @@ -0,0 +1,279 @@ +import ast +import sys +import inspect + +from .ref import ref +from .task import dag +from .entity import EntityType, Entity +from 
.endpoint import EndpointType +from .credential import CredentialType +from .ref import RefType +from .descriptor import DescriptorType +from .validator import PropertyValidator +from .node_visitor import GetCallNodes +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +class RunbookType(EntityType): + __schema_name__ = "Runbook" + __openapi_type__ = "app_runbook" + + def __call__(*args, **kwargs): + pass + + @classmethod + def pre_decompile(mcls, cdict, context=[], prefix=""): + + cdict = super().pre_decompile(cdict, context=context, prefix=prefix) + # Removing additional attributes + cdict.pop("state", None) + cdict.pop("message_list", None) + return cdict + + +class RunbookValidator(PropertyValidator, openapi_type="app_runbook"): + __default__ = None + __kind__ = RunbookType + + +def _runbook(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return RunbookType(name, bases, kwargs) + + +Runbook = _runbook() + + +def runbook_create(**kwargs): + name = kwargs.get("name", kwargs.get("__name__", None)) + bases = (Entity,) + return RunbookType(name, bases, kwargs) + + +class runbook(metaclass=DescriptorType): + """ + runbook descriptor + """ + + def __init__(self, user_func): + """ + A decorator for generating runbooks from a function definition. + Args: + user_func (function): User defined function + Returns: + (Runbook): Runbook class + """ + + # Generate the entity names + self.action_name = user_func.__name__ + self.action_description = user_func.__doc__ or "" + self.user_func = user_func + self.user_runbook = None + self.task_target = None + + if self.__class__ == runbook: + self.__get__() + + def __call__(self, name=None): + pass + + def __get__(self, instance=None, cls=None): + """ + Translate the user defined function to an runbook. + Args: + instance (object): Instance of cls + cls (Entity): Entity that this runbook is defined on + Returns: + (RunbookType): Generated Runbook class + """ + # Get the task target + if hasattr(cls, "get_task_target") and getattr(cls, "__has_dag_target__", True): + self.task_target = cls.get_task_target() or self.task_target + + # Get the source code for the user function. + # Also replace tabs with 4 spaces. + src = inspect.getsource(self.user_func).replace("\t", " ") + + # Get the indent since this decorator is used within class definition + # For this we split the code on newline and count the number of spaces + # before the @runbook decorator. + # src = " @runbook\n def runbook1():\n CalmTask.Exec.ssh("Hello World")" + # The indentation here would be 4. + padding = src.split("\n")[0].rstrip(" ").split(" ").count("") + + # This recreates the source code without the indentation and the + # decorator. + new_src = "\n".join(line[padding:] for line in src.split("\n")[1:]) + + # Get all the child tasks by parsing the source code and visiting the + # ast.Call nodes. ast.Assign nodes become variables. 
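+ # Illustrative example (hypothetical user code, following the CalmTask usage hinted at in the sample source above): a decorated function such as
+ #     @runbook
+ #     def MyRunbook(endpoints=[], credentials=[]):
+ #         CalmTask.Exec.ssh(name="Task1", script="echo 'step one'")
+ #         CalmTask.Exec.ssh(name="Task2", script="echo 'step two'")
+ # is parsed below: each call node becomes a task, consecutive calls are joined by a
+ # DAG edge (Task1 -> Task2), and assignments in the body become runbook variables.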
+ node = ast.parse(new_src) + func_globals = self.user_func.__globals__.copy() + + # for runbooks updating func_globals with endpoints and credentials passed in kwargs + if self.__class__ == runbook: + args = dict() + sig = inspect.signature(self.user_func) + for name, param in sig.parameters.items(): + args[name] = param.default + if args.get("credentials", []): + func_globals.update({"credentials": args["credentials"]}) + if args.get("endpoints", []): + func_globals.update({"endpoints": args["endpoints"]}) + + node_visitor = GetCallNodes( + func_globals, + target=self.task_target, + is_runbook=True if self.__class__ == runbook else False, + ) + try: + node_visitor.visit(node) + except Exception as ex: + LOG.exception(ex) + sys.exit(-1) + + tasks, variables, task_list = node_visitor.get_objects() + edges = [] + child_tasks = [] + + def create_edges(_task_list, from_task=None): + + if len(_task_list) == 0: + return + to_tasks = _task_list[0] + if not isinstance(to_tasks, list): + to_tasks = [to_tasks] + for to_task in to_tasks: + if isinstance(to_task, list): + create_edges(to_task, from_task=from_task) + else: + child_tasks.append(to_task) + if from_task: + edges.append((from_task.get_ref(), to_task.get_ref())) + + for from_tasks, to_tasks in zip(_task_list, _task_list[1:]): + if not isinstance(from_tasks, list): + from_tasks = [from_tasks] + if not isinstance(to_tasks, list): + to_tasks = [to_tasks] + for to_task in to_tasks: + if not isinstance(to_task, list): + child_tasks.append(to_task) + for from_task in from_tasks: + if isinstance(from_task, list): + raise ValueError( + "Tasks are not supported after parallel in runbooks" + ) + if isinstance(to_task, list) and len(from_tasks) == 1: + create_edges(to_task, from_task=from_task) + else: + edges.append((from_task.get_ref(), to_task.get_ref())) + + create_edges(task_list) + # Note - Server checks for name uniqueness in runbooks across actions + # Generate unique names using class name and func name. 
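+ # For example (hypothetical names): an action function "custom_action" defined on a
+ # class named "MyService" produces the prefix "MyService_custom_action", so the
+ # generated runbook below is named "MyService_custom_action_runbook" and its DAG
+ # "MyService_custom_action_dag".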
+ prefix = ( + (getattr(cls, "name", "") or getattr(cls, "__name__", "")) + + "_" + + self.user_func.__name__ + if hasattr(cls, "__name__") or hasattr(cls, "name") + else "" + self.user_func.__name__ + ) + + runbook_name = prefix + "_runbook" + dag_name = prefix + "_dag" + + # First create the dag + self.user_dag = dag( + name=dag_name, + child_tasks=child_tasks if self.__class__ == runbook else tasks, + edges=edges, + target=self.task_target, + ) + + # Modify the user runbook + self.user_runbook = runbook_create(**{"name": runbook_name}) + self.user_runbook.main_task_local_reference = self.user_dag.get_ref() + self.user_runbook.tasks = [self.user_dag] + tasks + self.user_runbook.variables = [variable for variable in variables.values()] + + # Finally create the runbook service, only for runbook class not action + if self.__class__ == runbook: + args = dict() + sig = inspect.signature(self.user_func) + for name, param in sig.parameters.items(): + args[name] = param.default + + from .runbook_service import _runbook_service_create + + self.runbook = _runbook_service_create(**{"runbook": self.user_runbook}) + + credentials = args.pop("credentials", []) + endpoints = args.pop("endpoints", []) + default_target = args.pop("default", 0) + + for arg in args: + raise ValueError("{} is an unexpected argument.".format(arg)) + + if not isinstance(credentials, list): + raise TypeError("{} is not of type {}".format(credentials, list)) + for cred in credentials: + if not isinstance(cred, CredentialType): + raise TypeError("{} is not of type {}".format(cred, CredentialType)) + + if not isinstance(endpoints, list): + raise TypeError("{} is not of type {}".format(endpoints, list)) + for ep in endpoints: + if not isinstance(ep, EndpointType): + raise TypeError("{} is not of type {}".format(ep, EndpointType)) + if not ep.type: + raise ValueError( + "Existing endpoint {} is not allowed in endpoints argument.".format( + ep + ) + ) + + if default_target is not False: + if not isinstance(default_target, RefType) and not isinstance( + default_target, int + ): + raise TypeError( + "{} is not of type {} or {}".format( + default_target, RefType, "Integer" + ) + ) + elif isinstance(default_target, RefType): + self.runbook.default_target = default_target + elif len(endpoints) > 0: + if len(endpoints) <= int(default_target): + raise TypeError( + "No Endpoint present at {} index for default Target".format( + int(default_target) + ) + ) + self.runbook.default_target = ref(endpoints[int(default_target)]) + + self.runbook.credentials = credentials + self.runbook.endpoints = endpoints + return self.runbook + + else: + return self.user_runbook + + +# helper function to get runbook json dump +def runbook_json(DslRunbook): + + if not isinstance(DslRunbook, runbook): + raise TypeError("{} is not of type {}".format(DslRunbook, runbook)) + return DslRunbook.runbook.json_dumps(pprint=True) + + +class branch: + __calm_type__ = "branch" + + def __new__(cls, *args, **kwargs): + return cls diff --git a/framework/calm/dsl/builtins/models/runbook_payload.py b/framework/calm/dsl/builtins/models/runbook_payload.py new file mode 100644 index 0000000..7592b58 --- /dev/null +++ b/framework/calm/dsl/builtins/models/runbook_payload.py @@ -0,0 +1,52 @@ +from .entity import EntityType, Entity +from .validator import PropertyValidator +from .runbook import runbook + + +# Runbook Payload + + +class RunbookPayloadType(EntityType): + __schema_name__ = "RunbookPayload" + __openapi_type__ = "runbook_payload" + + +class 
RunbookPayloadValidator(PropertyValidator, openapi_type="runbook_payload"): + __default__ = None + __kind__ = RunbookPayloadType + + +def _runbook_payload(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return RunbookPayloadType(name, bases, kwargs) + + +RunbookPayload = _runbook_payload() + + +def create_runbook_payload(UserRunbook): + + err = {"error": "", "code": -1} + + if UserRunbook is None: + err["error"] = "Given runbook is empty." + return None, err + + if not isinstance(UserRunbook, runbook): + err["error"] = "Given runbook is not of type Runbook" + return None, err + + spec = { + "name": UserRunbook.action_name, + "description": UserRunbook.action_description or "", + "resources": UserRunbook.runbook, + } + + metadata = {"spec_version": 1, "kind": "runbook", "name": UserRunbook.action_name} + + UserRunbookPayload = _runbook_payload() + UserRunbookPayload.metadata = metadata + UserRunbookPayload.spec = spec + + return UserRunbookPayload, None diff --git a/framework/calm/dsl/builtins/models/runbook_service.py b/framework/calm/dsl/builtins/models/runbook_service.py new file mode 100644 index 0000000..7140aad --- /dev/null +++ b/framework/calm/dsl/builtins/models/runbook_service.py @@ -0,0 +1,35 @@ +from .action import EntityType, Entity +from .validator import PropertyValidator + +# RunbookService + + +class RunbookServiceType(EntityType): + __schema_name__ = "RunbookService" + __openapi_type__ = "runbook_service" + + def compile(cls): + cdict = super().compile() + if (cdict.get("default_target_reference", None) or None) is None: + cdict.pop("default_target_reference", None) + return cdict + + +class RunbookServiceValidator(PropertyValidator, openapi_type="runbook_service"): + __default__ = None + __kind__ = RunbookServiceType + + +def runbook_service(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return RunbookServiceType(name, bases, kwargs) + + +RunbookService = runbook_service() + + +def _runbook_service_create(**kwargs): + name = kwargs.get("name", kwargs.get("__name__", None)) + bases = (RunbookService,) + return RunbookServiceType(name, bases, kwargs) diff --git a/framework/calm/dsl/builtins/models/schema.py b/framework/calm/dsl/builtins/models/schema.py new file mode 100644 index 0000000..73dc885 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schema.py @@ -0,0 +1,191 @@ +""" Schema should be according to OpenAPI 3 format with x-calm-dsl-type extension""" + +import json +from copy import deepcopy +from io import StringIO +from distutils.version import LooseVersion as LV + +from ruamel import yaml +from jinja2 import Environment, PackageLoader +import jsonref +from bidict import bidict + +from .validator import get_property_validators +from calm.dsl.store import Version +from calm.dsl.log import get_logging_handle + + +LOG = get_logging_handle(__name__) +_SCHEMAS = None + + +def _get_all_schemas(): + global _SCHEMAS + if not _SCHEMAS: + _SCHEMAS = _load_all_schemas() + return _SCHEMAS + + +def _load_all_schemas(schema_file="main.yaml.jinja2"): + + loader = PackageLoader(__name__, "schemas") + env = Environment(loader=loader) + template = env.get_template(schema_file) + + tdict = yaml.safe_load(StringIO(template.render())) + + # Check if all references are resolved + tdict = jsonref.loads(json.dumps(tdict)) + # print(json.dumps(tdict, cls=EntityJSONEncoder, indent=4, separators=(",", ": "))) + + schemas = tdict["components"]["schemas"] + return schemas + + +def get_schema(name): + + schemas = _get_all_schemas() + schema = 
schemas.get(name, None) + if not schema: + LOG.debug("Schema name can be one of {}".format(list(schemas.keys()))) + raise TypeError("Invalid schema name {} given".format(name)) + + return schema + + +def get_schema_props(name): + schema = get_schema(name) + schema_props = schema.get("properties", None) + schema_type = schema.get("x-calm-dsl-type", None) + if schema_type == "app_descriptor": + schema_props = {} + elif schema_type == "app_provider_spec": + schema_props = {} + elif schema_type == "app_calm_ref": + schema_props = {} + elif not schema_props: + LOG.debug("Schema properties for schema {} is not available".format(name)) + raise TypeError("Invalid schema name {} given".format(name)) + + return schema_props + + +def get_validator_details(schema_props, name): + + object_type = False + is_array = False + object_validators = {} + object_defaults = {} + object_display_map = {} + + props = schema_props.get(name, None) + if props is None: + raise Exception("Invalid schema {} given".format(props)) + + type_ = props.get("type", None) + if type_ is None: + raise Exception("Invalid schema {} given".format(schema_props)) + + if type_ == "object": + type_ = props.get("x-calm-dsl-type", None) + if type_ is None: + raise Exception("x-calm-dsl-type extension for {} not found".format(name)) + elif type_ == "object": + object_type = True + for name in props.get("properties", {}): + attr_props = props["properties"].get(name, dict()) + calm_version = Version.get_version("Calm") + + # dev machines do not follow standard version protocols. Avoid matching there + attribute_min_version = str( + attr_props.get("x-calm-dsl-min-version", "") + ) + if not calm_version: + calm_version = "2.9.0" # Raise warning and set default to 2.9.0 + + # If attribute version is less than calm version, ignore it + if attribute_min_version and LV(attribute_min_version) > LV( + calm_version + ): + continue + + validator, is_array, default = get_validator_details( + props["properties"], name + ) + attr_name = props["properties"][name].get( + "x-calm-dsl-display-name", name + ) + object_validators[attr_name] = (validator, is_array) + object_display_map[attr_name] = name + + if attr_props.get("x-calm-dsl-default-required", True): + object_defaults[attr_name] = default + + if type_ == "array": + item_props = props.get("items", None) + item_type = item_props.get("type", None) + if item_type is None: + LOG.debug("Item type not found in schema {}".format(item_props)) + raise Exception("Invalid schema {} given".format(item_props)) + + ValidatorType, _, _ = get_validator_details(props, "items") + return ValidatorType, True, list + + property_validators = get_property_validators() + ValidatorType = property_validators.get(type_, None) + if object_type: + ValidatorType = ValidatorType.__kind__( + object_validators, object_defaults, object_display_map + ) + if ValidatorType is None: + raise TypeError("Type {} not supported".format(type_)) + + # Get default from schema if given, else set default from validator type + class NotDefined: + pass + + default = None + schema_default = props.get("default", NotDefined) + if schema_default is NotDefined: + class_default = ValidatorType.get_default(is_array) + default = class_default + else: + default = lambda: deepcopy(schema_default) # noqa: E731 + + return ValidatorType, is_array, default + + +def get_validators_with_defaults(schema_props): + + validators = {} + defaults = {} + display_map = bidict() + for name, props in schema_props.items(): + calm_version = Version.get_version("Calm") + + # dev 
machines do not follow standard version protocols. Avoid matching there + attribute_min_version = str(props.get("x-calm-dsl-min-version", "")) + if not calm_version: + # Raise warning and set default to 2.9.0 + calm_version = "2.9.0" + + # If attribute version is less than calm version, ignore it + if attribute_min_version and LV(attribute_min_version) > LV(calm_version): + continue + + ValidatorType, is_array, default = get_validator_details(schema_props, name) + attr_name = props.get("x-calm-dsl-display-name", name) + validators[attr_name] = (ValidatorType, is_array) + if props.get("x-calm-dsl-default-required", True): + defaults[attr_name] = default + display_map[attr_name] = name + + return validators, defaults, display_map + + +def get_schema_details(schema_name): + + schema_props = get_schema_props(schema_name) + validators, defaults, display_map = get_validators_with_defaults(schema_props) + + return schema_props, validators, defaults, display_map diff --git a/framework/calm/dsl/builtins/models/schemas/account_provider.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/account_provider.yaml.jinja2 new file mode 100644 index 0000000..9f38b9e --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/account_provider.yaml.jinja2 @@ -0,0 +1,44 @@ +{% macro AccountProvider() -%} + +title: Account Provider +type: object +x-calm-dsl-type: app_account_provider +properties: + type: + type: string + account_reference: + type: object + x-calm-dsl-type: app_calm_ref + subnet_reference_list: + type: array + items: + type: object + x-calm-dsl-type: app_calm_ref + external_network_list: + type: array + items: + type: object + x-calm-dsl-type: app_calm_ref + default_subnet_reference: + type: object + x-calm-dsl-type: app_calm_ref + cluster_reference_list: + type: array + items: + type: object + x-calm-dsl-type: app_calm_ref + vpc_reference_list: + type: array + items: + type: object + x-calm-dsl-type: app_calm_ref + +{%- endmacro %} + + +{% macro AccountProviderSchema() -%} + +AccountProvider: + {{ AccountProvider() | indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/action.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/action.yaml.jinja2 new file mode 100644 index 0000000..6bd6775 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/action.yaml.jinja2 @@ -0,0 +1,26 @@ +{% macro Action() -%} + +title: Action +type: object +x-calm-dsl-type: app_action +properties: + name: + type: string + description: + type: string + type: + type: string + critical: + type: boolean + runbook: + $ref: '#/components/schemas/Runbook' + +{%- endmacro %} + + +{% macro ActionSchema() -%} + +Action: + {{ Action()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/ahv_recovery_vm.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/ahv_recovery_vm.yaml.jinja2 new file mode 100644 index 0000000..e41e6c1 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/ahv_recovery_vm.yaml.jinja2 @@ -0,0 +1,63 @@ +{% import "ref.yaml.jinja2" as ref %} + + +{% macro AhvVmRecoveryResources() -%} + +title: Ahv VM Post Recovery Override Resources +type: object +x-calm-dsl-type: recovery_vm_ahv_resources +properties: + nic_list: + x-calm-dsl-display-name: nics + type: array + items: + $ref: '#/components/schemas/AhvNic' + num_vcpus_per_socket: + type: integer + x-calm-dsl-display-name: cores_per_vCPU + default: 1 + num_sockets: + type: integer + x-calm-dsl-display-name: vCPUs + default: 2 + memory_size_mib: + 
x-calm-dsl-display-name: memory + type: integer + default: 4 + account_uuid: + type: string + gpu_list: + type: array + x-calm-dsl-display-name: gpus + items: + $ref: '#/components/schemas/AhvGpu' + +{%- endmacro %} + + +{% macro AhvVmRecoverySpec() -%} + +title: AhvVmRecoverySpec +type: object +x-calm-dsl-type: recovery_vm_ahv_spec +properties: + vm_name: + type: string + vm_override_resources: + type: object + x-calm-dsl-type: recovery_vm_ahv_resources + recovery_point: + type: object + x-calm-dsl-type: app_calm_ref + +{%- endmacro -%} + + +{% macro AhvRecoveryVmSchema() -%} + +AhvVmRecoverySpec: + {{ AhvVmRecoverySpec() | indent(2) }} +AhvVmRecoveryResources: + {{ AhvVmRecoveryResources() | indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/ahv_vm.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/ahv_vm.yaml.jinja2 new file mode 100644 index 0000000..c0e8683 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/ahv_vm.yaml.jinja2 @@ -0,0 +1,294 @@ +{% import "ref.yaml.jinja2" as ref %} +{% import "calm_ref.yaml.jinja2" as calm_ref %} + + +{% macro AhvVmVpc() -%} +type: object +x-calm-dsl-type: vm_ahv_vpc +properties: + uuid: + type: string + kind: + type: string + default: vpc + name: + type: string + +{%- endmacro %} + + +{% macro AhvNic() -%} + +title: AHV NIC +type: object +x-calm-dsl-type: vm_ahv_nic +properties: + network_function_nic_type: + type: string + default: INGRESS + nic_type: + type: string + default: NORMAL_NIC + subnet_reference: + type: object + x-calm-dsl-type: object + properties: + uuid: + type: string + kind: + type: string + default: subnet + name: + type: string + cluster: + type: string + default: null + network_function_chain_reference: + type: object + x-calm-dsl-type: object + properties: + uuid: + type: string + kind: + type: string + default: network_function_chain + name: + type: string + default: null + mac_address: + type: string + ip_endpoint_list: + type: array + items: + type: object + x-calm-dsl-type: object + properties: + ip: + type: string + type: + type: string + enum: [ASSIGNED, LEARNED] + vpc_reference: + {{ AhvVmVpc() | indent(4) }} + x-calm-dsl-min-version: 3.5.0 + x-calm-dsl-not-required-if-none: true + + +{%- endmacro %} + + +{% macro AhvDisk() -%} + +title: AHV Disk +type: object +x-calm-dsl-type: vm_ahv_disk +properties: + data_source_reference: + type: object + x-calm-dsl-type: object + properties: + name: + type: string + kind: + type: string + enum: [image] + default: image + uuid: + type: string + default: null + device_properties: + type: object + x-calm-dsl-type: object + properties: + device_type: + type: string + default: DISK + disk_address: + type: object + x-calm-dsl-type: object + properties: + device_index: + type: integer + default: 0 + adapter_type: + type: string + default: SCSI + disk_size_mib: + type: integer + default: 0 + bootable: + type: boolean + default: False + +{%- endmacro %} + + +{% macro AhvGuestCustomization() -%} + +title: Ahv Guest Customization +type: object +x-calm-dsl-type: vm_ahv_gc +properties: + sysprep: + type: object + x-calm-dsl-type: object + properties: + unattend_xml: + type: string + install_type: + type: string + default: PREPARED + is_domain: + type: boolean + default: False + domain: + type: string + dns_ip: + type: string + dns_search_path: + type: string + domain_credential_reference: + x-calm-dsl-display-name: credential + {{ ref.Ref() | indent(8) }} + default: null + cloud_init: + type: object + x-calm-dsl-type: object + properties: + 
user_data: + type: string + default: null + +{%- endmacro %} + + +{% macro AhvGpu() -%} + +title: AHV GPU +type: object +x-calm-dsl-type: vm_ahv_gpu +properties: + vendor: + type: string + mode: + type: string + device_id: + type: integer + default: -1 + +{%- endmacro %} + + +{% macro AhvVmResources() -%} + +title: AHV Resources +type: object +x-calm-dsl-type: vm_ahv_resources +properties: + nic_list: + x-calm-dsl-display-name: nics + type: array + items: + $ref: '#/components/schemas/AhvNic' + num_vcpus_per_socket: + type: integer + x-calm-dsl-display-name: cores_per_vCPU + default: 1 + num_sockets: + type: integer + x-calm-dsl-display-name: vCPUs + default: 2 + memory_size_mib: + x-calm-dsl-display-name: memory + type: integer + default: 4 + power_state: + type: string + enum: [ON, OFF] + default: ON + account_uuid: + type: string + gpu_list: + type: array + x-calm-dsl-display-name: gpus + items: + $ref: '#/components/schemas/AhvGpu' + disk_list: + type: array + x-calm-dsl-display-name: disks + items: + $ref: '#/components/schemas/AhvDisk' + boot_type: # Will merge to boot_config at compile time + type: string + enum: [LEGACY, UEFI] + default: LEGACY + guest_customization: + $ref : '#/components/schemas/AhvGuestCustomization' + serial_port_list: + type: object + x-calm-dsl-display-name: serial_ports + x-calm-dsl-type: dict + boot_config: + type: object + x-calm-dsl-type: dict + +{%- endmacro %} + + +{% macro AhvVmCluster() -%} +type: object +x-calm-dsl-type: vm_ahv_cluster +properties: + uuid: + type: string + kind: + type: string + default: cluster + name: + type: string + +{%- endmacro %} + + +{% macro AhvVm() -%} + +title: AHV VM Create Spec +type: object +x-calm-dsl-type: vm_ahv +properties: + name: + type: string + categories: + type: object + x-calm-dsl-type: dict + cluster_reference: + x-calm-dsl-display-name: cluster + x-calm-dsl-min-version: 3.5.0 + x-calm-dsl-not-required-if-none: true + {{ calm_ref.CalmRef() | indent(4) }} + resources: + $ref: '#/components/schemas/AhvVmResources' + +{%- endmacro %} + + +{% macro AhvVmSchema() -%} + +AhvDisk: + {{ AhvDisk() | indent(2) }} +AhvNic: + {{ AhvNic() | indent(2) }} +AhvGuestCustomization: + {{ AhvGuestCustomization() | indent(2) }} +AhvVmResources: + {{ AhvVmResources() | indent(2) }} +AhvVm: + {{ AhvVm() | indent(2) }} +AhvGpu: + {{ AhvGpu() | indent(2) }} +AhvVmVpc: + {{ AhvVmVpc() | indent(2) }} +AhvVmCluster: + {{ AhvVmCluster() | indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/blueprint.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/blueprint.yaml.jinja2 new file mode 100644 index 0000000..10eb360 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/blueprint.yaml.jinja2 @@ -0,0 +1,64 @@ +{% import "ref.yaml.jinja2" as ref %} + +{% macro Blueprint() -%} + +title: Blueprint +type: object +x-calm-dsl-type: app_blueprint +properties: + type: + type: string + default: 'USER' + service_definition_list: + x-calm-dsl-display-name: services + description: Service definitions for blueprint. + type: array + items: + $ref: '#/components/schemas/Service' + package_definition_list: + x-calm-dsl-display-name: packages + description: Package definitions for blueprint. 
+ type: array + items: + $ref: '#/components/schemas/Package' + published_service_definition_list: + x-calm-dsl-display-name: published_services + description: Published Service defination for K8S pods + type: array + items: + $ref: '#/components/schemas/PublishedService' + substrate_definition_list: + x-calm-dsl-display-name: substrates + description: Substrate definitions for blueprint. + type: array + items: + $ref: '#/components/schemas/Substrate' + credential_definition_list: + x-calm-dsl-display-name: credentials + description: Credential definitions for blueprint. + type: array + items: + $ref: '#/components/schemas/Credential' + app_profile_list: + x-calm-dsl-display-name: profiles + description: App profile definitions for blueprint. + type: array + items: + $ref: '#/components/schemas/Profile' + client_attrs: + type: object + additionalProperties: true + x-calm-dsl-type: dict + default_credential_local_reference: + x-calm-dsl-display-name: default_cred + {{ ref.Ref() | indent(4) }} + +{%- endmacro %} + + +{% macro BlueprintSchema() -%} + +Blueprint: + {{ Blueprint()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/blueprint_payload.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/blueprint_payload.yaml.jinja2 new file mode 100644 index 0000000..4f948bf --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/blueprint_payload.yaml.jinja2 @@ -0,0 +1,37 @@ +{% macro BlueprintPayload() -%} + +title: BlueprintPayload +type: object +x-calm-dsl-type: app_blueprint_payload +properties: + spec: + type: object + x-calm-dsl-type: dict + properties: + name: + type: string + description: Blueprint name + description: + type: string + description: Blueprint description + resources: + type: object + properties: + $ref: '#/components/schemas/Blueprint' + + api_version: + type: string + default: "3.0" + + metadata: # TODO - add other fields including categories, etc + $ref: '#/components/schemas/Metadata' + +{%- endmacro %} + + +{% macro BlueprintPayloadSchema() -%} + +BlueprintPayload: + {{ BlueprintPayload()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/brownfield_deployment.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/brownfield_deployment.yaml.jinja2 new file mode 100644 index 0000000..987fbbf --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/brownfield_deployment.yaml.jinja2 @@ -0,0 +1,42 @@ +{% import "ref.yaml.jinja2" as ref %} +{% import "brownfield_vm.yaml.jinja2" as brownfield_vm %} + +{% macro BrownfieldDeployment() -%} + +title: BrownfieldDeployment +type: object +x-calm-dsl-type: app_brownfield_deployment +properties: + published_service_local_reference_list: + x-calm-dsl-display-name: published_services + type: array + items: + $ref: '#/components/schemas/Ref' + package_local_reference_list: + x-calm-dsl-display-name: packages + type: array + items: + $ref: '#/components/schemas/Ref' + substrate_local_reference: + x-calm-dsl-display-name: substrate + {{ ref.Ref() | indent(4) }} + name: + type: string + brownfield_instance_list: + x-calm-dsl-display-name: instances + type: array + items: + $ref: '#/components/schemas/BrownfieldVm' + type: + type: string + default: BROWNFIELD + +{%- endmacro %} + + +{% macro BrownfieldDeploymentSchema() -%} + +BrownfieldDeployment: + {{ BrownfieldDeployment()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/brownfield_vm.yaml.jinja2 
b/framework/calm/dsl/builtins/models/schemas/brownfield_vm.yaml.jinja2 new file mode 100644 index 0000000..1f0a130 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/brownfield_vm.yaml.jinja2 @@ -0,0 +1,28 @@ +{% macro BrownfieldVm() -%} + +title: BrownfieldVm +type: object +x-calm-dsl-type: app_brownfield_vm +properties: + instance_name: + type: string + instance_id: + type: string + address: + type: array + items: + type: string + provider: + type: string + account_uuid: + type: string + +{%- endmacro %} + + +{% macro BrownfieldVmSchema() -%} + +BrownfieldVm: + {{ BrownfieldVm()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/calm_ref.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/calm_ref.yaml.jinja2 new file mode 100644 index 0000000..f3c7742 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/calm_ref.yaml.jinja2 @@ -0,0 +1,15 @@ +{% macro CalmRef() -%} + +title: CalmRef +type: object +x-calm-dsl-type: app_calm_ref + +{%- endmacro -%} + + +{% macro CalmRefSchema() -%} + +CalmRef: + {{ CalmRef()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/config_attrs.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/config_attrs.yaml.jinja2 new file mode 100644 index 0000000..e546dc8 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/config_attrs.yaml.jinja2 @@ -0,0 +1,121 @@ +{% import "ref.yaml.jinja2" as ref %} + +{% macro AhvDiskRuleset() -%} +title: AHV Disk rules +type: object +x-calm-dsl-type: ahv_disk_rule +properties: + disk_operation: + type: string + operation: + type: string + editable: + type: boolean + default: False + value: + type: string + max_value: + type: integer + min_value: + type: integer + index: + type: integer + disk_value: + $ref: '#/components/schemas/AhvDisk' +{%- endmacro %} + +{% macro AhvNicRuleset() -%} +title: AHV NIC rules +type: object +x-calm-dsl-type: ahv_nic_rule +properties: + operation: + type: string + editable: + type: boolean + default: False + index: + type: string + nic_value: + $ref: '#/components/schemas/AhvNic' +{%- endmacro %} + +{% macro PatchDataField() -%} +title: PatchDataField +type: object +x-calm-dsl-type: patch_data_field +properties: + type: + type: string + operation: + type: string + editable: + type: boolean + default: False + value: + type: string + max_value: + type: integer + min_value: + type: integer +{%- endmacro %} + +{% macro ConfigAttrs() -%} +title: ConfigAttrs +type: object +x-calm-dsl-type: config_attrs +additionalProperties: true +properties: + memory: + $ref : '#/components/schemas/PatchDataField' + vcpu: + $ref : '#/components/schemas/PatchDataField' + numsocket: + $ref : '#/components/schemas/PatchDataField' + disk_delete: + type: boolean + default: False + nic_delete: + type: boolean + default: False + categories_add: + type: boolean + default: False + categories_delete: + type: boolean + default: False + nics: + type: array + items: + $ref : '#/components/schemas/AhvNicRuleset' + disks: + type: array + items: + $ref : '#/components/schemas/AhvDiskRuleset' + categories: + type: array + items: + additionalProperties: true + type: object + x-calm-dsl-type: dict +{%- endmacro %} + +{% macro AhvDiskRulesetSchema() -%} +AhvDiskRuleset: + {{ AhvDiskRuleset()|indent(2) }} +{%- endmacro %} + +{% macro AhvNicRulesetSchema() -%} +AhvNicRuleset: + {{ AhvNicRuleset()|indent(2) }} +{%- endmacro %} + +{% macro PatchDataFieldSchema() -%} +PatchDataField: + {{ PatchDataField()|indent(2) }} +{%- endmacro %} 
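+{# Note: the *Schema macros in this file are imported by main.yaml.jinja2 and expanded
+   under components.schemas, so the $ref entries inside ConfigAttrs (AhvNicRuleset,
+   AhvDiskRuleset, PatchDataField) resolve against the schema names emitted here. #}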
+ +{% macro ConfigAttrsSchema() -%} +ConfigAttrs: + {{ ConfigAttrs()|indent(2) }} +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/config_spec.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/config_spec.yaml.jinja2 new file mode 100644 index 0000000..fcc6b26 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/config_spec.yaml.jinja2 @@ -0,0 +1,40 @@ +{% macro ConfigSpec() -%} +title: ConfigSpec +type: object +x-calm-dsl-type: app_config_spec +properties: + name: + type: string + description: + type: string + type: + type: string + variable_list: + x-calm-dsl-display-name: variables + type: array + items: + $ref: '#/components/schemas/Variable' + config_reference_list: + x-calm-dsl-display-name: config_references + type: array + items: + $ref: '#/components/schemas/Ref' + patch_attrs: + type: array + items: + $ref: '#/components/schemas/ConfigAttrs' + attrs_list: + type: array + items: + type: object + additionalProperties: true + x-calm-dsl-type: dict +{%- endmacro %} + + +{% macro ConfigSpecSchema() -%} + +ConfigSpec: + {{ ConfigSpec()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/credential.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/credential.yaml.jinja2 new file mode 100644 index 0000000..dd803d2 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/credential.yaml.jinja2 @@ -0,0 +1,74 @@ +{% macro Credential() -%} + +title: Credential +type: object +x-calm-dsl-type: app_credential +properties: + name: + type: string + description: + type: string + type: + type: string + default: 'PASSWORD' + username: + type: string + resource_type_reference: + x-calm-dsl-display-name: resource_type + description: Resource type reference for credentials. + x-calm-dsl-type: app_calm_ref + type: object + x-calm-dsl-min-version: 3.4.0 + x-calm-dsl-default-required: false + account_reference: + x-calm-dsl-display-name: account + description: Account reference for credentials. 
+ x-calm-dsl-type: app_calm_ref + type: object + x-calm-dsl-min-version: 3.4.0 + x-calm-dsl-default-required: false + secret: + type: object + additionalProperties: true + x-calm-dsl-type: dict + default: {"attrs": {"is_secret_modified": False}, "value": ""} + passphrase: + type: object + x-calm-dsl-type: dict + x-calm-dsl-default-required: false + default: {"attrs": {"is_secret_modified": False}, "value": ""} + variable_list: + type: array + x-calm-dsl-min-version: 3.4.0 + x-calm-dsl-default-required: false + items: + $ref: '#/components/schemas/Variable' + cred_class: + type: string + x-calm-dsl-min-version: 3.4.0 + default: 'static' + default: + type: boolean + default: false + editables: + type: object + x-calm-dsl-type: object + properties: + username: + type: boolean + secret: + type: boolean + resource_type_reference: + type: boolean + x-calm-dsl-min-version: 3.4.0 + x-calm-dsl-default-required: false + +{%- endmacro %} + + +{% macro CredentialSchema() -%} + +Credential: + {{ Credential()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/deployment.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/deployment.yaml.jinja2 new file mode 100644 index 0000000..0e97782 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/deployment.yaml.jinja2 @@ -0,0 +1,77 @@ +{% import "ref.yaml.jinja2" as ref %} + +{% macro Deployment() -%} + +title: Deployment +type: object +x-calm-dsl-type: app_blueprint_deployment +properties: + published_service_local_reference_list: + x-calm-dsl-display-name: published_services + type: array + items: + $ref: '#/components/schemas/Ref' + package_local_reference_list: + x-calm-dsl-display-name: packages + type: array + items: + $ref: '#/components/schemas/Ref' + substrate_local_reference: + x-calm-dsl-display-name: substrate + {{ ref.Ref() | indent(4) }} + depends_on_list: + x-calm-dsl-display-name: dependencies + type: array + items: + $ref: '#/components/schemas/Ref' + variable_list: + x-calm-dsl-display-name: variables + type: array + items: + $ref: '#/components/schemas/Variable' + action_list: + x-calm-dsl-display-name: actions + type: array + items: + $ref: '#/components/schemas/Action' + min_replicas: + description: Minimum replicas for the deployment. + type: string + default: "1" + default_replicas: + x-calm-dsl-min-version: 2.9.0 + type: string + max_replicas: + description: Maximum replicas for the deployment. 
+ type: string + default: "1" + type: + type: string + default: 'GREENFIELD' + name: + type: string + options: + x-calm-dsl-type: dict + type: object + description: + type: string + editables: + type: object + x-calm-dsl-type: object + properties: + min_replicas: + type: boolean + default_replicas: + type: boolean + max_replicas: + type: boolean + +{%- endmacro %} + + +{% macro DeploymentSchema() -%} + +Deployment: + {{ Deployment()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/descriptor.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/descriptor.yaml.jinja2 new file mode 100644 index 0000000..8b854e5 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/descriptor.yaml.jinja2 @@ -0,0 +1,15 @@ +{% macro Descriptor() -%} + +title: Descriptor +type: object +x-calm-dsl-type: app_descriptor + +{%- endmacro -%} + + +{% macro DescriptorSchema() -%} + +Descriptor: + {{ Descriptor()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/endpoint.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/endpoint.yaml.jinja2 new file mode 100644 index 0000000..e79f717 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/endpoint.yaml.jinja2 @@ -0,0 +1,36 @@ +{% macro Endpoint() -%} + +title: Endpoint +type: object +x-calm-dsl-type: app_endpoint +properties: + name: + type: string + maxLength: 64 + type: + type: string + maxLength: 64 + value_type: + type: string + maxLength: 64 + provider_type: + type: string + maxLength: 64 + tunnel_reference: + x-calm-dsl-display-name: tunnel + x-calm-dsl-min-version: 3.5.0 + $ref: '#/components/schemas/CalmRef' + attrs: + additionalProperties: true + type: object + x-calm-dsl-type: dict + +{%- endmacro %} + + +{% macro EndpointSchema() -%} + +Endpoint: + {{ Endpoint()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/endpoint_payload.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/endpoint_payload.yaml.jinja2 new file mode 100644 index 0000000..951df88 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/endpoint_payload.yaml.jinja2 @@ -0,0 +1,46 @@ +{% macro EndpointPayload() -%} + +title: EndpointPayload +type: object +x-calm-dsl-type: endpoint_payload +properties: + spec: + type: object + x-calm-dsl-type: dict + properties: + name: + type: string + description: Endpoint name + description: + type: string + description: Endpoint description + resources: + type: object + properties: + $ref: '#/components/schemas/Endpoint' + + api_version: + type: string + default: "3.0" + + metadata: + type: object + x-calm-dsl-type: dict + additionalProperties: true + properties: + spec_version: + type: integer + default: 1 + kind: + type: string + default: endpoint + +{%- endmacro %} + + +{% macro EndpointPayloadSchema() -%} + +EndpointPayload: + {{ EndpointPayload()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/environment.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/environment.yaml.jinja2 new file mode 100644 index 0000000..0db02b0 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/environment.yaml.jinja2 @@ -0,0 +1,35 @@ +{% macro Environment() -%} + +title: Environment +type: object +x-calm-dsl-type: environment +properties: + substrate_definition_list: + x-calm-dsl-display-name: substrates + description: Substrate definitions for blueprint. 
+ type: array + items: + $ref: '#/components/schemas/Substrate' + credential_definition_list: + x-calm-dsl-display-name: credentials + description: Credential definitions for blueprint. + type: array + items: + $ref: '#/components/schemas/Credential' + infra_inclusion_list: + x-calm-dsl-min-version: 3.2.0 + x-calm-dsl-display-name: providers + description: Accounts and resources under this environment. + type: array + items: + $ref: '#/components/schemas/AccountProvider' + +{%- endmacro %} + + +{% macro EnvironmentSchema() -%} + +Environment: + {{ Environment()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/environment_payload.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/environment_payload.yaml.jinja2 new file mode 100644 index 0000000..b518f07 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/environment_payload.yaml.jinja2 @@ -0,0 +1,58 @@ +{% macro EnvironmentPayload() -%} + +title: EnvironmentPayload +type: object +x-calm-dsl-type: app_environment_payload +properties: + spec: + type: object + x-calm-dsl-type: dict + properties: + name: + type: string + description: Environment name + description: + type: string + description: Environment description + resources: + type: object + properties: + $ref: '#/components/schemas/Environment' + + api_version: + type: string + default: "3.0" + + metadata: + type: object + x-calm-dsl-type: dict + additionalProperties: true + properties: + spec_version: + type: integer + default: 1 + kind: + type: string + default: environment + project_reference: + type: object + x-calm-dsl-type: object + x-calm-dsl-display-name: project + properties: + name: + type: string + kind: + type: string + uuid: + type: string + + +{%- endmacro %} + + +{% macro EnvironmentPayloadSchema() -%} + +EnvironmentPayload: + {{ EnvironmentPayload()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/jobexecutable.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/jobexecutable.yaml.jinja2 new file mode 100644 index 0000000..bf27e5e --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/jobexecutable.yaml.jinja2 @@ -0,0 +1,56 @@ +{% macro ExecutableResources() -%} +title: JobExecutable +type: object +x-calm-dsl-type: executable_resources +properties: + entity: + {{ ExecutableEntity()|indent(6) }} + action: + {{ ExecutableAction()|indent(6) }} +{%- endmacro %} + +{% macro JobExecutableSchema() -%} + +JobExecutable: + {{ ExecutableResources()|indent(2) }} + + +{%- endmacro %} + +{% macro ExecutableEntity() -%} +type: object +x-calm-dsl-type: object +title: job executable entity +properties: + type: + type: string + description: type of job + enum: ["runbook", "blueprint", "app", "mpi"] + uuid: + description: job executable entity UUID + type: string +{%- endmacro %} + +{% macro ExecutableAction() -%} +type: object +x-calm-dsl-type: object +title: job executable action +properties: + type: + type: string + description: type of action + spec: + {{ ActionSpec()|indent(6) }} + +{%- endmacro %} + +{% macro ActionSpec() -%} +type: object +x-calm-dsl-type: object +properties: + uuid: + description: job executable entity UUID + type: string + payload: + type: string +{%- endmacro %} \ No newline at end of file diff --git a/framework/calm/dsl/builtins/models/schemas/jobs.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/jobs.yaml.jinja2 new file mode 100644 index 0000000..3e4d30d --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/jobs.yaml.jinja2 @@ -0,0 
+1,33 @@ +{% macro SchedulerJob() -%} + +title: Job +type: object +x-calm-dsl-type: scheduler_job +properties: + name: + type: string + description: + type: string + type: + type: string + schedule_info: + type: object + x-calm-dsl-type: job_schedule_info + skip_concurrent_execution: + x-calm-dsl-display-name: skip_exec + type: boolean + executable: + type: object + x-calm-dsl-type: executable_resources + state: + type: string + +{%- endmacro %} + +{% macro SchedulerJobSchema() -%} + +Job: + {{ SchedulerJob()|indent(2) }} + + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/jobscheduleinfo.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/jobscheduleinfo.yaml.jinja2 new file mode 100644 index 0000000..03f40f5 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/jobscheduleinfo.yaml.jinja2 @@ -0,0 +1,28 @@ +{% macro ScheduleInfo() -%} +type: object +x-calm-dsl-type: job_schedule_info +properties: + schedule: + description: job schedule in cron format + type: string + expiry_time: + description: expiration time for jobs after which they wont be executed + type: string + execution_time: + description: execution time for one time job + type: string + start_time: + description: start time for recurring job + type: string + time_zone: + description: time zone for job + type: string +{%- endmacro %} + +{% macro JobScheduleInfoSchema() -%} + +JobScheduleInfo: + {{ ScheduleInfo()|indent(2) }} + + +{%- endmacro %} \ No newline at end of file diff --git a/framework/calm/dsl/builtins/models/schemas/main.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/main.yaml.jinja2 new file mode 100644 index 0000000..d9926c2 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/main.yaml.jinja2 @@ -0,0 +1,114 @@ +{% import "port.yaml.jinja2" as port %} +{% import "service.yaml.jinja2" as service %} +{% import "provider_spec.yaml.jinja2" as provider_spec %} +{% import "substrate.yaml.jinja2" as substrate %} +{% import "deployment.yaml.jinja2" as deployment %} +{% import "config_attrs.yaml.jinja2" as config_attrs %} +{% import "config_spec.yaml.jinja2" as config_spec %} +{% import "profile.yaml.jinja2" as profile %} +{% import "blueprint.yaml.jinja2" as blueprint %} +{% import "blueprint_payload.yaml.jinja2" as blueprint_payload %} +{% import "ref.yaml.jinja2" as ref %} +{% import "task_input.yaml.jinja2" as task_input %} +{% import "descriptor.yaml.jinja2" as descriptor %} +{% import "variable.yaml.jinja2" as variable %} +{% import "package.yaml.jinja2" as package %} +{% import "credential.yaml.jinja2" as credential %} +{% import "endpoint.yaml.jinja2" as endpoint %} +{% import "endpoint_payload.yaml.jinja2" as endpoint_payload %} +{% import "task.yaml.jinja2" as task %} +{% import "runbook.yaml.jinja2" as runbook %} +{% import "runbook_service.yaml.jinja2" as runbook_service %} +{% import "runbook_payload.yaml.jinja2" as runbook_payload %} +{% import "action.yaml.jinja2" as action %} +{% import "project.yaml.jinja2" as project %} +{% import "project_payload.yaml.jinja2" as project_payload %} +{% import "simple_deployment.yaml.jinja2" as simple_deployment %} +{% import "simple_blueprint.yaml.jinja2" as simple_blueprint %} +{% import "published_service.yaml.jinja2" as published_service %} +{% import "pod_deployment.yaml.jinja2" as pod_deployment %} +{% import "ahv_vm.yaml.jinja2" as ahv_vm %} +{% import "simple_pod_deployment.yaml.jinja2" as simple_pod_deployment %} +{% import "vm_disk_package.yaml.jinja2" as vm_disk_package %} +{% import 
"readiness_probe.yaml.jinja2" as readiness_probe %} +{% import "metadata.yaml.jinja2" as metadata %} +{% import "brownfield_vm.yaml.jinja2" as brownfield_vm %} +{% import "brownfield_deployment.yaml.jinja2" as brownfield_deployment %} +{% import "environment.yaml.jinja2" as environment %} +{% import "environment_payload.yaml.jinja2" as environment_payload %} +{% import "vm_profile.yaml.jinja2" as vm_profile %} +{% import "vm_blueprint.yaml.jinja2" as vm_blueprint %} +{% import "calm_ref.yaml.jinja2" as calm_ref %} +{% import "account_provider.yaml.jinja2" as account_provider %} +{% import "ahv_recovery_vm.yaml.jinja2" as ahv_recovery_vm %} +{% import "jobs.yaml.jinja2" as scheduler_job %} +{% import "jobexecutable.yaml.jinja2" as job_executable %} +{% import "jobscheduleinfo.yaml.jinja2" as job_schedule %} +{% import "network_group_tunnel.yaml.jinja2" as network_group_tunnel %} +{% import "network_group_tunnel_payload.yaml.jinja2" as network_group_tunnel_payload %} +{% macro Schemas() -%} + +{{ config_attrs.AhvDiskRulesetSchema() }} +{{ config_attrs.AhvNicRulesetSchema() }} +{{ config_attrs.PatchDataFieldSchema() }} +{{ config_attrs.ConfigAttrsSchema() }} +{{ config_spec.ConfigSpecSchema() }} +{{ port.PortSchema() }} +{{ service.ServiceSchema() }} +{{ published_service.PublishedServiceSchema() }} +{{ provider_spec.ProviderSpecSchema() }} +{{ substrate.SubstrateSchema() }} +{{ deployment.DeploymentSchema() }} +{{ pod_deployment.PODDeploymentSchema() }} +{{ simple_pod_deployment.SimplePODDeploymentSchema() }} +{{ profile.ProfileSchema() }} +{{ blueprint.BlueprintSchema() }} +{{ blueprint_payload.BlueprintPayloadSchema() }} +{{ ref.RefSchema() }} +{{ task_input.TaskInputSchema() }} +{{ descriptor.DescriptorSchema() }} +{{ variable.VariableSchema() }} +{{ package.PackageSchema() }} +{{ credential.CredentialSchema() }} +{{ endpoint.EndpointSchema() }} +{{ endpoint_payload.EndpointPayloadSchema() }} +{{ task.TaskSchema() }} +{{ runbook.RunbookSchema() }} +{{ runbook_service.RunbookServiceSchema() }} +{{ runbook_payload.RunbookPayloadSchema() }} +{{ action.ActionSchema() }} +{{ project.ProjectSchema() }} +{{ project_payload.ProjectPayloadSchema() }} +{{ simple_deployment.SimpleDeploymentSchema() }} +{{ simple_blueprint.SimpleBlueprintSchema() }} +{{ ahv_vm.AhvVmSchema() }} +{{ vm_disk_package.VmDiskPackageSchema() }} +{{ readiness_probe.ReadinessProbeSchema() }} +{{ metadata.MetadataSchema() }} +{{ brownfield_vm.BrownfieldVmSchema() }} +{{ brownfield_deployment.BrownfieldDeploymentSchema() }} +{{ environment.EnvironmentSchema() }} +{{ environment_payload.EnvironmentPayloadSchema() }} +{{ vm_profile.VmProfileSchema() }} +{{ vm_blueprint.VmBlueprintSchema() }} +{{ calm_ref.CalmRefSchema() }} +{{ account_provider.AccountProviderSchema() }} +{{ ahv_recovery_vm.AhvRecoveryVmSchema() }} +{{ job_executable.JobExecutableSchema() }} +{{ job_schedule.JobScheduleInfoSchema() }} +{{ scheduler_job.SchedulerJobSchema() }} +{{ network_group_tunnel.NetworkGroupTunnelVMSpecSchema() }} +{{ network_group_tunnel.NetworkGroupTunnelSchema() }} +{{ network_group_tunnel_payload.NetworkGroupTunnelPayloadSchema() }} +{{ network_group_tunnel_payload.NetworkGroupTunnelVMPayloadSchema() }} +{%- endmacro %} + + +info: + title: Calm v3 API schema with calm.dsl extenstions. + description: Latest stable calm schemas. Later to be obtained from the openapi spec. 
+ version: 2.6.0 + +components: + schemas: + {{ Schemas()|indent(4) }} diff --git a/framework/calm/dsl/builtins/models/schemas/metadata.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/metadata.yaml.jinja2 new file mode 100644 index 0000000..9516bee --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/metadata.yaml.jinja2 @@ -0,0 +1,48 @@ +{% macro Metadata() -%} + +title: Metadata +type: object +x-calm-dsl-type: app_metadata +properties: + name: + type: string + kind: + type: string + project_reference: + type: object + x-calm-dsl-type: object + x-calm-dsl-display-name: project + properties: + name: + type: string + kind: + type: string + uuid: + type: string + owner_reference: + type: object + x-calm-dsl-type: object + x-calm-dsl-display-name: owner + properties: + name: + type: string + kind: + type: string + uuid: + type: string + categories: + type: object + x-calm-dsl-type: dict + spec_version: + type: integer + default: 1 + +{%- endmacro %} + + +{% macro MetadataSchema() -%} + +Metadata: + {{ Metadata()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/network_group_tunnel.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/network_group_tunnel.yaml.jinja2 new file mode 100644 index 0000000..6921293 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/network_group_tunnel.yaml.jinja2 @@ -0,0 +1,56 @@ +{% import "ref.yaml.jinja2" as ref %} +{% import "calm_ref.yaml.jinja2" as calm_ref %} + +{% macro NetworkGroupTunnelVMSpec() %} +title: Network Group Tunnel VM Spec +type: object +x-calm-dsl-type: network_group_tunnel_vm_spec +properties: + cluster: + type: string + vm_name: + type: string + subnet: + type: string + type: + type: string +{%- endmacro %} + +{% macro NetworkGroupTunnelVMSpecSchema() -%} + +NetworkGroupTunnelVMSpec: + {{ NetworkGroupTunnelVMSpec() | indent(2) }} + +{%- endmacro %} + + +{% macro NetworkGroupTunnelSpec() -%} + +title: Network Group Tunnel Spec +type: object +x-calm-dsl-type: network_group_tunnel +properties: + account_reference: + x-calm-dsl-display-name: account + {{ calm_ref.CalmRef() | indent(4) }} + platform_vpc_uuid_list: + x-calm-dsl-display-name: platform_vpcs + description: List of VPC UUIDs + type: array + items: + $ref: '#/components/schemas/CalmRef' + tunnel_reference: + x-calm-dsl-display-name: tunnel_name + type: string + tunnel_vm_spec: + x-calm-dsl-display-name: tunnel_vm_spec + {{ NetworkGroupTunnelVMSpec() | indent(4) }} +{%- endmacro %} + + +{% macro NetworkGroupTunnelSchema() -%} + +NetworkGroupTunnel: + {{ NetworkGroupTunnelSpec() | indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/network_group_tunnel_payload.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/network_group_tunnel_payload.yaml.jinja2 new file mode 100644 index 0000000..a06c6d1 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/network_group_tunnel_payload.yaml.jinja2 @@ -0,0 +1,82 @@ +{% macro NetworkGroupTunnelPayload() -%} +title: NetworkGroupTunnelPayload +type: object +x-calm-dsl-type: app_network_group_tunnel_payload +properties: + spec: + type: object + x-calm-dsl-type: dict + properties: + name: + type: string + description: Network Group name + description: + type: string + description: Network Group description + resources: + type: object + properties: + $ref: '#/components/schemas/NetworkGroupTunnel' + + api_version: + type: string + default: "3.0" + + metadata: + type: object + x-calm-dsl-type: dict + additionalProperties: true + 
properties: + spec_version: + type: integer + default: 1 + kind: + type: string + default: network_group_tunnel + +{%- endmacro %} + + +{% macro NetworkGroupTunnelPayloadSchema() -%} + +NetworkGroupTunnelPayload: + {{ NetworkGroupTunnelPayload()|indent(2) }} + +{%- endmacro %} + +{% macro NetworkGroupTunnelVMPayload() -%} +title: NetworkGroupTunnelVMPayload +type: object +x-calm-dsl-type: app_network_group_tunnel_vm_payload +properties: + spec: + type: object + x-calm-dsl-type: dict + properties: + resources: + type: object + properties: + $ref: '#/components/schemas/NetworkGroupTunnelVMSpec' + + api_version: + type: string + default: "3.0" + + metadata: + type: object + x-calm-dsl-type: dict + additionalProperties: true + properties: + spec_version: + type: integer + default: 1 + kind: + type: string + default: network_group_tunnel_vm +{%- endmacro %} + + +{% macro NetworkGroupTunnelVMPayloadSchema() -%} +NetworkGroupTunnelVMPayload: + {{ NetworkGroupTunnelVMPayload() | indent(2) }} +{%- endmacro %} \ No newline at end of file diff --git a/framework/calm/dsl/builtins/models/schemas/package.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/package.yaml.jinja2 new file mode 100644 index 0000000..e8be75e --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/package.yaml.jinja2 @@ -0,0 +1,93 @@ +{% macro Package() -%} + +title: Package +type: object +x-calm-dsl-type: app_package +properties: + name: + type: string + maxLength: 64 + default: '' + description: + type: string + maxLength: 1000 + default: '' + type: + type: string + default: CUSTOM + options: + additionalProperties: true + type: object + x-calm-dsl-type: object + properties: + install_runbook: + type: object + x-calm-dsl-type: dict + uninstall_runbook: + type: object + x-calm-dsl-type: dict + name: + type: string + description: + type: string + resources: + type: object + x-calm-dsl-type: object + properties: + image_type: + type: string + enum: [DISK_IMAGE, ISO_IMAGE] + source_uri: + type: string + architecture: + type: string + version: + type: object + x-calm-dsl-type: object + properties: + product_version: + type: string + product_name: + type: string + checksum: + type: object + x-calm-dsl-type: object + properties: + checksum_algorithm: + type: string + enum: ['SHA_1', 'SHA_256', ''] + checksum_value: + type: string + + # TODO - fix this mess! 
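+      # The default below seeds empty install/uninstall runbooks; the simple-blueprint
+      # compiler later in this change fills them from a package's __install__ /
+      # __uninstall__ actions.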
+ default: {"install_runbook": {}, "uninstall_runbook": {}} + service_local_reference_list: + type: array + x-calm-dsl-display-name: services + items: + $ref: '#/components/schemas/Ref' + variable_list: + type: array + x-calm-dsl-display-name: variables + items: + $ref: '#/components/schemas/Variable' + action_list: + x-calm-dsl-display-name: actions + type: array + items: + $ref: '#/components/schemas/Action' + image_spec: + x-calm-dsl-type: dict + type: object + version: + type: string + +{%- endmacro %} + + +{% macro PackageSchema() -%} + +Package: + {{ Package()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/pod_deployment.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/pod_deployment.yaml.jinja2 new file mode 100644 index 0000000..75694e5 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/pod_deployment.yaml.jinja2 @@ -0,0 +1,34 @@ +{% macro PODDeployment() -%} + +title: PODDeployment +type: object +x-calm-dsl-type: app_pod_deployment +properties: + type: + type: string + default: K8S_DEPLOYMENT + containers: + type: array + items: + $ref: '#/components/schemas/Service' + deployment_spec: + x-calm-dsl-type: dict + type: object + service_spec: + x-calm-dsl-type: dict + type: object + depends_on_list: + x-calm-dsl-display-name: dependencies + type: array + items: + $ref: '#/components/schemas/Ref' + +{%- endmacro %} + + +{% macro PODDeploymentSchema() -%} + +PODDeployment: + {{ PODDeployment()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/port.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/port.yaml.jinja2 new file mode 100644 index 0000000..8711b32 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/port.yaml.jinja2 @@ -0,0 +1,30 @@ +{% macro Port() -%} + +title: Port +type: object +x-calm-dsl-type: app_port +properties: + target_port: + type: string + protocol: + type: string + default: 'SSH' + endpoint_name: + type: string + default: '' + exposed_address: + type: string + default: '' + exposed_port: + type: string + default: '' + +{%- endmacro %} + + +{% macro PortSchema() -%} + +Port: + {{ Port()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/profile.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/profile.yaml.jinja2 new file mode 100644 index 0000000..f3fc99f --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/profile.yaml.jinja2 @@ -0,0 +1,61 @@ +{% macro Profile() -%} + +title: Profile +type: object +x-calm-dsl-type: app_profile +properties: + name: + type: string + description: + type: string + # description attribute in profile gives bp launch error: https://jira.nutanix.com/browse/CALM-19380 + environment_reference_list: + type: array + x-calm-dsl-min-version: 3.2.0 + x-calm-dsl-display-name: environments + items: + type: object + x-calm-dsl-type: app_calm_ref + deployment_create_list: + x-calm-dsl-display-name: deployments + type: array + items: + $ref: '#/components/schemas/Deployment' + variable_list: + x-calm-dsl-display-name: variables + type: array + items: + $ref: '#/components/schemas/Variable' + patch_list: + x-calm-dsl-min-version: 3.3.0 + x-calm-dsl-display-name: patch_list + type: array + items: + $ref: '#/components/schemas/ConfigSpec' + + action_list: + x-calm-dsl-display-name: actions + type: array + items: + $ref: '#/components/schemas/Action' + snapshot_config_list: + x-calm-dsl-display-name: snapshot_configs + x-calm-dsl-min-version: 3.3.0 + type: array + items: + $ref: 
'#/components/schemas/ConfigSpec' + restore_config_list: + x-calm-dsl-display-name: restore_configs + x-calm-dsl-min-version: 3.3.0 + type: array + items: + $ref: '#/components/schemas/ConfigSpec' +{%- endmacro %} + + +{% macro ProfileSchema() -%} + +Profile: + {{ Profile()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/project.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/project.yaml.jinja2 new file mode 100644 index 0000000..f8853b2 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/project.yaml.jinja2 @@ -0,0 +1,77 @@ +{% import "ref.yaml.jinja2" as ref %} + +{% macro ProjectSpec() -%} + +title: Project Spec +type: object +x-calm-dsl-type: project +properties: + provider_list: + x-calm-dsl-display-name: providers + type: array + items: + $ref: '#/components/schemas/AccountProvider' + user_reference_list: + x-calm-dsl-display-name: users + type: array + items: + type: object + x-calm-dsl-type: object + properties: + kind: + type: string + name: + type: string + uuid: + type: string + environment_reference_list: + x-calm-dsl-display-name: env_refs + type: array + items: + $ref: '#/components/schemas/Ref' + default_environment_reference: + x-calm-dsl-display-name: default_environment + x-calm-dsl-min-version: 3.2.0 + {{ ref.Ref() | indent(4) }} + environment_definition_list: + x-calm-dsl-display-name: envs + description: Inline definitions of environment in project + type: array + items: + $ref: '#/components/schemas/Environment' + external_user_group_reference_list: + x-calm-dsl-display-name: groups + type: array + items: + type: object + x-calm-dsl-type: object + properties: + kind: + type: string + name: + type: string + uuid: + type: string + quotas: + type: object + x-calm-dsl-type: object + properties: + VCPUS: + x-calm-dsl-display-name: vcpus + type: integer + STORAGE: + x-calm-dsl-display-name: storage + type: integer + MEMORY: + x-calm-dsl-display-name: memory + type: integer + +{%- endmacro %} + + +{% macro ProjectSchema() -%} + +Project: + {{ ProjectSpec() | indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/project_payload.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/project_payload.yaml.jinja2 new file mode 100644 index 0000000..4950ede --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/project_payload.yaml.jinja2 @@ -0,0 +1,46 @@ +{% macro ProjectPayload() -%} + +title: ProjectPayload +type: object +x-calm-dsl-type: app_project_payload +properties: + spec: + type: object + x-calm-dsl-type: dict + properties: + name: + type: string + description: Project name + description: + type: string + description: Project description + resources: + type: object + properties: + $ref: '#/components/schemas/Project' + + api_version: + type: string + default: "3.0" + + metadata: + type: object + x-calm-dsl-type: dict + additionalProperties: true + properties: + spec_version: + type: integer + default: 1 + kind: + type: string + default: project + +{%- endmacro %} + + +{% macro ProjectPayloadSchema() -%} + +ProjectPayload: + {{ ProjectPayload()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/provider_spec.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/provider_spec.yaml.jinja2 new file mode 100644 index 0000000..8f69fd3 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/provider_spec.yaml.jinja2 @@ -0,0 +1,15 @@ +{% macro ProviderSpec() -%} + +title: ProviderSpec +type: object +x-calm-dsl-type: 
app_provider_spec + +{%- endmacro -%} + + +{% macro ProviderSpecSchema() -%} + +ProviderSpec: + {{ ProviderSpec()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/published_service.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/published_service.yaml.jinja2 new file mode 100644 index 0000000..98a1f71 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/published_service.yaml.jinja2 @@ -0,0 +1,58 @@ +{% macro PublishedService() -%} + +title: PublishedService +type: object +x-calm-dsl-type: app_published_service +properties: + name: + type: string + maxLength: 64 + default: '' + description: + type: string + maxLength: 1000 + default: '' + type: + type: string + default: K8S_SERVICE + enum: [K8S_SERVICE] + port_list: + x-calm-dsl-display-name: ports + type: array + items: + $ref: '#/components/schemas/Port' + default: [] + singleton: + type: boolean + default: true + tier: + type: string + default: '' + depends_on_list: + x-calm-dsl-display-name: dependencies + type: array + items: + $ref: '#/components/schemas/Ref' + variable_list: + x-calm-dsl-display-name: variables + type: array + items: + $ref: '#/components/schemas/Variable' + action_list: + x-calm-dsl-display-name: actions + type: array + items: + $ref: '#/components/schemas/Action' + options: # Improve this validation + x-calm-dsl-type: dict + type: object + +{%- endmacro %} + + +{% macro PublishedServiceSchema() -%} + +PublishedService: + {{ PublishedService()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/readiness_probe.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/readiness_probe.yaml.jinja2 new file mode 100644 index 0000000..a27874a --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/readiness_probe.yaml.jinja2 @@ -0,0 +1,48 @@ +{% import "ref.yaml.jinja2" as ref %} + +{% macro ReadinessProbe() -%} + +title: Readiness Probe +type: object +x-calm-dsl-type: app_readiness_probe +properties: + connection_type: + type: string + enum: + - SSH + - POWERSHELL + connection_port: + type: integer + connection_protocol: + type: string + login_credential_local_reference: + x-calm-dsl-display-name: credential + {{ ref.Ref() | indent(4) }} + timeout_secs: + type: string + delay_secs: + type: string + default: '60' + retries: + type: string + default: '5' + address: + type: string + disable_readiness_probe: + x-calm-dsl-display-name: disabled + default: true + type: boolean + editables_list: + type: array + items: + type: string + +{%- endmacro %} + + +{% macro ReadinessProbeSchema() -%} + +ReadinessProbe: + {{ ReadinessProbe()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/ref.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/ref.yaml.jinja2 new file mode 100644 index 0000000..bd73203 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/ref.yaml.jinja2 @@ -0,0 +1,22 @@ +{% macro Ref() -%} + +title: Reference +type: object +x-calm-dsl-type: app_ref +required: + - kind +properties: + kind: + type: string + name: + type: string + +{%- endmacro -%} + + +{% macro RefSchema() -%} + +Ref: + {{ Ref()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/runbook.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/runbook.yaml.jinja2 new file mode 100644 index 0000000..38ddf78 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/runbook.yaml.jinja2 @@ -0,0 +1,31 @@ +{% macro Runbook() -%} +title: Runbook +type: object 
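+# main_task_local_reference points at the runbook's root task (a DAG); service.py
+# later in this change builds minimal runbooks this way via runbook_create(), wiring
+# an empty DAG task as the main task for generated system actions.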
+x-calm-dsl-type: app_runbook +properties: + name: + type: string + description: + type: string + main_task_local_reference: + $ref: '#/components/schemas/Ref' + task_definition_list: + x-calm-dsl-display-name: tasks + type: array + items: + $ref: "#/components/schemas/Task" + variable_list: + x-calm-dsl-display-name: variables + type: array + items: + $ref: '#/components/schemas/Variable' + +{%- endmacro %} + + +{% macro RunbookSchema() -%} + +Runbook: + {{ Runbook()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/runbook_payload.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/runbook_payload.yaml.jinja2 new file mode 100644 index 0000000..6790a6d --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/runbook_payload.yaml.jinja2 @@ -0,0 +1,46 @@ +{% macro RunbookPayload() -%} + +title: RunbookPayload +type: object +x-calm-dsl-type: runbook_payload +properties: + spec: + type: object + x-calm-dsl-type: dict + properties: + name: + type: string + description: Runbook name + description: + type: string + description: Runbook description + resources: + type: object + properties: + $ref: '#/components/schemas/RunbookService' + + api_version: + type: string + default: "3.0" + + metadata: + type: object + x-calm-dsl-type: dict + additionalProperties: true + properties: + spec_version: + type: integer + default: 1 + kind: + type: string + default: action + +{%- endmacro %} + + +{% macro RunbookPayloadSchema() -%} + +RunbookPayload: + {{ RunbookPayload()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/runbook_service.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/runbook_service.yaml.jinja2 new file mode 100644 index 0000000..f73bfde --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/runbook_service.yaml.jinja2 @@ -0,0 +1,36 @@ +{% macro RunbookService() -%} +title: RunbookService +type: object +x-calm-dsl-type: runbook_service +properties: + endpoint_definition_list: + x-calm-dsl-display-name: endpoints + description: Endpoint definitions for runbook service. + type: array + items: + $ref: '#/components/schemas/Endpoint' + credential_definition_list: + x-calm-dsl-display-name: credentials + description: Credential definitions for runbook service. 
+ type: array + items: + $ref: '#/components/schemas/Credential' + client_attrs: + type: object + additionalProperties: true + x-calm-dsl-type: dict + runbook: + $ref: '#/components/schemas/Runbook' + default_target_reference: + type: object + x-calm-dsl-type: app_ref + x-calm-dsl-display-name: default_target +{%- endmacro %} + + +{% macro RunbookServiceSchema() -%} + +RunbookService: + {{ RunbookService()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/service.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/service.yaml.jinja2 new file mode 100644 index 0000000..ca2fdb4 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/service.yaml.jinja2 @@ -0,0 +1,54 @@ +{% macro Service() -%} + +title: Service +type: object +x-calm-dsl-type: app_service +properties: + name: + type: string + maxLength: 64 + default: '' + description: + type: string + maxLength: 1000 + default: '' + port_list: + x-calm-dsl-display-name: ports + type: array + items: + $ref: '#/components/schemas/Port' + default: [] + singleton: + type: boolean + default: False + tier: + type: string + default: '' + depends_on_list: + x-calm-dsl-display-name: dependencies + type: array + items: + $ref: '#/components/schemas/Ref' + variable_list: + x-calm-dsl-display-name: variables + type: array + items: + $ref: '#/components/schemas/Variable' + action_list: + x-calm-dsl-display-name: actions + type: array + items: + $ref: '#/components/schemas/Action' + container_spec: + x-calm-dsl-type: dict + type: object + +{%- endmacro %} + + +{% macro ServiceSchema() -%} + +Service: + {{ Service()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/simple_blueprint.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/simple_blueprint.yaml.jinja2 new file mode 100644 index 0000000..090cdd1 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/simple_blueprint.yaml.jinja2 @@ -0,0 +1,48 @@ +{% macro SimpleBlueprint() -%} + +title: SimpleBlueprint +type: object +x-calm-dsl-type: app_simple_blueprint +properties: + name: + type: string + maxLength: 64 + default: '' + description: + type: string + maxLength: 1000 + default: '' + environment_reference_list: + type: array + x-calm-dsl-min-version: 3.2.0 + x-calm-dsl-display-name: environments + items: + type: object + x-calm-dsl-type: app_calm_ref + credentials: + type: array + items: + $ref: '#/components/schemas/Credential' + deployments: + type: array + items: + $ref: '#/components/schemas/SimpleDeployment' + variables: + type: array + items: + $ref: '#/components/schemas/Variable' + action_list: + x-calm-dsl-display-name: actions + type: array + items: + $ref: '#/components/schemas/Action' + +{%- endmacro %} + + +{% macro SimpleBlueprintSchema() -%} + +SimpleBlueprint: + {{ SimpleBlueprint()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/simple_deployment.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/simple_deployment.yaml.jinja2 new file mode 100644 index 0000000..dd0a132 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/simple_deployment.yaml.jinja2 @@ -0,0 +1,93 @@ +{% import "ref.yaml.jinja2" as ref %} + +{% macro SimpleDeployment() -%} + +title: SimpleDeployment +type: object +x-calm-dsl-type: app_blueprint_simple_deployment +properties: + name: + type: string + maxLength: 64 + default: '' + description: + type: string + maxLength: 1000 + default: '' + depends_on_list: + x-calm-dsl-display-name: dependencies + type: array + 
items: + $ref: '#/components/schemas/Ref' + variable_list: + x-calm-dsl-display-name: variables + type: array + items: + $ref: '#/components/schemas/Variable' + action_list: + x-calm-dsl-display-name: actions + type: array + items: + additionalProperties: true + type: object + x-calm-dsl-type: dict + provider_type: + type: string + default: 'AHV_VM' + os_type: + type: string + default: 'Linux' + provider_spec: + type: object + x-calm-dsl-type: app_provider_spec + readiness_probe: + type: object + x-calm-dsl-type: object + properties: + connection_type: + type: string + default: SSH + enum: [SSH, POWERSHELL] + connection_port: + type: integer + default: 22 + login_credential_local_reference: + x-calm-dsl-display-name: credential + {{ ref.Ref() | indent(8) }} + timeout_secs: + type: string + delay_secs: + type: string + retries: + type: string + default: "5" + address: + type: string + disable_readiness_probe: + x-calm-dsl-display-name: disabled + default: false + type: boolean + min_replicas: + description: Minimum replicas for the deployment. + type: string + default: "1" + max_replicas: + description: Maximum replicas for the deployment. + type: string + default: "1" + deployment_spec: + type: object + x-calm-dsl-type: dict + service_spec: + type: object + x-calm-dsl-type: dict + +{%- endmacro %} + + +{% macro SimpleDeploymentSchema() -%} + +SimpleDeployment: + {{ SimpleDeployment()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/simple_pod_deployment.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/simple_pod_deployment.yaml.jinja2 new file mode 100644 index 0000000..cebc343 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/simple_pod_deployment.yaml.jinja2 @@ -0,0 +1,35 @@ +{% macro SimplePODDeployment() -%} + +title: SimplePODDeployment +type: object +x-calm-dsl-type: app_simple_pod_deployment +properties: + type: + type: string + default: K8S_DEPLOYMENT + deployment_spec: + x-calm-dsl-type: dict + type: object + service_spec: + x-calm-dsl-type: dict + type: object + depends_on_list: + x-calm-dsl-display-name: dependencies + type: array + items: + $ref: '#/components/schemas/Ref' + action_list: + x-calm-dsl-display-name: actions + type: array + items: + $ref: '#/components/schemas/Action' + +{%- endmacro %} + + +{% macro SimplePODDeploymentSchema() -%} + +SimplePODDeployment: + {{ SimplePODDeployment()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/substrate.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/substrate.yaml.jinja2 new file mode 100644 index 0000000..d0aa83b --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/substrate.yaml.jinja2 @@ -0,0 +1,60 @@ +{% macro Substrate() -%} + +title: Substrate +type: object +x-calm-dsl-type: app_substrate +properties: + name: + type: string + maxLength: 64 + default: '' + description: + type: string + maxLength: 1000 + default: '' + type: + x-calm-dsl-display-name: provider_type + type: string + default: 'AHV_VM' + os_type: + type: string + default: 'Linux' + account_reference: + type: object + x-calm-dsl-type: app_calm_ref + x-calm-dsl-display-name: account + x-calm-dsl-min-version: 3.2.0 + vm_recovery_spec: + type: object + x-calm-dsl-type: recovery_vm_ahv_spec + x-calm-dsl-min-version: 3.3.0 + create_spec: + x-calm-dsl-display-name: provider_spec + type: object + x-calm-dsl-type: app_provider_spec + editables: + x-calm-dsl-display-name: provider_spec_editables + type: object + x-calm-dsl-type: dict + variable_list: + 
x-calm-dsl-display-name: variables + type: array + items: + $ref: '#/components/schemas/Variable' + action_list: + x-calm-dsl-display-name: actions + type: array + items: + $ref: '#/components/schemas/Action' + readiness_probe: + $ref: '#/components/schemas/ReadinessProbe' + +{%- endmacro %} + + +{% macro SubstrateSchema() -%} + +Substrate: + {{ Substrate()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/task.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/task.yaml.jinja2 new file mode 100644 index 0000000..3319f64 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/task.yaml.jinja2 @@ -0,0 +1,47 @@ +{% macro Task() -%} + +title: Task +type: object +x-calm-dsl-type: app_task +properties: + name: + type: string + description: + type: string + type: + type: string + target_any_local_reference: + $ref: '#/components/schemas/Ref' + exec_target_reference: + $ref: '#/components/schemas/Ref' + attrs: + type: object + additionalProperties: true + x-calm-dsl-type: dict + child_tasks_local_reference_list: + type: array + items: + $ref: '#/components/schemas/Ref' + variable_list: + x-calm-dsl-display-name: variables + type: array + items: + $ref: '#/components/schemas/Variable' + retries: + type: string + timeout_secs: + type: string + inherit_target: + type: boolean + x-calm-dsl-min-version: 3.5.0 + x-calm-dsl-default-required: false + +{%- endmacro %} + + +{% macro TaskSchema() -%} + +Task: + {{ Task()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/task_input.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/task_input.yaml.jinja2 new file mode 100644 index 0000000..3f6e62d --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/task_input.yaml.jinja2 @@ -0,0 +1,29 @@ +{% macro TaskInput() -%} + +title: TaskInput +type: object +x-calm-dsl-type: task_input +required: + - name +properties: + input_type: + type: string + default: "text" + enum: ["text", "password", "checkbox", "select", "selectmultiple", "date", "time", "datetime", "file"] + name: + type: string + options: + type: array + items: + type: string + + +{%- endmacro -%} + + +{% macro TaskInputSchema() -%} + +TaskInput: + {{ TaskInput()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/variable.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/variable.yaml.jinja2 new file mode 100644 index 0000000..ac64dde --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/variable.yaml.jinja2 @@ -0,0 +1,78 @@ +{% import "ref.yaml.jinja2" as ref %} + +{% macro Variable() -%} + +title: Variable +type: object +x-calm-dsl-type: app_variable +properties: + name: + description: name + type: string + maxLength: 64 + description: + type: string + maxLength: 1000 + type: + type: string + enum: [LOCAL, SECRET, EXTERNAL, EXEC_LOCAL, HTTP_LOCAL, EXEC_SECRET, HTTP_SECRET, EXEC_EXTERNAL, HTTP_EXTERNAL] + default: LOCAL + label: + type: string + attrs: + additionalProperties: true + type: object + x-calm-dsl-type: dict + val_type: + x-calm-dsl-display-name: value_type + type: string + enum: [STRING, INT, DICT, DATE, TIME, DATE_TIME, MULTILINE_STRING] + default: STRING + value: + type: string + data_type: + type: string + enum: [BASE, LIST, SINGLE_SELECT_LIST] + default: BASE + editables: + additionalProperties: true + type: object + x-calm-dsl-type: dict + regex: + type: object + x-calm-dsl-type: object + properties: + value: + type: string + should_validate: + type: boolean + options: + type: object 
+ x-calm-dsl-type: object + properties: + type: + type: string + enum: [PREDEFINED, EXEC, HTTP] + choices: + type: array + items: + type: string + attrs: + additionalProperties: true + type: object + x-calm-dsl-type: dict + + is_hidden: + type: boolean + is_mandatory: + type: boolean + +{%- endmacro %} + + +{% macro VariableSchema() -%} + +Variable: + {{ Variable()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/vm_blueprint.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/vm_blueprint.yaml.jinja2 new file mode 100644 index 0000000..e4a953d --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/vm_blueprint.yaml.jinja2 @@ -0,0 +1,37 @@ +{% macro VmBlueprint() -%} + +title: VmBlueprint +type: object +x-calm-dsl-type: app_vm_blueprint +properties: + name: + type: string + maxLength: 64 + default: '' + description: + type: string + maxLength: 1000 + default: '' + credentials: + type: array + items: + $ref: '#/components/schemas/Credential' + type: + type: string + default: 'USER' + vm_profile_definition_list: + x-calm-dsl-display-name: profiles + description: Profile definitions for blueprint + type: array + items: + $ref: '#/components/schemas/VmProfile' + +{%- endmacro %} + + +{% macro VmBlueprintSchema() -%} + +VmBlueprint: + {{ VmBlueprint()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/vm_disk_package.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/vm_disk_package.yaml.jinja2 new file mode 100644 index 0000000..8896672 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/vm_disk_package.yaml.jinja2 @@ -0,0 +1,55 @@ +{% macro VmDiskPackage() -%} + +title: Vm Disk Package +type: object +x-calm-dsl-type: app_vm_disk_package +properties: + name: + type: string + maxLength: 64 + default: '' + description: + type: string + maxLength: 1000 + default: '' + image: + type: object + x-calm-dsl-type: object + properties: + name: + type: string + type: + type: string + default: DISK_IMAGE + source_uri: + x-calm-dsl-display-name: source + type: string + architecture: + type: string + default: X86_64 + product: + type: object + x-calm-dsl-type: object + properties: + name: + type: string + version: + type: string + checksum: + type: object + x-calm-dsl-type: object + properties: + algorithm: + type: string + value: + type: string + +{%- endmacro %} + + +{% macro VmDiskPackageSchema() -%} + +VmDiskPackage: + {{ VmDiskPackage() | indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/schemas/vm_profile.yaml.jinja2 b/framework/calm/dsl/builtins/models/schemas/vm_profile.yaml.jinja2 new file mode 100644 index 0000000..dcb72c9 --- /dev/null +++ b/framework/calm/dsl/builtins/models/schemas/vm_profile.yaml.jinja2 @@ -0,0 +1,57 @@ +{% macro VmProfile() -%} + +title: VmProfile +type: object +x-calm-dsl-type: app_vm_profile +properties: + name: + type: string + maxLength: 64 + default: '' + environment_reference_list: + type: array + x-calm-dsl-min-version: 3.2.0 + x-calm-dsl-display-name: environments + items: + type: object + x-calm-dsl-type: app_calm_ref + os_type: + type: string + default: 'Linux' + provider_type: + type: string + default: 'AHV_VM' + provider_spec: + type: object + x-calm-dsl-type: app_provider_spec + readiness_probe: + $ref: '#/components/schemas/ReadinessProbe' + min_replicas: + description: Minimum replicas for the deployment. + type: string + default: "1" + max_replicas: + description: Maximum replicas for the deployment. 
+ type: string + default: "1" + variables: + type: array + description: Profile variables + items: + $ref: '#/components/schemas/Variable' + action_list: + x-calm-dsl-display-name: actions + description: Profile, Package and Substrate actions + type: array + items: + $ref: '#/components/schemas/Action' + +{%- endmacro %} + + +{% macro VmProfileSchema() -%} + +VmProfile: + {{ VmProfile()|indent(2) }} + +{%- endmacro %} diff --git a/framework/calm/dsl/builtins/models/service.py b/framework/calm/dsl/builtins/models/service.py new file mode 100644 index 0000000..8001973 --- /dev/null +++ b/framework/calm/dsl/builtins/models/service.py @@ -0,0 +1,109 @@ +from .entity import EntityType, Entity +from .validator import PropertyValidator + +from .task import dag +from .action import runbook_create, _action_create, action + + +# Service + + +class ServiceType(EntityType): + __schema_name__ = "Service" + __openapi_type__ = "app_service" + + ALLOWED_SYSTEM_ACTIONS = { + "__create__": "action_create", + "__delete__": "action_delete", + "__start__": "action_start", + "__stop__": "action_stop", + "__restart__": "action_restart", + "__soft_delete__": "action_soft_delete", + } + + def get_task_target(cls): + return cls.get_ref() + + @classmethod + def pre_decompile(mcls, cdict, context, prefix=""): + cdict = super().pre_decompile(cdict, context, prefix=prefix) + + if "__name__" in cdict: + cdict["__name__"] = "{}{}".format(prefix, cdict["__name__"]) + + return cdict + + def compile(cls): + + cdict = super().compile() + + def make_empty_runbook(action_name): + suffix = getattr(cls, "name", "") or cls.__name__ + user_dag = dag( + name="DAG_Task_for_Service_{}_{}".format(suffix, action_name), + target=cls.get_task_target(), + ) + return runbook_create( + name="Runbook_for_Service_{}_{}".format(suffix, action_name), + main_task_local_reference=user_dag.get_ref(), + tasks=[user_dag], + ) + + compulsory_actions = list(cls.ALLOWED_SYSTEM_ACTIONS.values()) + for action_obj in cdict["action_list"]: + if action_obj.__name__ in compulsory_actions: + compulsory_actions.remove(action_obj.__name__) + + for action_name in compulsory_actions: + user_action = _action_create( + **{ + "name": action_name, + "description": "", + "critical": True, + "type": "system", + "runbook": make_empty_runbook(action_name), + } + ) + cdict["action_list"].append(user_action) + + return cdict + + +class ServiceValidator(PropertyValidator, openapi_type="app_service"): + __default__ = None + __kind__ = ServiceType + + +def service(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return ServiceType(name, bases, kwargs) + + +Service = service() + + +class BaseService(Service): + @action + def __create__(): + pass + + @action + def __start__(): + pass + + @action + def __stop__(): + pass + + @action + def __delete__(): + pass + + @action + def __restart__(): + pass + + @action + def __soft_delete__(): + pass diff --git a/framework/calm/dsl/builtins/models/simple_blueprint.py b/framework/calm/dsl/builtins/models/simple_blueprint.py new file mode 100644 index 0000000..544d5ab --- /dev/null +++ b/framework/calm/dsl/builtins/models/simple_blueprint.py @@ -0,0 +1,258 @@ +from .entity import EntityType, Entity +from .validator import PropertyValidator + +from .profile import profile +from .deployment import deployment +from .simple_pod_deployment import simple_pod_deployment +from .provider_spec import provider_spec as get_provider_spec +from .substrate import substrate +from .service import service +from .package import package 
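+# A SimpleBlueprint flattens the authoring model: make_bp_dict() below expands each
+# SimpleDeployment into a generated Service, Package, Substrate and Deployment, and
+# splits POD-style entries out via simple_pod_deployment.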
+from .ref import ref +from .action import action as Action + +# Simple Blueprint + + +class SimpleBlueprintType(EntityType): + __schema_name__ = "SimpleBlueprint" + __openapi_type__ = "app_simple_blueprint" + __has_dag_target__ = False + + def get_task_target(cls): + return + + def make_bp_dict(cls, categories=None): + + deployments = getattr(cls, "deployments", []) + + pod_deployments = [] + normal_deployments = [] + for dep in deployments: + if dep.deployment_spec and dep.service_spec: + pod_dep = simple_pod_deployment( + name=getattr(dep, "name", "") or dep.__name__, + service_spec=dep.service_spec, + deployment_spec=dep.deployment_spec, + dependencies=dep.dependencies, + ) + + pod_deployments.append(pod_dep) + for key, value in dep.__dict__.items(): + if isinstance(value, Action): + setattr(pod_dep, key, value) + + else: + normal_deployments.append(dep) + + # Removing pod deployments from the deployments + setattr(cls, "deployments", normal_deployments) + + # Get default credential from credential list + default_cred = None + for cred in cls.credentials: + if cred.default: + default_cred = cred.get_ref() + break + + # Get simple blueprint dictionary + cdict = cls.get_dict() + + # Create blueprint objects + + credential_definition_list = cdict["credentials"] + + pfl_kwargs = {"name": cls.__name__ + "Profile"} + + environments = getattr(cls, "environments", None) + if environments: + pfl_kwargs["environments"] = environments + + # Init Profile + pro = profile(**pfl_kwargs) + app_profile = pro.get_dict() + app_profile["variable_list"] = cdict["variables"] + app_profile["action_list"] = cdict["action_list"] + app_profile_list = [app_profile] + + service_definition_list = [] + package_definition_list = [] + substrate_definition_list = [] + published_service_definition_list = [] + + for sd in cdict["deployments"]: + + # Init service dict + s = service(name=sd["name"] + "Service", description=sd["description"]) + sdict = s.get_dict() + sdict["variable_list"] = sd["variable_list"] + + compulsory_actions = sdict.pop("action_list", []) + existing_system_actions = [] + sdict["action_list"] = [] # Initializing by empty list + for action in sd["action_list"]: + if action["name"].startswith("__") and action["name"].endswith("__"): + if action["name"] in s.ALLOWED_SYSTEM_ACTIONS: + action["name"] = s.ALLOWED_SYSTEM_ACTIONS[action["name"]] + action["type"] = "system" + action["critical"] = True + existing_system_actions.append(action["name"]) + else: + continue + sdict["action_list"].append(action) + + # Adding compulsory action action, if not supplied by user + for action in compulsory_actions: + if action["name"] not in existing_system_actions: + sdict["action_list"].append(action) + + # Init package dict + p = package(name=sd["name"] + "Package") + p.services = [ref(s)] + pdict = p.get_dict() + for action in sd["action_list"]: + if action["name"] == "__install__": + pdict["options"]["install_runbook"] = action["runbook"] + elif action["name"] == "__uninstall__": + pdict["options"]["uninstall_runbook"] = action["runbook"] + + # Init Substrate dict + sub = substrate( + name=sd["name"] + "Substrate", + provider_type=sd["provider_type"], + provider_spec=get_provider_spec(sd["provider_spec"]), + readiness_probe=sd["readiness_probe"], + os_type=sd["os_type"], + ) + subdict = sub.get_dict() + + for action in sd["action_list"]: + if action["name"] == "__pre_create__": + action["name"] = sub.ALLOWED_FRAGMENT_ACTIONS["__pre_create__"] + action["type"] = "fragment" + + for task in 
action["runbook"]["task_definition_list"]: + if task["target_any_local_reference"]: + task["target_any_local_reference"] = { + "kind": "app_substrate", + "name": subdict["name"], + } + + subdict["action_list"].append(action) + + elif action["name"] == "__post_delete__": + action["name"] = sub.ALLOWED_FRAGMENT_ACTIONS["__post_delete__"] + action["type"] = "fragment" + + for task in action["runbook"]["task_definition_list"]: + if task["target_any_local_reference"]: + task["target_any_local_reference"] = { + "kind": "app_substrate", + "name": subdict["name"], + } + + subdict["action_list"].append(action) + + # Init deployment dict + d = deployment( + name=sd["name"], + min_replicas=sd["min_replicas"], + max_replicas=sd["max_replicas"], + ) + d.packages = [ref(p)] + d.substrate = ref(sub) + ddict = d.get_dict() + + # Setting the deployment level dependencies + ddict["depends_on_list"] = sd["depends_on_list"] + + # Add items + service_definition_list.append(sdict) + package_definition_list.append(pdict) + substrate_definition_list.append(subdict) + + app_profile["deployment_create_list"].append(ddict) + + for pdep in pod_deployments: + pod_dict = pdep.extract_deployment() + for sd in pod_dict["service_definition_list"]: + sdict = sd.get_dict() + service_definition_list.append(sdict) + + for pd in pod_dict["package_definition_list"]: + pdict = pd.get_dict() + package_definition_list.append(pdict) + + for sub in pod_dict["substrate_definition_list"]: + subdict = sub.get_dict() + substrate_definition_list.append(subdict) + + for psd in pod_dict["published_service_definition_list"]: + psddict = psd.get_dict() + published_service_definition_list.append(psddict) + + for dep in pod_dict["deployment_definition_list"]: + depdict = dep.get_dict() + app_profile["deployment_create_list"].append(depdict) + + blueprint_resources = { + "service_definition_list": service_definition_list, + "package_definition_list": package_definition_list, + "substrate_definition_list": substrate_definition_list, + "credential_definition_list": credential_definition_list, + "app_profile_list": app_profile_list, + "published_service_definition_list": published_service_definition_list, + } + + if default_cred: + blueprint_resources[ + "default_credential_local_reference" + ] = default_cred.get_dict() + + spec = { + "name": cls.__name__, + "description": cls.__doc__ or "", + "resources": blueprint_resources, + } + + metadata = { + "spec_version": 1, + "kind": "blueprint", + "name": cls.__name__, + "categories": categories or {}, + } + + blueprint = {"metadata": metadata, "spec": spec} + + return blueprint + + def make_single_vm_bp_dict(cls): + + bp_dict = cls.make_bp_dict() + + if len(bp_dict["spec"]["resources"]["substrate_definition_list"]) > 1: + return None + + subdict = bp_dict["spec"]["resources"]["substrate_definition_list"][0] + subdict["readiness_probe"] = {"disable_readiness_probe": True} + + if bp_dict["metadata"]["categories"]: + bp_dict["metadata"]["categories"]["TemplateType"] = "Vm" + else: + bp_dict["metadata"]["categories"] = {"TemplateType": "Vm"} + + return bp_dict + + +class SimpleBlueprintValidator(PropertyValidator, openapi_type="app_simple_blueprint"): + __default__ = None + __kind__ = SimpleBlueprintType + + +def simple_blueprint(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return SimpleBlueprintType(name, bases, kwargs) + + +SimpleBlueprint = simple_blueprint() diff --git a/framework/calm/dsl/builtins/models/simple_deployment.py 
b/framework/calm/dsl/builtins/models/simple_deployment.py new file mode 100644 index 0000000..7570454 --- /dev/null +++ b/framework/calm/dsl/builtins/models/simple_deployment.py @@ -0,0 +1,39 @@ +from .entity import EntityType, Entity +from .validator import PropertyValidator + + +# SimpleDeployment + + +class SimpleDeploymentType(EntityType): + __schema_name__ = "SimpleDeployment" + __openapi_type__ = "app_blueprint_simple_deployment" + + def get_ref(cls): + """Note: Only deployment-level dependencies in simple blueprint""" + return super().get_ref(kind="app_blueprint_deployment") + + def get_task_target(cls): + cls_ref = cls.get_ref() + + # Note: Service to be appeneded in name for task targets + cls_ref.kind = "app_service" + cls_ref.name = str(cls) + "Service" + + return cls_ref + + +class SimpleDeploymentValidator( + PropertyValidator, openapi_type="app_blueprint_simple_deployment" +): + __default__ = None + __kind__ = SimpleDeploymentType + + +def simple_deployment(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return SimpleDeploymentType(name, bases, kwargs) + + +SimpleDeployment = simple_deployment() diff --git a/framework/calm/dsl/builtins/models/simple_pod_deployment.py b/framework/calm/dsl/builtins/models/simple_pod_deployment.py new file mode 100644 index 0000000..c409484 --- /dev/null +++ b/framework/calm/dsl/builtins/models/simple_pod_deployment.py @@ -0,0 +1,27 @@ +from .entity import Entity +from .validator import PropertyValidator +from .pod_deployment import PODDeploymentType + + +class SimplePODDeploymentType(PODDeploymentType): + __schema_name__ = "SimplePODDeployment" + __openapi_type__ = "app_simple_pod_deployment" + + def extract_deployment(cls): + return super().extract_deployment(is_simple_deployment=True) + + +class SimplePODDeploymentValidator( + PropertyValidator, openapi_type="app_simple_pod_deployment" +): + __default__ = None + __kind__ = SimplePODDeploymentType + + +def simple_pod_deployment(**kwargs): + name = kwargs.pop("name", None) or getattr(PODDeploymentType, "__schema_name__") + bases = (Entity,) + return SimplePODDeploymentType(name, bases, kwargs) + + +SimplePODDeployment = simple_pod_deployment() diff --git a/framework/calm/dsl/builtins/models/substrate.py b/framework/calm/dsl/builtins/models/substrate.py new file mode 100644 index 0000000..a652666 --- /dev/null +++ b/framework/calm/dsl/builtins/models/substrate.py @@ -0,0 +1,591 @@ +import sys +from distutils.version import LooseVersion as LV + +from .entity import EntityType, Entity, EntityTypeBase, EntityDict +from .validator import PropertyValidator +from .readiness_probe import readiness_probe +from .provider_spec import provider_spec +from .ahv_vm import AhvVmType, ahv_vm +from .client_attrs import update_dsl_metadata_map, get_dsl_metadata_map +from .metadata_payload import get_metadata_obj +from .helper import common as common_helper + +from calm.dsl.config import get_context +from calm.dsl.constants import CACHE, PROVIDER_ACCOUNT_TYPE_MAP +from calm.dsl.store import Cache +from calm.dsl.store import Version +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +# Substrate + + +class SubstrateDict(EntityDict): + @staticmethod + def pre_validate(vdict, name, value): + if name == "readiness_probe": + if isinstance(value, dict): + rp_validator, is_array = vdict[name] + rp_cls_type = rp_validator.get_kind() + return rp_cls_type(None, (Entity,), value) + + return value + + +class SubstrateType(EntityType): + __schema_name__ = "Substrate" + 
__openapi_type__ = "app_substrate" + __prepare_dict__ = SubstrateDict + + ALLOWED_FRAGMENT_ACTIONS = { + "__pre_create__": "pre_action_create", + "__post_delete__": "post_action_delete", + } + + def get_profile_environment(cls): + """returns the profile environment, if substrate has been defined in blueprint file""" + + cls_bp = common_helper._walk_to_parent_with_given_type(cls, "BlueprintType") + environment = {} + if cls_bp: + for cls_profile in cls_bp.profiles: + for cls_deployment in cls_profile.deployments: + if cls_deployment.substrate.name != str(cls): + continue + + environment = getattr(cls_profile, "environment", {}) + if environment: + LOG.debug( + "Found environment {} associated to app-profile {}".format( + environment.get("name"), cls_profile + ) + ) + break + return environment + + def get_referenced_account_uuid(cls): + """ + SUBSTRATE GIVEN UNDER BLUEPRINT + If calm-version < v3.2.0: + 1. account_reference is not available at substrate-level, So need to read from project only + If calm-version >= 3.2.0: + 1. account_reference is available at substrate-level + 1.a: If env is given at profile-level, then account must be whitelisted in environment + 1.b: If env is not given at profile-level, then account must be whitelisted in project + 2. If account_reference is not available at substrate-level + 2.a: If env is given at profile-level, return provider account in env + 2.b: If env is not given at profile-level, return provider account in project + + SUBSTRATE GIVEN UNDER ENVIRONMENT + If calm-version < v3.2.0: + 1. account_reference is not available at substrate-level, So need to read from project only + If calm-version >= 3.2.0: + 1. account_reference is available at substrate-level + 1. account must be filtered at environment + 2. If account_reference is not available at substrate-level + 2.a: return provider account whitelisted in environment + + """ + + provider_account = getattr(cls, "account", {}) + calm_version = Version.get_version("Calm") + provider_type = getattr(cls, "provider_type") + provider_account_type = PROVIDER_ACCOUNT_TYPE_MAP.get(provider_type, "") + if not provider_account_type: + return "" + + # Fetching project data + project_cache_data = common_helper.get_cur_context_project() + project_name = project_cache_data.get("name") + project_accounts = project_cache_data.get("accounts_data", {}).get( + provider_account_type, [] + ) + if not project_accounts: + LOG.error( + "No '{}' account registered to project '{}'".format( + provider_account_type, project_name + ) + ) + sys.exit(-1) + + # If substrate is defined in blueprint file + cls_bp = common_helper._walk_to_parent_with_given_type(cls, "BlueprintType") + if cls_bp: + environment = {} + for cls_profile in cls_bp.profiles: + for cls_deployment in cls_profile.deployments: + if cls_deployment.substrate.name != str(cls): + continue + + environment = getattr(cls_profile, "environment", {}) + if environment: + LOG.debug( + "Found environment {} associated to app-profile {}".format( + environment.get("name"), cls_profile + ) + ) + break + + # If environment is given at profile level + if environment: + environment_cache_data = Cache.get_entity_data_using_uuid( + entity_type=CACHE.ENTITY.ENVIRONMENT, uuid=environment["uuid"] + ) + if not environment_cache_data: + LOG.error( + "Environment {} not found. 
Please run: calm update cache".format( + environment["name"] + ) + ) + sys.exit(-1) + + accounts = environment_cache_data.get("accounts_data", {}).get( + provider_account_type, [] + ) + if not accounts: + LOG.error( + "Environment '{}' has no '{}' account.".format( + environment_cache_data.get("name", ""), + provider_account_type, + ) + ) + sys.exit(-1) + + # If account given at substrate, it should be whitelisted in environment + if provider_account and provider_account["uuid"] != accounts[0]["uuid"]: + LOG.error( + "Account '{}' not filtered in environment '{}'".format( + provider_account["name"], + environment_cache_data.get("name", ""), + ) + ) + sys.exit(-1) + + # If provider_account is not given, then fetch from env + elif not provider_account: + provider_account = { + "name": accounts[0]["name"], + "uuid": accounts[0]["uuid"], + } + + # If environment is not given at profile level + else: + # if provider_account is given, it should be part of project + if not project_accounts: + LOG.error( + "No '{}' account registered to project '{}'".format( + provider_account_type, project_name + ) + ) + sys.exit(-1) + + if ( + provider_account + and provider_account["uuid"] not in project_accounts + ): + LOG.error( + "Account '{}' not filtered in project '{}'".format( + provider_account["name"], project_name + ) + ) + sys.exit(-1) + + # Else take first account in project + elif not provider_account: + provider_account = {"uuid": project_accounts[0], "kind": "account"} + + # If substrate defined inside environment + cls_env = common_helper._walk_to_parent_with_given_type(cls, "EnvironmentType") + if cls_env: + infra = getattr(cls_env, "providers", []) + whitelisted_account = {} + for _pdr in infra: + if _pdr.type == PROVIDER_ACCOUNT_TYPE_MAP[provider_type]: + whitelisted_account = _pdr.account_reference.get_dict() + break + + if LV(calm_version) >= LV("3.2.0"): + if provider_account and provider_account[ + "uuid" + ] != whitelisted_account.get("uuid", ""): + LOG.error( + "Account '{}' not filtered in environment '{}'".format( + provider_account["name"], str(cls_env) + ) + ) + sys.exit(-1) + + elif not whitelisted_account: + LOG.error( + "No account is filtered in environment '{}'".format( + str(cls_env) + ) + ) + sys.exit(-1) + + elif not provider_account: + provider_account = whitelisted_account + + # If version is less than 3.2.0, then it should use account from poroject only, OR + # If no account is supplied, will take 0th account in project (in both case of blueprint/environment) + if not provider_account: + provider_account = {"uuid": project_accounts[0], "kind": "account"} + + return provider_account["uuid"] + + def compile(cls): + + cdict = super().compile() + + readiness_probe_dict = {} + if "readiness_probe" in cdict and cdict["readiness_probe"]: + readiness_probe_dict = cdict["readiness_probe"] + if hasattr(readiness_probe_dict, "compile"): + readiness_probe_dict = readiness_probe_dict.compile() + else: + readiness_probe_dict = readiness_probe().compile() + + # Fill out os specific details if not found + if cdict["os_type"] == "Linux": + if not readiness_probe_dict.get("connection_type", ""): + readiness_probe_dict["connection_type"] = "SSH" + + if not readiness_probe_dict.get("connection_port", ""): + readiness_probe_dict["connection_port"] = 22 + + if not readiness_probe_dict.get("connection_protocol", ""): + readiness_probe_dict["connection_protocol"] = "" + + else: + if not readiness_probe_dict.get("connection_type", ""): + readiness_probe_dict["connection_type"] = "POWERSHELL" + + 
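# --- Illustrative sketch; not part of this patch ---
# The account-resolution rules documented in get_referenced_account_uuid()
# boil down to a small precedence check. The helper below is hypothetical and
# only summarises the order described above for Calm >= 3.2.0:
def resolve_account_uuid(substrate_account, env_accounts, project_accounts):
    """substrate_account: uuid or None; env_accounts / project_accounts: lists of uuids."""
    if substrate_account:
        # An account referenced on the substrate must be whitelisted in the
        # profile environment (when one exists) or in the project.
        allowed = env_accounts if env_accounts else project_accounts
        if substrate_account not in allowed:
            raise ValueError("account not whitelisted for this substrate")
        return substrate_account
    # No account on the substrate: fall back to the environment's provider
    # account, then to the first account registered in the project.
    if env_accounts:
        return env_accounts[0]
    return project_accounts[0]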
if not readiness_probe_dict.get("connection_port", ""): + readiness_probe_dict["connection_port"] = 5985 + + if not readiness_probe_dict.get("connection_protocol", ""): + readiness_probe_dict["connection_protocol"] = "http" + + if cdict.get("vm_recovery_spec", {}) and cdict["type"] != "AHV_VM": + LOG.error( + "Recovery spec is supported only for AHV_VM substrate (given {})".format( + cdict["type"] + ) + ) + sys.exit("Unknown attribute vm_recovery_spec given") + + # Handle cases for empty readiness_probe and vm_recovery_spec + if cdict["type"] == "AHV_VM": + if not readiness_probe_dict.get("address", ""): + readiness_probe_dict[ + "address" + ] = "@@{platform.status.resources.nic_list[0].ip_endpoint_list[0].ip}@@" + + if cdict.get("vm_recovery_spec", {}): + _vrs = cdict.pop("vm_recovery_spec", None) + if _vrs: + cdict["create_spec"] = ahv_vm( + name=_vrs.vm_name, resources=_vrs.vm_override_resources + ) + cdict["recovery_point_reference"] = _vrs.recovery_point + + elif cdict["type"] == "EXISTING_VM": + if not readiness_probe_dict.get("address", ""): + readiness_probe_dict["address"] = "@@{ip_address}@@" + + elif cdict["type"] == "AWS_VM": + if not readiness_probe_dict.get("address", ""): + readiness_probe_dict["address"] = "@@{public_ip_address}@@" + + elif cdict["type"] == "K8S_POD": # Never used (Omit after discussion) + readiness_probe_dict["address"] = "" + cdict.pop("editables", None) + + elif cdict["type"] == "AZURE_VM": + if not readiness_probe_dict.get("address", ""): + readiness_probe_dict[ + "address" + ] = "@@{platform.publicIPAddressList[0]}@@" + + elif cdict["type"] == "VMWARE_VM": + if not readiness_probe_dict.get("address", ""): + readiness_probe_dict["address"] = "@@{platform.ipAddressList[0]}@@" + + elif cdict["type"] == "GCP_VM": + if not readiness_probe_dict.get("address", ""): + readiness_probe_dict[ + "address" + ] = "@@{platform.networkInterfaces[0].accessConfigs[0].natIP}@@" + + else: + raise Exception("Un-supported vm type :{}".format(cdict["type"])) + + if not cdict.get("vm_recovery_spec", {}): + cdict.pop("vm_recovery_spec", None) + + # Adding min defaults in vm spec required by each provider + if not cdict.get("create_spec"): + + # TODO shift them to constants file + provider_type_map = { + "AWS_VM": "aws", + "VMWARE_VM": "vmware", + "AHV_VM": "nutanix_pc", # Accounts of type nutanix are not used after 2.9 + "AZURE_VM": "azure", + "GCP_VM": "gcp", + } + + if cdict["type"] in provider_type_map: + if cdict["type"] == "AHV_VM": + # UI expects defaults. Jira: https://jira.nutanix.com/browse/CALM-20134 + if not cdict.get("create_spec"): + cdict["create_spec"] = {"resources": {"nic_list": []}} + + else: + # Getting the account_uuid for each provider + # Getting the metadata obj + metadata_obj = get_metadata_obj() + project_ref = metadata_obj.get("project_reference") or dict() + + # If project not found in metadata, it will take project from config + ContextObj = get_context() + project_config = ContextObj.get_project_config() + project_name = project_ref.get("name", project_config["name"]) + + project_cache_data = Cache.get_entity_data( + entity_type=CACHE.ENTITY.PROJECT, name=project_name + ) + if not project_cache_data: + LOG.error( + "Project {} not found. 
Please run: calm update cache".format( + project_name + ) + ) + sys.exit(-1) + + # Registered accounts + project_accounts = project_cache_data["accounts_data"] + provider_type = provider_type_map[cdict["type"]] + account_uuids = project_accounts.get(provider_type, []) + if not account_uuids: + LOG.error( + "No {} account registered in project '{}'".format( + provider_type, project_name + ) + ) + sys.exit(-1) + + # Adding default spec + cdict["create_spec"] = { + "resources": {"account_uuid": account_uuids[0]} + } + + # Template attribute should be present for vmware spec + if cdict["type"] == "VMWARE_VM": + cdict["create_spec"]["template"] = "" + + # Modifying the editable object + provider_spec_editables = cdict.pop("editables", {}) + cdict["editables"] = {} + + if provider_spec_editables: + cdict["editables"]["create_spec"] = provider_spec_editables + + # Popping out the editables from readiness_probe + readiness_probe_editables = readiness_probe_dict.pop("editables_list", []) + if readiness_probe_editables: + cdict["editables"]["readiness_probe"] = { + k: True for k in readiness_probe_editables + } + + # In case we have read provider_spec from a yaml file, validate that we have consistent values for + # Substrate.account (if present) and account_uuid in provider_spec (if present). + # The account_uuid mentioned in provider_spec yaml should be a registered PE under the Substrate.account PC + + substrate_account_uuid = cls.get_referenced_account_uuid() + spec_account_uuid = "" + try: + spec_account_uuid = cdict["create_spec"]["resources"]["account_uuid"] + except (AttributeError, TypeError, KeyError): + pass + + if substrate_account_uuid: + account_cache_data = Cache.get_entity_data_using_uuid( + entity_type="account", uuid=substrate_account_uuid + ) + if not account_cache_data: + LOG.error( + "Account (uuid={}) not found. Please update cache".format( + substrate_account_uuid + ) + ) + sys.exit(-1) + account_name = account_cache_data["name"] + + if spec_account_uuid: + if cdict["type"] == "AHV_VM": + if ( + not account_cache_data.get("data", {}) + .get("clusters", {}) + .get(spec_account_uuid) + ): + LOG.error( + "cluster account_uuid (uuid={}) used in the provider spec is not found to be registered" + " under the Nutanix PC account {}. 
Please update cache".format( + spec_account_uuid, account_name + ) + ) + sys.exit(-1) + + elif cdict["type"] != "EXISTING_VM": + if spec_account_uuid != substrate_account_uuid: + LOG.error( + "Account '{}'(uuid='{}') not matched with account_uuid used in provider-spec (uuid={})".format( + account_name, substrate_account_uuid, spec_account_uuid + ) + ) + sys.exit(-1) + + else: + # if account_uuid is not available add it + if cdict["type"] == "AHV_VM": + + # default is first cluster account + account_uuid = list(account_cache_data["data"]["clusters"].keys())[ + 0 + ] + + _cs = cdict["create_spec"] + + if isinstance(_cs, AhvVmType): + # NOTE: We cann't get subnet_uuid here, as it involved parent reference + subnet_name = "" + cluster_name = _cs.cluster or "" + _nics = _cs.resources.nics + if cluster_name: + account_uuid = common_helper.get_pe_account_using_pc_account_uuid_and_cluster_name( + pc_account_uuid=substrate_account_uuid, + cluster_name=cluster_name, + ) + else: + for _nic in _nics: + _nic_dict = _nic.subnet_reference.get_dict() + if _nic_dict["cluster"] and not common_helper.is_macro( + _nic_dict["name"] + ): + subnet_name = _nic_dict["name"] + cluster_name = _nic_dict["cluster"] + break + + # calm_version = Version.get_version("Calm") + # if LV(calm_version) >= LV("3.5.0") and not cluster_name: + # raise Exception("Unable to infer cluster for vm") + if subnet_name: + account_uuid = common_helper.get_pe_account_uuid_using_pc_account_uuid_and_nic_data( + pc_account_uuid=substrate_account_uuid, + subnet_name=subnet_name, + cluster_name=cluster_name, + ) + + # Assigning the pe account uuid to ahv vm resources + _cs.resources.account_uuid = account_uuid + + else: + subnet_uuid = "" + _nics = _cs.get("resources", {}).get("nic_list", []) + + for _nic in _nics: + _nu = _nic["subnet_reference"].get("uuid", "") + if _nu and not common_helper.is_macro(_nu): + subnet_uuid = _nu + break + + if subnet_uuid: + account_uuid = common_helper.get_pe_account_uuid_using_pc_account_uuid_and_subnet_uuid( + pc_account_uuid=substrate_account_uuid, + subnet_uuid=subnet_uuid, + ) + + cdict["create_spec"]["resources"]["account_uuid"] = account_uuid + + # Add account uuid for non-ahv providers + if cdict["type"] not in ["EXISTING_VM", "AHV_VM", "K8S_POD"]: + cdict["create_spec"]["resources"]["account_uuid"] = substrate_account_uuid + + cdict.pop("account_reference", None) + cdict["readiness_probe"] = readiness_probe_dict + + return cdict + + def pre_compile(cls): + """Adds Ahvvm data to substrate metadata""" + super().pre_compile() + + # Adding mapping for substrate class in case of AHV provider + types = EntityTypeBase.get_entity_types() + AhvVmType = types.get("AhvVm", None) + + provider_spec = cls.provider_spec + if isinstance(provider_spec, AhvVmType): + ui_name = getattr(cls, "name", "") or cls.__name__ + sub_metadata = get_dsl_metadata_map([cls.__schema_name__, ui_name]) + + vm_dsl_name = provider_spec.__name__ + vm_display_name = getattr(provider_spec, "name", "") or vm_dsl_name + + sub_metadata[AhvVmType.__schema_name__] = { + vm_display_name: {"dsl_name": vm_dsl_name} + } + + update_dsl_metadata_map( + cls.__schema_name__, entity_name=ui_name, entity_obj=sub_metadata + ) + + @classmethod + def pre_decompile(mcls, cdict, context=[], prefix=""): + + # Handle provider_spec + cdict = super().pre_decompile(cdict, context, prefix=prefix) + cdict["create_spec"] = provider_spec(cdict["create_spec"]) + + if "__name__" in cdict: + cdict["__name__"] = "{}{}".format(prefix, cdict["__name__"]) + + return cdict + + 
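# --- Illustrative sketch; not part of this patch ---
# SubstrateType.compile() above falls back to a provider-specific address
# macro when the readiness probe does not set one. The same defaults, written
# out as a plain lookup table for quick reference:
DEFAULT_PROBE_ADDRESS = {
    "AHV_VM": "@@{platform.status.resources.nic_list[0].ip_endpoint_list[0].ip}@@",
    "EXISTING_VM": "@@{ip_address}@@",
    "AWS_VM": "@@{public_ip_address}@@",
    "AZURE_VM": "@@{platform.publicIPAddressList[0]}@@",
    "VMWARE_VM": "@@{platform.ipAddressList[0]}@@",
    "GCP_VM": "@@{platform.networkInterfaces[0].accessConfigs[0].natIP}@@",
    "K8S_POD": "",  # pod substrates carry no readiness-probe address
}

def default_probe_address(substrate_type):
    # Mirrors the per-provider defaults applied in SubstrateType.compile().
    if substrate_type not in DEFAULT_PROBE_ADDRESS:
        raise Exception("Un-supported vm type :{}".format(substrate_type))
    return DEFAULT_PROBE_ADDRESS[substrate_type]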
@classmethod + def decompile(mcls, cdict, context=[], prefix=""): + + if cdict["type"] == "K8S_POD": + LOG.error("Decompilation support for pod deployments is not available.") + sys.exit(-1) + + cls = super().decompile(cdict, context=context, prefix=prefix) + + provider_spec = cls.provider_spec + if cls.provider_type == "AHV_VM": + context = [cls.__schema_name__, getattr(cls, "name", "") or cls.__name__] + vm_cls = AhvVmType.decompile(provider_spec, context=context, prefix=prefix) + + cls.provider_spec = vm_cls + + return cls + + def get_task_target(cls): + return cls.get_ref() + + +class SubstrateValidator(PropertyValidator, openapi_type="app_substrate"): + __default__ = None + __kind__ = SubstrateType + + +def substrate(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return SubstrateType(name, bases, kwargs) + + +Substrate = substrate() diff --git a/framework/calm/dsl/builtins/models/task.py b/framework/calm/dsl/builtins/models/task.py new file mode 100644 index 0000000..63536cc --- /dev/null +++ b/framework/calm/dsl/builtins/models/task.py @@ -0,0 +1,1625 @@ +import enum +import uuid +import os +import sys + + +from .entity import EntityType, Entity +from .validator import PropertyValidator +from .ref import RefType +from .task_input import TaskInputType +from .variable import CalmVariable +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +class Status(enum.Enum): + + SUCCESS = 1 + FAILURE = 2 + DONT_CARE = 3 + + +EXIT_CONDITION_MAP = { + Status.SUCCESS: "on_success", + Status.FAILURE: "on_failure", + Status.DONT_CARE: "dont_care", +} + +# Task + + +class TaskType(EntityType): + __schema_name__ = "Task" + __openapi_type__ = "app_task" + + def compile(cls): + cdict = super().compile() + if (cdict.get("target_any_local_reference", None) or None) is None: + cdict.pop("target_any_local_reference", None) + if (cdict.get("exec_target_reference", None) or None) is None: + cdict.pop("exec_target_reference", None) + return cdict + + @classmethod + def pre_decompile(mcls, cdict, context=[], prefix=""): + + cdict = super().pre_decompile(cdict, context=context, prefix=prefix) + # Removing additional attributes + cdict.pop("state", None) + cdict.pop("message_list", None) + + if "__name__" in cdict: + cdict["__name__"] = "{}{}".format(prefix, cdict["__name__"]) + + return cdict + + @classmethod + def decompile(mcls, cdict, context=[], prefix=""): + + attrs = cdict.get("attrs", None) or dict() + + cred = attrs.get("login_credential_local_reference", None) + if cred: + attrs["login_credential_local_reference"] = RefType.decompile( + cred, prefix=prefix + ) + + task_type = cdict.get("type", None) or "" + + # If task is of type DAG, decompile references there also + if task_type == "DAG": + edges = attrs.get("edges", None) or [] + final_edges = [] + for edge in edges: + final_edges.append( + { + "from_task_reference": RefType.decompile( + edge["from_task_reference"], prefix=prefix + ), + "to_task_reference": RefType.decompile( + edge["to_task_reference"], prefix=prefix + ), + } + ) + if final_edges: + attrs["edges"] = final_edges + + elif task_type == "CALL_RUNBOOK": + attrs["runbook_reference"] = RefType.decompile( + attrs["runbook_reference"], prefix=prefix + ) + + elif task_type == "CALL_CONFIG": + attrs["config_spec_reference"] = attrs["config_spec_reference"]["name"] + + elif task_type == "HTTP": + + auth_obj = attrs.get("authentication", {}) + auth_type = auth_obj.get("type", "") + + # Note For decompiling, only authentication object of type 
'basic_with_cred' works bcz we cann't take secret values at client side + if auth_type == "basic_with_cred": + auth_cred = auth_obj.get("credential_local_reference", None) + if auth_cred: + auth_obj["credential_local_reference"] = RefType.decompile( + auth_cred, prefix=prefix + ) + + cdict["attrs"] = attrs + + return super().decompile(cdict, context=context, prefix=prefix) + + +class TaskValidator(PropertyValidator, openapi_type="app_task"): + __default__ = None + __kind__ = TaskType + + +def _task(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return TaskType(name, bases, kwargs) + + +Task = _task() + + +def _get_target_ref(target): + """ + Get the target reference. Converts target to a ref if it is an entity. + Args: + target (Entity/Ref): Entity/Ref that is the target for this task + Returns: + (Ref): Target reference + """ + if target is not None: + if not isinstance(target, RefType) and isinstance(target, EntityType): + target = target.get_ref() + return target + + +def _task_create(**kwargs): + + name = kwargs.get("name", kwargs.pop("__name__", None)) + if name is None: + name = "_" + getattr(TaskType, "__schema_name__") + str(uuid.uuid4())[:8] + kwargs["name"] = name + + return _task(**kwargs) + + +def create_call_rb(runbook, target=None, name=None): + kwargs = { + "name": name + or "Call_Runbook_task_for_{}__{}".format(runbook.name, str(uuid.uuid4())[:8]), + "type": "CALL_RUNBOOK", + "attrs": {"runbook_reference": runbook.get_ref()}, + } + if target is not None: + kwargs["target_any_local_reference"] = _get_target_ref(target) + else: + main_dag = [ + task + for task in runbook.tasks + if task.name == runbook.main_task_local_reference.name + ][0] + kwargs["target_any_local_reference"] = main_dag.target_any_local_reference + + if main_dag.target_any_local_reference.kind != "app_service": + LOG.error("Runbook Tasks are only allowed for service actions") + sys.exit(-1) + + return _task_create(**kwargs) + + +def create_call_config(target, config, name): + kwargs = { + "name": name + or "Call_Config_task_for_{}__{}".format(target.name, str(uuid.uuid4())[:8]), + "type": "CALL_CONFIG", + "attrs": {"config_spec_reference": _get_target_ref(config)}, + } + kwargs["target_any_local_reference"] = _get_target_ref(target) + return _task_create(**kwargs) + + +def _exec_create( + script_type, + script=None, + filename=None, + name=None, + target=None, + target_endpoint=None, + cred=None, + depth=2, + tunnel=None, + **kwargs, +): + if script is not None and filename is not None: + raise ValueError( + "Only one of script or filename should be given for exec task " + + (name or "") + ) + if script_type != "static" and tunnel is not None: + raise ValueError("Tunnel is supported only for Escript script type") + + if filename is not None: + file_path = os.path.join( + os.path.dirname(sys._getframe(depth).f_globals.get("__file__")), filename + ) + with open(file_path, "r") as scriptf: + script = scriptf.read() + + if script is None: + raise ValueError( + "One of script or filename is required for exec task " + (name or "") + ) + params = { + "name": name, + "type": "EXEC", + "attrs": {"script_type": script_type, "script": script}, + } + if cred is not None: + params["attrs"]["login_credential_local_reference"] = _get_target_ref(cred) + if target is not None: + params["target_any_local_reference"] = _get_target_ref(target) + if target_endpoint is not None: + params["exec_target_reference"] = _get_target_ref(target_endpoint) + if tunnel is not None: + params["attrs"]["tunnel_reference"] 
= tunnel + if "inherit_target" in kwargs: + params["inherit_target"] = kwargs.get("inherit_target") + return _task_create(**params) + + +def _decision_create( + script_type, + script=None, + filename=None, + name=None, + target=None, + cred=None, + depth=2, + tunnel=None, + **kwargs, +): + if script is not None and filename is not None: + raise ValueError( + "Only one of script or filename should be given for decision task " + + (name or "") + ) + + if filename is not None: + file_path = os.path.join( + os.path.dirname(sys._getframe(depth).f_globals.get("__file__")), filename + ) + + with open(file_path, "r") as scriptf: + script = scriptf.read() + + if script is None: + raise ValueError( + "One of script or filename is required for decision task " + (name or "") + ) + + if script_type != "static" and tunnel is not None: + raise ValueError("Tunnel is support only for Escript script type") + + params = { + "name": name, + "type": "DECISION", + "attrs": {"script_type": script_type, "script": script}, + } + if cred is not None: + params["attrs"]["login_credential_local_reference"] = _get_target_ref(cred) + if target is not None: + params["target_any_local_reference"] = _get_target_ref(target) + if tunnel is not None: + params["attrs"]["tunnel_reference"] = tunnel + if "inherit_target" in kwargs: + params["inherit_target"] = kwargs.get("inherit_target") + return _task_create(**params) + + +def dag(name=None, child_tasks=None, edges=None, target=None): + """ + Create a DAG task + Args: + name (str): Name for the task + child_tasks (list [Task]): Child tasks within this dag + edges (list [tuple (Ref, Ref)]): List of tuples of ref(Task). + Each element denotes an edge from + first task to the second. + target (Ref): Target entity reference + Returns: + (Task): DAG task + """ + dag_edges = [] + for edge in edges or []: + if len(edge) != 2: + raise ValueError("DAG edges require a tuple of two task references") + for task_ref in edge: + if not getattr(task_ref, "__kind__") == "app_ref": + raise ValueError("{} is not a valid task reference".format(task_ref)) + from_ref = edge[0] + to_ref = edge[1] + dag_edges.append({"from_task_reference": from_ref, "to_task_reference": to_ref}) + + # This follows UI naming convention for runbooks + name = name or str(uuid.uuid4())[:8] + "_dag" + kwargs = { + "name": name, + "child_tasks_local_reference_list": [ + task.get_ref() for task in child_tasks or [] + ], + "attrs": {"edges": dag_edges}, + "type": "DAG", + } + if target: + kwargs["target_any_local_reference"] = target + + return _task_create(**kwargs) + + +def parallel_task(name=None, child_tasks=[], attrs={}): + """ + Create a PARALLEL task + Args: + name (str): Name for the task + child_tasks (list [Task]): Child tasks within this dag + attrs (dict): Task's attrs + Returns: + (Task): PARALLEL task + """ + + # This follows UI naming convention for runbooks + name = name or str(uuid.uuid4())[:8] + "_parallel" + kwargs = { + "name": name, + "child_tasks_local_reference_list": [ + task.get_ref() for task in child_tasks or [] + ], + "type": "PARALLEL", + } + + return _task_create(**kwargs) + + +def while_loop(name=None, child_tasks=[], attrs={}, **kwargs): + """ + Create a WHILE LOOP + Args: + name (str): Name for the task + child_tasks (list [Task]): Child tasks within this dag + attrs (dict): Task's attrs + :keyword inherit_target (bool): True if target needs to be inherited. 
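# --- Illustrative sketch; not part of this patch ---
# dag() above converts (from_ref, to_ref) tuples into the edge dicts stored in
# the DAG task's attrs. A standalone rendition of that conversion, with plain
# strings standing in for Ref objects (illustration only):
def build_dag_edges(edges):
    dag_edges = []
    for edge in edges or []:
        if len(edge) != 2:
            raise ValueError("DAG edges require a tuple of two task references")
        from_ref, to_ref = edge
        dag_edges.append(
            {"from_task_reference": from_ref, "to_task_reference": to_ref}
        )
    return dag_edges

# build_dag_edges([("InstallTask", "ConfigureTask")])
# -> [{'from_task_reference': 'InstallTask', 'to_task_reference': 'ConfigureTask'}]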
+ Returns: + (Task): WHILE task + """ + + # This follows UI naming convention for runbooks + name = name or str(uuid.uuid4())[:8] + "_while_loop" + params = { + "name": name, + "child_tasks_local_reference_list": [ + task.get_ref() for task in child_tasks or [] + ], + "type": "WHILE_LOOP", + "attrs": attrs, + } + if "inherit_target" in kwargs: + params["inherit_target"] = kwargs.get("inherit_target") + return _task_create(**params) + + +def meta(name=None, child_tasks=None, edges=None, target=None): + """ + Create a META task + Args: + name (str): Name for the task + child_tasks (list [Task]): Child tasks within this dag + edges (list [tuple (Ref, Ref)]): List of tuples of ref(Task). + Each element denotes an edge from + first task to the second. + target (Ref): Target entity reference + Returns: + (Task): DAG task + """ + # This follows UI naming convention for runbooks + name = name or str(uuid.uuid4())[:8] + "_meta" + kwargs = { + "name": name, + "child_tasks_local_reference_list": [ + task.get_ref() for task in child_tasks or [] + ], + "type": "META", + } + if target: + kwargs["target_any_local_reference"] = target + + return _task_create(**kwargs) + + +def exec_task_ssh( + script=None, + filename=None, + name=None, + target=None, + target_endpoint=None, + cred=None, + depth=2, + **kwargs, +): + return _exec_create( + "sh", + script=script, + filename=filename, + name=name, + target=target, + target_endpoint=target_endpoint, + cred=cred, + depth=depth, + **kwargs, + ) + + +def exec_task_escript( + script=None, filename=None, name=None, target=None, depth=2, tunnel=None, **kwargs +): + return _exec_create( + "static", + script=script, + filename=filename, + name=name, + target=target, + target_endpoint=None, + depth=depth, + tunnel=tunnel, + **kwargs, + ) + + +def exec_task_powershell( + script=None, + filename=None, + name=None, + target=None, + target_endpoint=None, + cred=None, + depth=2, + **kwargs, +): + return _exec_create( + "npsscript", + script=script, + filename=filename, + name=name, + target=target, + target_endpoint=target_endpoint, + cred=cred, + depth=depth, + **kwargs, + ) + + +def exec_task_ssh_runbook( + script=None, filename=None, name=None, target=None, cred=None, depth=2, **kwargs +): + """ + This function is used to create exec task with shell target + Args: + script(str): Script which needs to be run + filename(str): file which has script + name(str): Task name + target(Entity/Ref): Entity/Ref that is the target for this task + cred (Entity/Ref): Entity/Ref that is the cred for this task + depth (int): Number of times to look back in call stack, will be used to locate filename specified + :keyword inherit_target (bool): True if target needs to be inherited. + Returns: + obj: Exec task object + """ + return _exec_create( + "sh", + script=script, + filename=filename, + name=name, + target=target, + cred=cred, + depth=depth, + **kwargs, + ) + + +def exec_task_powershell_runbook( + script=None, filename=None, name=None, target=None, cred=None, depth=2, **kwargs +): + """ + This function is used to create exec task with shell target + Args: + script(str): Script which needs to be run + filename(str): file which has script + name(str): Task name + target(Entity/Ref): Entity/Ref that is the target for this task + cred (Entity/Ref): Entity/Ref that is the cred for this task + depth (int): Number of times to look back in call stack, will be used to locate filename specified + :keyword inherit_target (bool): True if target needs to be inherited. 
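# --- Illustrative note; not part of this patch ---
# The exec helpers above differ only in the script_type they hand to
# _exec_create(); the same mapping is reused by the decision and set-variable
# helpers further below. Summarised as a plain lookup:
SCRIPT_TYPE_BY_HELPER = {
    "ssh": "sh",                # exec_task_ssh / exec_task_ssh_runbook
    "escript": "static",        # exec_task_escript
    "powershell": "npsscript",  # exec_task_powershell / exec_task_powershell_runbook
}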
+ Returns: + obj: Exec task object + """ + return _exec_create( + "npsscript", + script=script, + filename=filename, + name=name, + target=target, + cred=cred, + depth=depth, + **kwargs, + ) + + +def decision_task_ssh( + script=None, filename=None, name=None, target=None, cred=None, depth=2, **kwargs +): + """ + This function is used to create decision task with shell target + Args: + script(str): Script which needs to be run + filename(str): file which has script + name(str): Task name + target(Entity/Ref): Entity/Ref that is the target for this task + cred (Entity/Ref): Entity/Ref that is the cred for this task + depth (int): Number of times to look back in call stack, will be used to locate filename specified + :keyword inherit_target (bool): True if target needs to be inherited. + Returns: + obj: Decision task object + """ + return _decision_create( + "sh", + script=script, + filename=filename, + name=name, + target=target, + cred=cred, + depth=depth, + **kwargs, + ) + + +def decision_task_powershell( + script=None, filename=None, name=None, target=None, cred=None, depth=2, **kwargs +): + """ + This function is used to create decision task with powershell target + Args: + script(str): Script which needs to be run + filename(str): file which has script + name(str): Task name + target(Entity/Ref): Entity/Ref that is the target for this task + cred (Entity/Ref): Entity/Ref that is the cred for this task + depth (int): Number of times to look back in call stack, will be used to locate filename specified + :keyword inherit_target (bool): True if target needs to be inherited. + Returns: + obj: Decision task object + """ + return _decision_create( + "npsscript", + script=script, + filename=filename, + name=name, + target=target, + cred=cred, + depth=depth, + **kwargs, + ) + + +def decision_task_escript( + script=None, + filename=None, + name=None, + target=None, + cred=None, + depth=2, + tunnel=None, + **kwargs, +): + """ + This function is used to create decision task with escript target + Args: + script(str): Script which needs to be run + filename(str): file which has script + name(str): Task name + target(Entity/Ref): Entity/Ref that is the target for this task + cred (Entity/Ref): Entity/Ref that is the cred for this task + depth (int): Number of times to look back in call stack, will be used to locate filename specified + tunnel (ref.Tunnel): Tunnel reference + :keyword inherit_target (bool): True if target needs to be inherited. 
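# --- Illustrative usage sketch; not part of this patch ---
# Assuming these helpers are re-exported by the package (e.g. as
# RunbookTask.Decision, defined further below), a decision task built from an
# inline escript might look like this; the import path is an assumption:
from calm.dsl.builtins import RunbookTask

check_flag = RunbookTask.Decision.escript(
    name="CheckFlag",
    script="print('checking')",  # hypothetical script body
)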
+ Returns: + obj: Decision task object + """ + return _decision_create( + "static", + script=script, + filename=filename, + name=name, + target=target, + cred=cred, + depth=depth, + tunnel=tunnel, + **kwargs, + ) + + +def _set_variable_create(task, variables=None): + task.type = "SET_VARIABLE" + eval_variables = [] + for var in variables or []: + if not isinstance(var, str): + raise TypeError( + "Expected string in set variable task variables list, got {}".format( + type(var) + ) + ) + eval_variables.append(var) + task.attrs["eval_variables"] = eval_variables + return task + + +def set_variable_task_ssh( + script=None, + filename=None, + name=None, + target=None, + target_endpoint=None, + variables=None, + depth=3, + cred=None, + **kwargs, +): + """ + This function is used to create set variable task with shell target + Args: + script(str): Script which needs to be run + filename(str): file which has script + name(str): Task name + target(Entity/Ref): Entity/Ref that is the target for this task + cred (Entity/Ref): Entity/Ref that is the cred for this task + depth (int): Number of times to look back in call stack, will be used to locate filename specified + :keyword inherit_target (bool): True if target needs to be inherited. + Returns: + obj: Set variable task object + """ + task = exec_task_ssh( + script=script, + filename=filename, + name=name, + target=target, + target_endpoint=target_endpoint, + depth=depth, + cred=cred, + **kwargs, + ) + return _set_variable_create(task, variables) + + +def set_variable_task_escript( + script=None, + filename=None, + name=None, + target=None, + variables=None, + depth=3, + tunnel=None, + **kwargs, +): + """ + This function is used to create set variable task with escript target + Args: + script(str): Script which needs to be run + filename(str): file which has script + name(str): Task name + target(Entity/Ref): Entity/Ref that is the target for this task + cred (Entity/Ref): Entity/Ref that is the cred for this task + depth (int): Number of times to look back in call stack, will be used to locate filename specified + tunnel (Ref.Tunnel): Tunnel reference + :keyword inherit_target (bool): True if target needs to be inherited. + Returns: + obj: Set variable task object + """ + task = exec_task_escript( + script=script, + filename=filename, + name=name, + target=target, + depth=depth, + tunnel=tunnel, + **kwargs, + ) + return _set_variable_create(task, variables) + + +def set_variable_task_powershell( + script=None, + filename=None, + name=None, + target=None, + target_endpoint=None, + variables=None, + depth=3, + cred=None, + **kwargs, +): + """ + This function is used to create set variable task with powershell target + Args: + script(str): Script which needs to be run + filename(str): file which has script + name(str): Task name + target(Entity/Ref): Entity/Ref that is the target for this task + cred (Entity/Ref): Entity/Ref that is the cred for this task + depth (int): Number of times to look back in call stack, will be used to locate filename specified + :keyword inherit_target (bool): True if target needs to be inherited. 
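# --- Illustrative usage sketch; not part of this patch ---
# A set-variable task is an exec task whose type is switched to SET_VARIABLE
# and whose eval_variables list names the variables to capture from the
# script's output. Assuming the helper is re-exported as
# CalmTask.SetVariable.escript (the import path is an assumption):
from calm.dsl.builtins import CalmTask

fetch_ip = CalmTask.SetVariable.escript(
    name="FetchIP",
    script="print('ip=10.0.0.5')",  # hypothetical script body
    variables=["ip"],
)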
+ Returns: + obj: Set variable task object + """ + task = exec_task_powershell( + script=script, + filename=filename, + name=name, + target=target, + target_endpoint=target_endpoint, + depth=depth, + cred=cred, + **kwargs, + ) + return _set_variable_create(task, variables) + + +def http_task_on_endpoint( + method, + relative_url=None, + body=None, + headers=None, + secret_headers=None, + content_type=None, + status_mapping=None, + response_paths=None, + name=None, + target=None, + **kwargs, +): + """ + + Defines a HTTP Task on http endpoint target. + + Args: + method (str): HTTP method ("GET", "POST", "PUT", "DELETE", ..) + headers (dict): Request headers + secret_headers (dict): Request headers that are to be masked + content_type (string): Request Content-Type (application/json, application/xml, etc.) + status_mapping (dict): Mapping of Response status code (int) to + task status (True: success, False: Failure) + response_paths (dict): Mapping of variable name (str) to path in response (str) + name (str): Task name + target (Ref): Target entity that this task runs under. + :keyword inherit_target (bool): True if target needs to be inherited. + Returns: + (Task): HTTP Task + """ + return http_task( + method, + "", # As url is present is target endpoint + body=body, + relative_url=relative_url, + headers=headers, + secret_headers=secret_headers, + content_type=content_type, + status_mapping=status_mapping, + response_paths=response_paths, + name=name, + target=target, + **kwargs, + ) + + +def http_task_get_on_endpoint(**kwargs): + """ + + Defines a HTTP GET Task on http endpoint target. + + Args: + kwargs (Ref): keyword arguments for http task on endpoint + Returns: + (Task): HTTP Task + """ + return http_task_on_endpoint("GET", **kwargs) + + +def http_task_post_on_endpoint(**kwargs): + """ + + Defines a HTTP POST Task on http endpoint target. + + Args: + kwargs (Ref): keyword arguments for http task on endpoint + Returns: + (Task): HTTP Task + """ + return http_task_on_endpoint("POST", **kwargs) + + +def http_task_put_on_endpoint(**kwargs): + """ + + Defines a HTTP PUT Task on http endpoint target. + + Args: + kwargs (Ref): keyword arguments for http task on endpoint + Returns: + (Task): HTTP Task + """ + return http_task_on_endpoint("PUT", **kwargs) + + +def http_task_delete_on_endpoint(**kwargs): + """ + + Defines a HTTP GET Task on http endpoint target. + + Args: + kwargs (Ref): keyword arguments for http task on endpoint + Returns: + (Task): HTTP Task + """ + return http_task_on_endpoint("DELETE", **kwargs) + + +def http_task_get( + url, + body=None, + headers=None, + secret_headers=None, + credential=None, + content_type=None, + timeout=120, + verify=False, + retries=0, + retry_interval=10, + status_mapping=None, + response_paths=None, + name=None, + target=None, + cred=None, + tunnel=None, +): + """ + + Defines a HTTP GET Task. + + Args: + url (str): Request URL (https://example.com/dummy_url) + headers (dict): Request headers + secret_headers (dict): Request headers that are to be masked + credential (Credential): Credential object. Currently only supports basic auth. + cred (Credential reference): Used for basic_with_cred authentication + content_type (string): Request Content-Type (application/json, application/xml, etc.) + timeout (int): Request timeout in seconds (Default: 120) + verify (bool): TLS verify (Default: False) + retries (int): Number of times to retry this request if it fails. 
(Default: 0) + retry_interval (int): Time to wait in seconds between retries (Default: 10) + status_mapping (dict): Mapping of Response status code (int) to + task status (True: success, False: Failure) + response_paths (dict): Mapping of variable name (str) to path in response (str) + name (str): Task name + target (Ref): Target entity that this task runs under. + tunnel (Ref.Tunnel): Tunnel reference + Returns: + (Task): HTTP Task + """ + return http_task( + "GET", + url, + body=None, + headers=headers, + secret_headers=secret_headers, + credential=credential, + cred=cred, + content_type=content_type, + timeout=timeout, + verify=verify, + retries=retries, + retry_interval=retry_interval, + status_mapping=status_mapping, + response_paths=response_paths, + name=name, + target=target, + tunnel=tunnel, + ) + + +def http_task_post( + url, + body=None, + headers=None, + secret_headers=None, + credential=None, + content_type=None, + timeout=120, + verify=False, + retries=0, + retry_interval=10, + status_mapping=None, + response_paths=None, + name=None, + target=None, + cred=None, + tunnel=None, +): + """ + + Defines a HTTP POST Task. + + Args: + url (str): Request URL (https://example.com/dummy_url) + body (str): Request body + headers (dict): Request headers + secret_headers (dict): Request headers that are to be masked + credential (Credential): Credential object. Currently only supports basic auth. + cred (Credential reference): Used for basic_with_cred authentication + content_type (string): Request Content-Type (application/json, application/xml, etc.) + timeout (int): Request timeout in seconds (Default: 120) + verify (bool): TLS verify (Default: False) + retries (int): Number of times to retry this request if it fails. (Default: 0) + retry_interval (int): Time to wait in seconds between retries (Default: 10) + status_mapping (dict): Mapping of Response status code (int) to + task status (True: success, False: Failure) + response_paths (dict): Mapping of variable name (str) to path in response (str) + name (str): Task name + target (Ref): Target entity that this task runs under. + tunnel (Ref.Tunnel): Tunnel reference + Returns: + (Task): HTTP Task + """ + return http_task( + "POST", + url, + body=body, + headers=headers, + secret_headers=secret_headers, + credential=credential, + cred=cred, + content_type=content_type, + timeout=timeout, + verify=verify, + retries=retries, + retry_interval=retry_interval, + status_mapping=status_mapping, + response_paths=response_paths, + name=name, + target=target, + tunnel=tunnel, + ) + + +def http_task_put( + url, + body=None, + headers=None, + secret_headers=None, + credential=None, + content_type=None, + timeout=120, + verify=False, + retries=0, + retry_interval=10, + status_mapping=None, + response_paths=None, + name=None, + target=None, + cred=None, + tunnel=None, +): + """ + + Defines a HTTP PUT Task. + + Args: + url (str): Request URL (https://example.com/dummy_url) + body (str): Request body + headers (dict): Request headers + secret_headers (dict): Request headers that are to be masked + credential (Credential): Credential object. Currently only supports basic auth. + cred (Credential reference): Used for basic_with_cred authentication + content_type (string): Request Content-Type (application/json, application/xml, etc.) + timeout (int): Request timeout in seconds (Default: 120) + verify (bool): TLS verify (Default: False) + retries (int): Number of times to retry this request if it fails. 
(Default: 0) + retry_interval (int): Time to wait in seconds between retries (Default: 10) + status_mapping (dict): Mapping of Response status code (int) to + task status (True: success, False: Failure) + response_paths (dict): Mapping of variable name (str) to path in response (str) + name (str): Task name + target (Ref): Target entity that this task runs under. + tunnel (Ref.Tunnel): Tunnel reference + Returns: + (Task): HTTP Task + """ + return http_task( + "PUT", + url, + body=body, + headers=headers, + secret_headers=secret_headers, + credential=credential, + cred=cred, + content_type=content_type, + timeout=timeout, + verify=verify, + retries=retries, + retry_interval=retry_interval, + status_mapping=status_mapping, + response_paths=response_paths, + name=name, + target=target, + tunnel=tunnel, + ) + + +def http_task_delete( + url, + body=None, + headers=None, + secret_headers=None, + credential=None, + content_type=None, + timeout=120, + verify=False, + retries=0, + retry_interval=10, + status_mapping=None, + response_paths=None, + name=None, + target=None, + cred=None, + tunnel=None, +): + """ + + Defines a HTTP DELETE Task. + + Args: + url (str): Request URL (https://example.com/dummy_url) + body (str): Request body + headers (dict): Request headers + secret_headers (dict): Request headers that are to be masked + credential (Credential): Credential object. Currently only supports basic auth. + cred (Credential reference): Used for basic_with_cred authentication + content_type (string): Request Content-Type (application/json, application/xml, etc.) + timeout (int): Request timeout in seconds (Default: 120) + verify (bool): TLS verify (Default: False) + retries (int): Number of times to retry this request if it fails. (Default: 0) + retry_interval (int): Time to wait in seconds between retries (Default: 10) + status_mapping (dict): Mapping of Response status code (int) to + task status (True: success, False: Failure) + response_paths (dict): Mapping of variable name (str) to path in response (str) + name (str): Task name + target (Ref): Target entity that this task runs under. 
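# --- Illustrative usage sketch; not part of this patch ---
# Assuming the helpers are re-exported as CalmTask.HTTP.* (defined further
# below), a GET task that maps response codes to task status and extracts a
# value from the response might look like this; the URL and response path are
# hypothetical:
from calm.dsl.builtins import CalmTask

health_check = CalmTask.HTTP.get(
    "https://example.com/health",
    status_mapping={200: True, 503: False},
    response_paths={"service_state": "$.status"},
    name="HealthCheck",
)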
+ tunnel (Ref.Tunnel): Tunnel Reference + Returns: + (Task): HTTP Task + """ + return http_task( + "DELETE", + url, + body=body, + headers=headers, + secret_headers=secret_headers, + credential=credential, + cred=cred, + content_type=content_type, + timeout=timeout, + verify=verify, + retries=retries, + retry_interval=retry_interval, + status_mapping=status_mapping, + response_paths=response_paths, + name=name, + target=target, + tunnel=tunnel, + ) + + +def _header_variables_from_dict(headers, secret=False): + variables = [] + LOG.debug("Headers for HTTP task : {}".format(headers)) + if not isinstance(headers, dict): + raise TypeError( + "Headers for HTTP task " + + (headers or "") + + " should be dictionary of strings" + ) + for var_name, var_value in headers.items(): + if not isinstance(var_name, str): + raise TypeError( + "Headers for HTTP task " + + (var_name or "") + + " should be dictionary of strings" + ) + if not isinstance(var_value, str): + raise TypeError( + "Headers for HTTP task " + + (var_value or "") + + " should be dictionary of strings" + ) + if secret: + variable = CalmVariable.Simple.Secret.string(name=var_name, value=var_value) + else: + variable = CalmVariable.Simple.string(name=var_name, value=var_value) + variables.append(variable) + return variables + + +def http_task( + method, + url, + relative_url=None, + body=None, + headers=None, + secret_headers=None, + credential=None, + cred=None, + content_type=None, + timeout=120, + verify=False, + retries=0, + retry_interval=10, + status_mapping=None, + response_paths=None, + name=None, + target=None, + tunnel=None, + **kwargs, +): + """ + Defines a HTTP Task. + + Args: + method (str): Request method (GET, PUT, POST, DELETE, etc.) + url (str): Request URL (https://example.com/dummy_url) + body (str): Request body + headers (dict): Request headers + secret_headers (dict): Request headers that are to be masked + credential (Credential): Credential object. Currently only supports basic auth. + cred (Credential reference): Used for basic_with_cred authentication + content_type (string): Request Content-Type (application/json, application/xml, etc.) + timeout (int): Request timeout in seconds (Default: 120) + verify (bool): TLS verify (Default: False) + retries (int): Number of times to retry this request if it fails. (Default: 0) + retry_interval (int): Time to wait in seconds between retries (Default: 10) + status_mapping (dict): Mapping of Response status code (int) to + task status (True: success, False: Failure) + response_paths (dict): Mapping of variable name (str) to path in response (str) + name (str): Task name + target (Ref): Target entity that this task runs under. + tunnel (Ref.Tunnel): Tunnel reference + Returns: + (Task): HTTP Task + """ + auth_obj = {"auth_type": "none"} + + if cred is not None: + cred_ref = _get_target_ref(cred) + if getattr(cred_ref, "kind", None) != "app_credential": + raise ValueError( + "Cred for HTTP task " + + (name or "") + + " should be reference of credential object" + ) + + auth_obj = { + "type": "basic_with_cred", + "credential_local_reference": cred_ref, + } + + elif credential is not None: + if getattr(credential, "__kind__", None) != "app_credential": + raise ValueError( + "Credential for HTTP task " + + (name or "") + + " should be a Credential object of PASSWORD type" + ) + + # TODO: Auth should be changed to basic auth with credential. 
+ # This is dependent on https://jira.nutanix.com/browse/CALM-12149 + # We could also possibly check calm server version to switch between + # the two auth mechanisms since basic auth will be deprecated. + auth_obj = { + "auth_type": "basic", + "basic_auth": { + "username": credential.username, + "password": { + "value": credential.secret.get("value"), + "attrs": {"is_secret_modified": True}, + }, + }, + } + + params = { + "name": name, + "type": "HTTP", + "attrs": { + "method": method, + "url": url, + "authentication": auth_obj, + "connection_timeout": timeout, + "tls_verify": verify, + "retry_count": retries + 1, + "retry_interval": retry_interval, + }, + } + + if relative_url is not None: + params["attrs"]["relative_url"] = relative_url + + if body is not None: + params["attrs"]["request_body"] = body + + if content_type is not None: + params["attrs"]["content_type"] = content_type + + if target is not None: + params["target_any_local_reference"] = _get_target_ref(target) + + header_variables = [] + if headers is not None: + header_variables.extend(_header_variables_from_dict(headers)) + params["attrs"]["headers"] = header_variables + + if secret_headers is not None: + header_variables.extend( + _header_variables_from_dict(secret_headers, secret=True) + ) + params["attrs"]["headers"] = header_variables + + if status_mapping is not None: + LOG.debug("Status mapping for HTTP Task : {}".format(status_mapping)) + if not isinstance(status_mapping, dict): + raise TypeError( + "Status mapping for HTTP task " + + (name or "") + + " should be dictionary of int keys and boolean values" + ) + expected_response = [] + for code, state in status_mapping.items(): + if not isinstance(code, int): + raise TypeError( + "Status mapping for HTTP task " + + (name or "") + + " should be dictionary of int keys and boolean values" + ) + if not isinstance(state, bool): + raise TypeError( + "Status mapping for HTTP task " + + (name or "") + + " should be dictionary of int keys and boolean values" + ) + expected_response.append( + {"status": "SUCCESS" if state else "FAILURE", "code": code} + ) + params["attrs"]["expected_response_params"] = expected_response + + if response_paths is not None: + LOG.debug("Response paths for HTTP Task : {}".format(response_paths)) + if not isinstance(response_paths, dict): + raise TypeError( + "Response paths for HTTP task " + + (name or "") + + " should be dictionary of strings" + ) + for prop, path in response_paths.items(): + if not isinstance(prop, str): + raise TypeError( + "Response paths for HTTP task " + + (name or "") + + " should be dictionary of strings" + ) + if not isinstance(path, str): + raise TypeError( + "Response paths for HTTP task " + + (name or "") + + " should be dictionary of strings" + ) + params["attrs"]["response_paths"] = response_paths + + if "inherit_target" in kwargs: + params["inherit_target"] = kwargs.get("inherit_target") + + if tunnel is not None: + params["attrs"]["tunnel_reference"] = tunnel + + return _task_create(**params) + + +def _deployment_scaling_create(target, scaling_type, scaling_count, name=None): + if not target: + raise ValueError("A target is required for deployment scaling task") + if not isinstance(target, RefType) and isinstance(target, EntityType): + target = target.get_ref() + if not target.kind == "app_blueprint_deployment": + LOG.debug( + "Target for deployment scaling can be 'app_blueprint_deployment' only" + ) + raise ValueError( + "Target for deployment scaling cannot be {}".format(target.kind) + ) + + kwargs = { + "name": 
name + if name is not None + else "{}_task_for_{}__{}".format( + scaling_type, target.name, str(uuid.uuid4())[:8] + ), + "type": "SCALING", + "attrs": {"scaling_type": scaling_type, "scaling_count": str(scaling_count)}, + "target_any_local_reference": target, + } + + return _task_create(**kwargs) + + +def scale_out_task(count, target, name=None): + """ + Defines a deployment scale out task + Args: + count (str): scaling_count + target (Ref): Target deployment for scale out + name (str): Name for this task + Returns: + (Task): Deployment scale out task + """ + return _deployment_scaling_create(target, "SCALEOUT", count, name=name) + + +def scale_in_task(count, target, name=None): + """ + Defines a deployment scale in task + Args: + count (str): scaling_count + target (Ref): Target deployment for scale in + name (str): Name for this task + Returns: + (Task): Deployment scale in task + """ + return _deployment_scaling_create(target, "SCALEIN", count, name=name) + + +def delay_task(delay_seconds=None, name=None, target=None): + """ + Defines a delay task. + Args: + delay_seconds(int): Delay in seconds + name (str): Name for this task + target (Ref): Target entity for this task + Returns: + (Task): Delay task + """ + if not isinstance(delay_seconds, int): + raise TypeError( + "delay_seconds({}) is expected to be an integer, got {}".format( + delay_seconds, type(delay_seconds) + ) + ) + kwargs = {"name": name, "type": "DELAY", "attrs": {"interval_secs": delay_seconds}} + if target is not None: + kwargs["target_any_local_reference"] = _get_target_ref(target) + return _task_create(**kwargs) + + +def vm_operation(name=None, type="VM_OPERATION", target=None, **kwargs): + """ + Defines a vm_operation task i.e. POWERON/ POWEROFF/ RESTART + Args: + name (str): Name for this task + type(str): Task Type + target (Ref): Target entity for this task + :keyword inherit_target (bool): True if target needs to be inherited. + Returns: + (Task): VM Operation task + """ + params = {"name": name, "type": type} + if target is not None: + params["target_any_local_reference"] = _get_target_ref(target) + if "inherit_target" in kwargs: + params["inherit_target"] = kwargs.get("inherit_target") + return _task_create(**params) + + +def input_task(timeout=None, name=None, inputs=[]): + """ + Defines a input task. + Args: + timeout(int): Task timeout in seconds + name (str): Name for this task + inputs (list): list of inputs for the task + Returns: + (Task): Delay task + """ + if not isinstance(timeout, int): + raise TypeError( + "timeout is expected to be an integer, got {}".format(type(timeout)) + ) + kwargs = { + "name": name, + "type": "INPUT", + "attrs": {"task_timeout": timeout, "inputs": []}, + } + for task_input in inputs: + if not isinstance(task_input, TaskInputType): + raise TypeError( + "All inputs is expected to be an TaskInputType, got {}".format( + type(task_input) + ) + ) + kwargs["attrs"]["inputs"].append( + { + "name": task_input.name, + "input_type": task_input.input_type, + "options": task_input.options, + } + ) + return _task_create(**kwargs) + + +def confirm_task(timeout=None, name=None): + """ + Defines a confirm task. 
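# --- Illustrative usage sketch; not part of this patch ---
# Delay tasks only need a duration; scaling tasks must target a deployment
# reference. Assuming CalmTask (and ref) are re-exported by the package, and
# with MyDeployment standing in for a deployment class defined elsewhere
# (hypothetical):
from calm.dsl.builtins import CalmTask

cool_down = CalmTask.Delay(delay_seconds=60, name="CoolDown")
# add_node = CalmTask.Scaling.scale_out(1, target=ref(MyDeployment), name="AddNode")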
+ Args: + timeout(int): Task timeout in seconds + name (str): Name for this task + Returns: + (Task): Delay task + """ + if not isinstance(timeout, int): + raise TypeError( + "timeout is expected to be an integer, got {}".format(type(timeout)) + ) + kwargs = {"name": name, "type": "CONFIRM", "attrs": {"task_timeout": timeout}} + return _task_create(**kwargs) + + +class BaseTask: + def __new__(cls, *args, **kwargs): + raise TypeError("'{}' is not callable".format(cls.__name__)) + + class HTTP: + def __new__( + cls, + method, + url, + body=None, + headers=None, + secret_headers=None, + credential=None, + cred=None, + content_type=None, + timeout=120, + verify=False, + retries=0, + retry_interval=10, + status_mapping=None, + response_paths=None, + name=None, + target=None, + tunnel=None, + ): + return http_task( + method, + url, + body=body, + headers=headers, + secret_headers=secret_headers, + credential=credential, + cred=cred, + content_type=content_type, + timeout=timeout, + verify=verify, + retries=retries, + retry_interval=retry_interval, + status_mapping=status_mapping, + response_paths=response_paths, + name=name, + target=target, + tunnel=tunnel, + ) + + get = http_task_get + post = http_task_post + put = http_task_put + delete = http_task_delete + + class SetVariable: + ssh = set_variable_task_ssh + powershell = set_variable_task_powershell + escript = set_variable_task_escript + + class Delay: + def __new__(cls, delay_seconds=None, name=None, target=None): + return delay_task(delay_seconds=delay_seconds, name=name, target=target) + + +class CalmTask(BaseTask): + class Scaling: + scale_in = scale_in_task + scale_out = scale_out_task + + class Exec: + def __new__(cls, *args, **kwargs): + raise TypeError("'{}' is not callable".format(cls.__name__)) + + ssh = exec_task_ssh + powershell = exec_task_powershell + escript = exec_task_escript + + class ConfigExec: + def __new__(cls, config, name=None): + target = config.__self__.attrs_list[0]["target_any_local_reference"] + if not target: + raise Exception( + "Config's target has to be specified for it be used in ConfigExec Task" + ) + return create_call_config(target, config, name) + + +class RunbookTask(BaseTask): + class Decision: + def __new__(cls, *args, **kwargs): + raise TypeError("'{}' is not callable".format(cls.__name__)) + + ssh = decision_task_ssh + powershell = decision_task_powershell + escript = decision_task_escript + + class Exec: + def __new__(cls, *args, **kwargs): + raise TypeError("'{}' is not callable".format(cls.__name__)) + + ssh = exec_task_ssh_runbook + powershell = exec_task_powershell_runbook + escript = exec_task_escript + + class Loop: + def __new__( + cls, + iterations, + name=None, + child_tasks=[], + loop_variable="iteration", + exit_condition=Status.DONT_CARE, + **kwargs, + ): + attrs = {"iterations": str(iterations), "loop_variable": loop_variable} + exit_code = EXIT_CONDITION_MAP.get(exit_condition, None) + if exit_code: + attrs["exit_condition_type"] = exit_code + else: + raise ValueError( + "Valid Exit Conditions for loop are 'Status.SUCCESS/Status.FAILURE/Status.DONT_CARE'." 
+ ) + return while_loop(name=name, child_tasks=child_tasks, attrs=attrs, **kwargs) + + class HTTP: + def __new__( + cls, + method, + relative_url=None, + body=None, + headers=None, + secret_headers=None, + content_type=None, + status_mapping=None, + response_paths=None, + name=None, + target=None, + **kwargs, + ): + return http_task_on_endpoint( + method, + relative_url=relative_url, + body=body, + headers=headers, + secret_headers=secret_headers, + content_type=content_type, + status_mapping=status_mapping, + response_paths=response_paths, + name=name, + target=target, + **kwargs, + ) + + get = http_task_get_on_endpoint + post = http_task_post_on_endpoint + put = http_task_put_on_endpoint + delete = http_task_delete_on_endpoint + + class Input: + def __new__(cls, timeout=500, name=None, inputs=[]): + return input_task(timeout=timeout, name=name, inputs=inputs) + + class Confirm: + def __new__(cls, timeout=500, name=None): + return confirm_task(timeout=timeout, name=name) + + class VMPowerOn: + def __new__(cls, name=None, target=None, **kwargs): + return vm_operation(name=name, type="VM_POWERON", target=target, **kwargs) + + class VMPowerOff: + def __new__(cls, name=None, target=None, **kwargs): + return vm_operation(name=name, type="VM_POWEROFF", target=target, **kwargs) + + class VMRestart: + def __new__(cls, name=None, target=None, **kwargs): + return vm_operation(name=name, type="VM_RESTART", target=target, **kwargs) diff --git a/framework/calm/dsl/builtins/models/task_input.py b/framework/calm/dsl/builtins/models/task_input.py new file mode 100644 index 0000000..8831c28 --- /dev/null +++ b/framework/calm/dsl/builtins/models/task_input.py @@ -0,0 +1,49 @@ +from .entity import EntityType, Entity +from .validator import PropertyValidator +from .constants import TASK_INPUT + + +# TaskInput + + +class TaskInputType(EntityType): + __schema_name__ = "TaskInput" + __openapi_type__ = "task_input" + + +class TaskInputValidator(PropertyValidator, openapi_type="task_input"): + __default__ = None + __kind__ = TaskInputType + + +def _task_input(*args, **kwargs): + name = kwargs.get("name", None) + input_type = kwargs.get("input_type", None) + options = kwargs.get("options", []) + if not name: + if len(args) > 0 and isinstance(args[0], str): + kwargs["name"] = args[0] + name = args[0] + else: + raise ValueError("Input name is required property") + if input_type is None: + kwargs["input_type"] = TASK_INPUT.TYPE.TEXT + elif input_type not in TASK_INPUT.VALID_TYPES: + raise ValueError( + "Input type is not valid. 
Supported input types are {}.".format( + TASK_INPUT.VALID_TYPES + ) + ) + + if ( + input_type == TASK_INPUT.TYPE.SELECT + or input_type == TASK_INPUT.TYPE.SELECTMULTIPLE + ): + if len(options) == 0: + raise ValueError( + "There must be atleast one option for input of type {}.".format( + input_type + ) + ) + bases = (Entity,) + return TaskInputType(name, bases, kwargs) diff --git a/framework/calm/dsl/builtins/models/utils.py b/framework/calm/dsl/builtins/models/utils.py new file mode 100644 index 0000000..4f78966 --- /dev/null +++ b/framework/calm/dsl/builtins/models/utils.py @@ -0,0 +1,154 @@ +import os +import sys +import inspect +import json + +from ruamel import yaml +import re +from calm.dsl.log import get_logging_handle +from calm.dsl.config import get_context + +LOG = get_logging_handle(__name__) + + +def read_file(filename, depth=1): + """reads the file""" + + if not filename: + raise ValueError("filename not supplied") + + # Expanding filename + filename = os.path.expanduser(filename) + file_path = os.path.join( + os.path.dirname(inspect.getfile(sys._getframe(depth))), filename + ) + + if not file_exists(file_path): + LOG.debug("file {} not found at location {}".format(filename, file_path)) + raise ValueError("file {} not found".format(filename)) + + with open(file_path, "r") as data: + return data.read() + + +def _get_caller_filepath(filename, depth=2): + + return os.path.abspath( + os.path.join(os.path.dirname(inspect.getfile(sys._getframe(depth))), filename) + ) + + +def read_env(relpath=".env"): + """ + read_env() reads from env file present in blueprint directory. + If it does not exist, it returns os env present in os.environ. + Custom env file location can also be given with relpath param. + relpath will look for file relative to blueprint top-level directory. + Example: relpath=".env2", relpath="env/dev", etc. + + :param relpath: Blueprint env path starting from blueprint dir. (default: "$BP_DIR/.env") + :type relpath: str + :return: env dict containing local & os env + :rtype: dict + """ + + # Init env + os_env = dict(os.environ) + + # Get filepath + filepath = _get_caller_filepath(relpath) + + LOG.debug("Reading env from file: {}".format(filepath)) + + # Check if file path exists + if not os.path.exists(filepath): + LOG.warning("env file {} not found.".format(filepath)) + return os_env + + # Read env + with open(filepath, "r") as f: + content = f.readlines() + + local_env_list = [] + for line in content: + if not line.startswith("#") and "=" in line: + # Remove any whitespace characters + line = line.strip() + + # Get env name & value + name, value = line.split("=", 1) + + # Remove any extra whitespaces + name = name.strip() + value = value.strip() + + # Remove any comments given after value + value = value.split("#")[0].strip() + + # Remove any quotes in value, if present. 
+ value = value.strip('"').strip("'") + + local_env_list.append((name, value)) + + local_env = dict(local_env_list) + LOG.debug( + "Got local env:\n{}".format( + json.dumps(local_env, indent=4, separators=(",", ": ")) + ) + ) + + # Give priority to local env over OS env + env = {**os_env, **local_env} + + return env + + +def file_exists(file_path): + return os.path.exists(file_path) + + +def read_local_file(filename): + file_path = os.path.join(".local", filename) + + # Checking if file exists + abs_file_path = os.path.join( + os.path.dirname(inspect.getfile(sys._getframe(1))), file_path + ) + + # If not exists read from home directory + if not file_exists(abs_file_path): + ContextObj = get_context() + init_data = ContextObj.get_init_config() + file_path = os.path.join(init_data["LOCAL_DIR"]["location"], filename) + return read_file(file_path, 0).rstrip() # To remove \n, use rstrip + + return read_file(file_path, depth=2) + + +def str_presenter(dumper, data): + """For handling multiline strings""" + if len(data.splitlines()) > 1: # check for multiline string + return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|") + return dumper.represent_scalar("tag:yaml.org,2002:str", data) + + +yaml.add_representer(str, str_presenter) + + +def get_valid_identifier(data=None): + """returns a valid indentifier out of string""" + + if not data: + return data + + if data.isidentifier(): + return data + + # Will remove all unwanted characters + data = re.sub("[^0-9a-zA-Z_]", "", data) + + # Still it is an invalid indentifier, it will append "_" i.e underscore at start + if not data.isidentifier(): + data = "_{}".format(data) + + return data diff --git a/framework/calm/dsl/builtins/models/validator.py b/framework/calm/dsl/builtins/models/validator.py new file mode 100644 index 0000000..6d54021 --- /dev/null +++ b/framework/calm/dsl/builtins/models/validator.py @@ -0,0 +1,166 @@ +from copy import deepcopy + + +class _PropertyValidatorBase: + subclasses = {} + + def __init_subclass__(cls, openapi_type, **kwargs): + super().__init_subclass__(**kwargs) + + if openapi_type is not None: + + # register validator plugins + cls.subclasses[openapi_type] = cls + + +def get_property_validators(): + return _PropertyValidatorBase.subclasses + + +class PropertyValidator(_PropertyValidatorBase, openapi_type=None): + + __default__ = None + __kind__ = None + + # @classmethod + # def get_default(cls, is_array): + # return cls.__default__ if not is_array else [] + + @classmethod + def get_default(cls, is_array): + default = None + class_default = cls.__default__ + if not callable(class_default): + default = lambda: deepcopy(class_default) # noqa: E731 + else: + default = class_default + return default if not is_array else list + + @classmethod + def get_kind(cls): + return cls.__kind__ + + @classmethod + def _validate_item(cls, value): + + if isinstance(value, type(None)): + return + + kind = cls.get_kind() + # Value may be a class or an object + # If not an class, check for metaclass for object's class(Ex: Provider Spec) + if not ( + isinstance(value, kind) + or isinstance(type(value), kind) + or (hasattr(kind, "validate_dict") and (not kind.validate_dict(value))) + ): + raise TypeError("{} is not of type {}".format(value, kind)) + + @staticmethod + def _validate_list(values): + if not isinstance(values, list): + raise TypeError("{} is not of type {}".format(values, list)) + + @classmethod + def validate(cls, value, is_array): + + if not is_array: + cls._validate_item(value) + else: + 
cls._validate_list(value) + for v in value: + cls._validate_item(v) + + # Note on __set__() interface: + # Initial plan was to use PropertyValidator as descriptors and make use of the magical + # __set__() interface to validate values. + # The validator objects were set as attributes on the metaclass so that the + # __set__() interface would call the right validator. But this does not work for + # type objects (e.g. a class object) as these attributes cannot be assigned outside + # its scope. More details in the else block below. + + # def __set__(self, instance, value): + # """ The below dict assignment does not work for type objects like classes as + # `cls.__dict__` is immutable and exposed through a `mappingproxy` which + # is read-only. + + # `object.__setattr__(instance, name, value)` will also not work as this specifically + # checks if the first argument is a subclass of `object`. `instance` here would + # be a `type` object for class and hence this will fail. + # The check is there to prevent this method being used to modify built-in + # types (Carlo Verre hack). + + # So, descriptors cannot work in their current form on type objects as class + # attributes are stored as `mappingproxy` objects. So only + # `class.__setattr__` remains as an avenue for setting class attributes. + + # Now, `setattr` works by looking for a data descriptor in + # `type(obj).__mro__`. If a data descriptor is found (i.e. if __set__ is defined), + # it calls `__set__` and exits. + # There is no way to avoid this, and understandably so, as this is the purpose + # of the magical `__set__` interface. + + # But, as `class.__dict__` cannot be used to set attributes, + # `setattr(cls, self.name, value)` is the only way. + # Calling setattr inside this block will cause infinite recursion! + # Else block below has more details. + + # """ + + # self.validate(value, is_array) + + # # Does not work if instance is a type object. + # if not isinstance(instance, type): + # instance.__dict__[self.name] = value + # # This works fine. + # else: + # # setattr(instance, self.name, value) + # # This would call __set__ again and hence cause infinite recursion. + + # # type.__setattr__(instance, self.name, value) + # # This would call __set__ again and hence cause infinite recursion. + + # # instance.__dict__[self.name] = value + # # Item assignment for mappingproxy object is not allowed. + + # # object.__setattr__(instance, self.name, value) + # # This does not work as `instance` is a type object. + + # # Sorry, can't do anything! + + # pass + + # To overcome this problem: + # a __validator_dict__ dictionary is set on the metaclass which has the correct + # validator type mappings based on property name, and validator methods have + # been made classmethods. + # These mappings are checked during class creation using a hook to + # __prepare__() and during attribute assignments by using a hook to __setattr__() + # interfaces. More details in EntityType.__prepare__() methods.
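
The registration and validation flow described in the note above is easiest to see with a tiny, self-contained example. The sketch below is illustrative only and is not part of this change set; it assumes the module is importable as `calm.dsl.builtins.models.validator` (matching the file path added here) and registers a hypothetical `openapi_type` called `port` purely to show how `__init_subclass__` and `validate()` behave.

```python
# Illustrative sketch only -- not part of the diff.
# Assumes calm.dsl.builtins.models.validator is importable from this repo layout.
from calm.dsl.builtins.models.validator import (
    PropertyValidator,
    get_property_validators,
)


class PortValidator(PropertyValidator, openapi_type="port"):
    """Hypothetical plugin; subclassing with an openapi_type is all it takes to register."""

    __default__ = int
    __kind__ = int


# __init_subclass__ recorded the plugin under its openapi_type
assert get_property_validators()["port"] is PortValidator

PortValidator.validate(8080, is_array=False)      # ok: value matches __kind__ (int)
PortValidator.validate([80, 443], is_array=True)  # ok: the list and each item are checked
# PortValidator.validate("http", is_array=False)  # would raise TypeError: not an int
```
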
+ + +# built-in validators + + +class StringValidator(PropertyValidator, openapi_type="string"): + + __default__ = str + __kind__ = str + + +class IntValidator(PropertyValidator, openapi_type="integer"): + + __default__ = int + __kind__ = int + + +class BoolValidator(PropertyValidator, openapi_type="boolean"): + + __default__ = bool + __kind__ = bool + + +class DictValidator(PropertyValidator, openapi_type="dict"): + + __default__ = dict + __kind__ = dict diff --git a/framework/calm/dsl/builtins/models/variable.py b/framework/calm/dsl/builtins/models/variable.py new file mode 100644 index 0000000..536b35e --- /dev/null +++ b/framework/calm/dsl/builtins/models/variable.py @@ -0,0 +1,1486 @@ +import re + +from .entity import EntityType, Entity +from .validator import PropertyValidator +from .task_input import _task_input + + +# Variable + +VARIABLE_VALUE_TYPES = { + "int": "INT", + "date": "DATE", + "time": "TIME", + "dict": "DICT", + "string": "STRING", + "data_time": "DATE_TIME", + "multiline_string": "MULTILINE_STRING", +} + +VARIABLE_DATA_TYPES = { + "base": "BASE", + "list": "LIST", + "single_select_list": "SINGLE_SELECT_LIST", +} + + +class VariableType(EntityType): + __schema_name__ = "Variable" + __openapi_type__ = "app_variable" + + def compile(cls): + cdict = super().compile() + if not cdict.get("options", {}): + del cdict["options"] + if not cdict.get("regex", {}): + del cdict["regex"] + if not cdict.get("editables", {}): + del cdict["editables"] + + if cdict.get("options", None): + options = cdict["options"] + # Only EScript/HTTP request info needed for dynamically fetching options + if options["type"] == "PREDEFINED": + del options["attrs"] + else: + del options["choices"] # Choices are only for PREDEFINED Type + + return cdict + + +class VariableValidator(PropertyValidator, openapi_type="app_variable"): + __default__ = None + __kind__ = VariableType + + +def _var(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return VariableType(name, bases, kwargs) + + +Variable = _var() + + +def setvar(name, value, type_="LOCAL", **kwargs): + + kwargs["name"] = name + if value is not None: + kwargs["value"] = value + kwargs["type"] = type_ + + return VariableType(name, (Variable,), kwargs) + + +def simple_variable( + value, + name=None, + label=None, + regex=None, + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + kwargs = {"is_hidden": is_hidden, "is_mandatory": is_mandatory} + editables = {} + if runtime: + editables = {"value": True} + kwargs["editables"] = editables + if label is not None: + kwargs["label"] = label + if regex is not None: + if not isinstance(regex, str): + raise TypeError( + "Expected string in field regex for variable " + + (name or "") + + ", got {}".format(type(regex)) + ) + if validate_regex and regex and value: + regex_result = re.match(regex, value) + if not regex_result: + raise ValueError( + "Value '{}' doesn't match with specified regex '{}'".format( + value, regex + ) + ) + + regex = {"value": regex, "should_validate": validate_regex} + kwargs["regex"] = regex + if description is not None: + kwargs["description"] = description + + return setvar(name, value, **kwargs) + + +def simple_variable_secret( + value, + name=None, + label=None, + regex=None, + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + kwargs = {"is_hidden": is_hidden, "is_mandatory": is_mandatory} + editables = {} + if runtime: + editables = {"value": True} + 
kwargs["editables"] = editables + if label is not None: + kwargs["label"] = label + if regex is not None: + if not isinstance(regex, str): + raise TypeError( + "Expected string in field regex for variable " + + (name or "") + + ", got {}".format(type(regex)) + ) + if validate_regex and regex and value: + regex_result = re.match(regex, value) + if not regex_result: + raise ValueError( + "Value '{}' doesn't match with specified regex '{}'".format( + value, regex + ) + ) + + regex = {"value": regex, "should_validate": validate_regex} + kwargs["regex"] = regex + if description is not None: + kwargs["description"] = description + return setvar(name, value, type_="SECRET", **kwargs) + + +def _advanced_variable( + type_, + name=None, + value="", + label=None, + task=None, + value_type=None, + data_type=None, + regex=None, + validate_regex=False, + options=None, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + kwargs = {"name": name, "value": value, "type_": type_} + if runtime: + kwargs["editables"] = {"value": True} + if label is not None: + kwargs["label"] = label + if task is not None: + if not getattr(task, "__kind__") == "app_task": + raise TypeError( + "Expected a Task for variable " + + (name or "") + + ", got {}".format(type(task)) + ) + task_attrs = task.compile().get("attrs") + if not task_attrs: + raise ValueError("Task for variable " + (name or "") + ", is not valid.") + task_type = getattr(task, "type") + if task_type not in ["HTTP", "EXEC"]: + raise ValueError( + "Task type for variable " + + (name or "") + + ", is not valid, Expected one of" + + " ['HTTP', 'EXEC'], got {}".format(task_type) + ) + task_attrs["type"] = task_type + kwargs["type_"] = task_type + "_" + type_ + kwargs["options"] = {"type": task_type, "attrs": task_attrs} + if value_type is not None: + value_type = value_type.upper() + if value_type not in VARIABLE_VALUE_TYPES.values(): + raise ValueError( + "Value type for variable " + + (name or "") + + ", is not valid, Expected one of" + + " {}, got {}".format(list(VARIABLE_VALUE_TYPES.values()), value_type) + ) + kwargs["value_type"] = value_type + if data_type is not None: + data_type = data_type.upper() + if data_type not in VARIABLE_DATA_TYPES.values(): + raise ValueError( + "Data type for variable " + + (name or "") + + ", is not valid, Expected one of" + + " {}, got {}".format(list(VARIABLE_DATA_TYPES.values()), data_type) + ) + kwargs["data_type"] = data_type + if regex is not None: + if not isinstance(regex, str): + raise TypeError( + "Expected string in field regex for variable " + + (name or "") + + ", got {}".format(type(regex)) + ) + regex = {"value": regex, "should_validate": validate_regex} + kwargs["regex"] = regex + if options is not None: + if kwargs.get("options", None) is not None: + raise ValueError( + "Variable options for variable " + + (name or "") + + "cannot be specified since it is being " + + "fetched from a {} task".format(kwargs["options"]["type"]) + ) + if not isinstance(options, list): + raise TypeError( + "Expected list of options for variable " + + (name or "") + + ", got {}".format(type(options)) + ) + choices = [] + for choice in options: + if not isinstance(choice, str): + raise TypeError( + "Expected list of string choices for options for variable " + + (name or "") + + ", got {}".format(type(choice)) + ) + if validate_regex and regex: + regex_result = re.match(regex["value"], choice) + if not regex_result: + raise ValueError( + "Option '{}' doesn't match with specified regex '{}'".format( + choice, 
regex["value"] + ) + ) + + choices.append(choice) + if isinstance(value, list) and data_type == "LIST": + for val in value: + if not isinstance(val, str): + raise TypeError( + "Expected list of string defaults for variable " + + (name or "") + + ", got {}".format(type(val)) + ) + if val not in choices: + raise TypeError( + "Default value for variable array with options " + + (name or "") + + ", contains {}, which is not one of the options".format(val) + ) + value = ",".join(value) + kwargs["value"] = value + if value is None and len(choices) > 0: + value = choices[0] + kwargs["value"] = value + if data_type != "LIST" and value not in choices: + raise TypeError( + "Default value for variable with options " + + (name or "") + + ", is {}, which is not one of the options".format(value) + ) + options = {"type": "PREDEFINED", "choices": choices} + kwargs["options"] = options + else: + # If options are None, just regex validate the value + if validate_regex and regex and value: + regex_result = re.match(regex["value"], value) + if not regex_result: + raise ValueError( + "Value '{}' doesn't match with specified regex '{}'".format( + value, regex["value"] + ) + ) + if is_hidden is not None: + kwargs["is_hidden"] = bool(is_hidden) + if is_mandatory is not None: + kwargs["is_mandatory"] = bool(is_mandatory) + if description is not None: + kwargs["description"] = description + + return setvar(**kwargs) + + +def simple_variable_int( + value, + name=None, + label=None, + regex=r"^[\d]*$", + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + value=value, + label=label, + value_type="INT", + data_type="BASE", + regex=regex, + validate_regex=validate_regex, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + +def simple_variable_date( + value, + name=None, + label=None, + regex=r"^((0[1-9]|[12]\d|3[01])/(0[1-9]|1[0-2])/[12]\d{3})$", + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + value=value, + label=label, + value_type="DATE", + data_type="BASE", + regex=regex, + validate_regex=validate_regex, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + +def simple_variable_time( + value, + name=None, + label=None, + regex=r"^[\d]{2}:[\d]{2}(:[0-5]\d)?$", + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + value=value, + label=label, + value_type="TIME", + data_type="BASE", + regex=regex, + validate_regex=validate_regex, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + +def simple_variable_datetime( + value, + name=None, + label=None, + regex=r"^((0[1-9]|[12]\d|3[01])/(0[1-9]|1[0-2])/[12]\d{3})((T)|(\s-\s))[\d]{2}:[\d]{2}(:[0-5]\d)?$", + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + value=value, + label=label, + value_type="DATE_TIME", + data_type="BASE", + regex=regex, + validate_regex=validate_regex, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + +def simple_variable_multiline( + value, + name=None, + label=None, + regex=None, + validate_regex=False, + 
is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + value=value, + label=label, + value_type="MULTILINE_STRING", + data_type="BASE", + regex=regex, + validate_regex=validate_regex, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + +def simple_variable_int_secret( + value, + name=None, + label=None, + regex=r"^[\d]*$", + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + return _advanced_variable( + "SECRET", + name=name, + value=value, + label=label, + value_type="INT", + data_type="BASE", + regex=regex, + validate_regex=validate_regex, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + +def simple_variable_date_secret( + value, + name=None, + label=None, + regex=r"^((0[1-9]|[12]\d|3[01])/(0[1-9]|1[0-2])/[12]\d{3})$", + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + return _advanced_variable( + "SECRET", + name=name, + value=value, + label=label, + value_type="DATE", + data_type="BASE", + regex=regex, + validate_regex=validate_regex, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + +def simple_variable_time_secret( + value, + name=None, + label=None, + regex=r"^[\d]{2}:[\d]{2}(:[0-5]\d)?$", + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + return _advanced_variable( + "SECRET", + name=name, + value=value, + label=label, + value_type="TIME", + data_type="BASE", + regex=regex, + validate_regex=validate_regex, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + +def simple_variable_datetime_secret( + value, + name=None, + label=None, + regex=r"^((0[1-9]|[12]\d|3[01])/(0[1-9]|1[0-2])/[12]\d{3})((T)|(\s-\s))[\d]{2}:[\d]{2}(:[0-5]\d)?$", + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + return _advanced_variable( + "SECRET", + name=name, + value=value, + label=label, + value_type="DATE_TIME", + data_type="BASE", + regex=regex, + validate_regex=validate_regex, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + +def simple_variable_multiline_secret( + value, + name=None, + label=None, + regex=None, + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + return _advanced_variable( + "SECRET", + name=name, + value=value, + label=label, + value_type="MULTILINE_STRING", + data_type="BASE", + regex=regex, + validate_regex=validate_regex, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + +def variable_string_with_predefined_options( + options, + default=None, + name=None, + label=None, + regex=None, + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + value=default, + label=label, + value_type="STRING", + data_type="BASE", + regex=regex, + validate_regex=validate_regex, + options=options, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + +def variable_int_with_predefined_options( + options, + default=None, + name=None, + label=None, + regex=r"^[\d]*$", + 
validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + value=default, + label=label, + value_type="INT", + data_type="BASE", + regex=regex, + validate_regex=validate_regex, + options=options, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + +def variable_date_with_predefined_options( + options, + default=None, + name=None, + label=None, + regex=r"^((0[1-9]|[12]\d|3[01])/(0[1-9]|1[0-2])/[12]\d{3})$", + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + value=default, + label=label, + value_type="DATE", + data_type="BASE", + regex=regex, + validate_regex=validate_regex, + options=options, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + +def variable_time_with_predefined_options( + options, + default=None, + name=None, + label=None, + regex=r"^[\d]{2}:[\d]{2}(:[0-5]\d)?$", + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + value=default, + label=label, + value_type="TIME", + data_type="BASE", + regex=regex, + validate_regex=validate_regex, + options=options, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + +def variable_datetime_with_predefined_options( + options, + default=None, + name=None, + label=None, + regex=r"^((0[1-9]|[12]\d|3[01])/(0[1-9]|1[0-2])/[12]\d{3})((T)|(\s-\s))[\d]{2}:[\d]{2}(:[0-5]\d)?$", + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + value=default, + label=label, + value_type="DATE_TIME", + data_type="BASE", + regex=regex, + validate_regex=validate_regex, + options=options, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + +def variable_multiline_with_predefined_options( + options, + default=None, + name=None, + label=None, + regex=None, + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + value=default, + label=label, + value_type="MULTILINE_STRING", + data_type="BASE", + regex=regex, + validate_regex=validate_regex, + options=options, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + +def variable_string_with_predefined_options_array( + options, + defaults=None, + name=None, + label=None, + regex=None, + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + value=defaults, + label=label, + value_type="STRING", + data_type="LIST", + regex=regex, + validate_regex=validate_regex, + options=options, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + +def variable_int_with_predefined_options_array( + options, + defaults=None, + name=None, + label=None, + regex=r"^[\d]*$", + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + value=defaults, + label=label, + value_type="INT", + data_type="LIST", + regex=regex, + 
validate_regex=validate_regex, + options=options, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + +def variable_date_with_predefined_options_array( + options, + defaults=None, + name=None, + label=None, + regex=r"^((0[1-9]|[12]\d|3[01])/(0[1-9]|1[0-2])/[12]\d{3})$", + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + value=defaults, + label=label, + value_type="DATE", + data_type="LIST", + regex=regex, + validate_regex=validate_regex, + options=options, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + +def variable_time_with_predefined_options_array( + options, + defaults=None, + name=None, + label=None, + regex=r"^[\d]{2}:[\d]{2}(:[0-5]\d)?$", + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + value=defaults, + label=label, + value_type="TIME", + data_type="LIST", + regex=regex, + validate_regex=validate_regex, + options=options, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + +def variable_datetime_with_predefined_options_array( + options, + defaults=None, + name=None, + label=None, + regex=r"^((0[1-9]|[12]\d|3[01])/(0[1-9]|1[0-2])/[12]\d{3})((T)|(\s-\s))[\d]{2}:[\d]{2}(:[0-5]\d)?$", + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + value=defaults, + label=label, + value_type="DATE_TIME", + data_type="LIST", + regex=regex, + validate_regex=validate_regex, + options=options, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + +def variable_multiline_with_predefined_options_array( + options, + defaults=None, + name=None, + label=None, + regex=None, + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + value=defaults, + label=label, + value_type="MULTILINE_STRING", + data_type="LIST", + regex=regex, + validate_regex=validate_regex, + options=options, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + +def variable_string_with_options_from_task( + task, + name=None, + label=None, + regex=None, + validate_regex=False, + is_hidden=False, + is_mandatory=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + label=label, + value_type="STRING", + data_type="BASE", + regex=regex, + validate_regex=validate_regex, + task=task, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=True, + description=description, + ) + + +def variable_int_with_options_from_task( + task, + name=None, + label=None, + regex=r"^[\d]*$", + validate_regex=False, + is_hidden=False, + is_mandatory=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + label=label, + value_type="INT", + data_type="BASE", + regex=regex, + validate_regex=validate_regex, + task=task, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=True, + description=description, + ) + + +def variable_date_with_options_from_task( + task, + name=None, + label=None, + regex=r"^((0[1-9]|[12]\d|3[01])/(0[1-9]|1[0-2])/[12]\d{3})$", + validate_regex=False, + is_hidden=False, + 
is_mandatory=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + label=label, + value_type="DATE", + data_type="BASE", + regex=regex, + validate_regex=validate_regex, + task=task, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=True, + description=description, + ) + + +def variable_time_with_options_from_task( + task, + name=None, + label=None, + regex=r"^[\d]{2}:[\d]{2}(:[0-5]\d)?$", + validate_regex=False, + is_hidden=False, + is_mandatory=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + label=label, + value_type="TIME", + data_type="BASE", + regex=regex, + validate_regex=validate_regex, + task=task, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=True, + description=description, + ) + + +def variable_datetime_with_options_from_task( + task, + name=None, + label=None, + regex=r"^((0[1-9]|[12]\d|3[01])/(0[1-9]|1[0-2])/[12]\d{3})((T)|(\s-\s))[\d]{2}:[\d]{2}(:[0-5]\d)?$", + validate_regex=False, + is_hidden=False, + is_mandatory=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + label=label, + value_type="DATE_TIME", + data_type="BASE", + regex=regex, + validate_regex=validate_regex, + task=task, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=True, + description=description, + ) + + +def variable_multiline_with_options_from_task( + task, + name=None, + label=None, + regex=None, + validate_regex=False, + is_hidden=False, + is_mandatory=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + label=label, + value_type="MULTILINE_STRING", + data_type="BASE", + regex=regex, + validate_regex=validate_regex, + task=task, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=True, + description=description, + ) + + +def variable_string_with_options_from_task_array( + task, + name=None, + label=None, + regex=None, + validate_regex=False, + is_hidden=False, + is_mandatory=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + label=label, + value_type="STRING", + data_type="LIST", + regex=regex, + validate_regex=validate_regex, + task=task, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=True, + description=description, + ) + + +def variable_int_with_options_from_task_array( + task, + name=None, + label=None, + regex=r"^[\d]*$", + validate_regex=False, + is_hidden=False, + is_mandatory=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + label=label, + value_type="INT", + data_type="LIST", + regex=regex, + validate_regex=validate_regex, + task=task, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=True, + description=description, + ) + + +def variable_date_with_options_from_task_array( + task, + name=None, + label=None, + regex=r"^((0[1-9]|[12]\d|3[01])/(0[1-9]|1[0-2])/[12]\d{3})$", + validate_regex=False, + is_hidden=False, + is_mandatory=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + label=label, + value_type="DATE", + data_type="LIST", + regex=regex, + validate_regex=validate_regex, + task=task, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=True, + description=description, + ) + + +def variable_time_with_options_from_task_array( + task, + name=None, + label=None, + regex=r"^[\d]{2}:[\d]{2}(:[0-5]\d)?$", + validate_regex=False, + is_hidden=False, + is_mandatory=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + label=label, + value_type="TIME", + 
data_type="LIST", + regex=regex, + validate_regex=validate_regex, + task=task, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=True, + description=description, + ) + + +def variable_datetime_with_options_from_task_array( + task, + name=None, + label=None, + regex=r"^((0[1-9]|[12]\d|3[01])/(0[1-9]|1[0-2])/[12]\d{3})((T)|(\s-\s))[\d]{2}:[\d]{2}(:[0-5]\d)?$", + validate_regex=False, + is_hidden=False, + is_mandatory=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + label=label, + value_type="DATE_TIME", + data_type="LIST", + regex=regex, + validate_regex=validate_regex, + task=task, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=True, + description=description, + ) + + +def variable_multiline_with_options_from_task_array( + task, + name=None, + label=None, + regex=None, + validate_regex=False, + is_hidden=False, + is_mandatory=False, + description="", +): + return _advanced_variable( + "LOCAL", + name=name, + label=label, + value_type="MULTILINE_STRING", + data_type="LIST", + regex=regex, + validate_regex=validate_regex, + task=task, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=True, + description=description, + ) + + +class CalmVariable: + def __new__( + cls, + value, + name=None, + label=None, + regex=None, + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", + ): + return simple_variable( + value, + name=name, + label=label, + regex=regex, + validate_regex=validate_regex, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + class Simple: + def __new__( + cls, + value, + name=None, + label=None, + regex=None, + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", + ): + return simple_variable( + value, + name=name, + label=label, + regex=regex, + validate_regex=validate_regex, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + string = simple_variable + int = simple_variable_int + date = simple_variable_date + time = simple_variable_time + datetime = simple_variable_datetime + multiline = simple_variable_multiline + + class Secret: + def __new__( + cls, + value, + name=None, + label=None, + regex=None, + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", + ): + return simple_variable_secret( + value, + name=name, + label=label, + regex=regex, + validate_regex=validate_regex, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + string = simple_variable_secret + int = simple_variable_int_secret + date = simple_variable_date_secret + time = simple_variable_time_secret + datetime = simple_variable_datetime_secret + multiline = simple_variable_multiline_secret + + class WithOptions: + def __new__( + cls, + options, + default=None, + name=None, + label=None, + regex=None, + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", + ): + return variable_string_with_predefined_options( + options, + default=default, + name=name, + label=label, + regex=regex, + validate_regex=validate_regex, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + class Predefined: + def __new__( + cls, + options, + default=None, + name=None, + label=None, + regex=None, + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + 
description="", + ): + return variable_string_with_predefined_options( + options, + default=default, + name=name, + label=label, + regex=regex, + validate_regex=validate_regex, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + string = variable_string_with_predefined_options + int = variable_int_with_predefined_options + date = variable_date_with_predefined_options + time = variable_time_with_predefined_options + datetime = variable_datetime_with_predefined_options + multiline = variable_multiline_with_predefined_options + + class Array: + def __new__( + cls, + options, + defaults=None, + name=None, + label=None, + regex=None, + validate_regex=False, + is_hidden=False, + is_mandatory=False, + runtime=False, + description="", + ): + return variable_string_with_predefined_options_array( + options, + defaults=defaults, + name=name, + label=label, + regex=regex, + validate_regex=validate_regex, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + runtime=runtime, + description=description, + ) + + string = variable_string_with_predefined_options_array + int = variable_int_with_predefined_options_array + date = variable_date_with_predefined_options_array + time = variable_time_with_predefined_options_array + datetime = variable_datetime_with_predefined_options_array + multiline = variable_multiline_with_predefined_options_array + + class FromTask: + def __new__( + cls, + task, + name=None, + label=None, + regex=None, + validate_regex=False, + is_hidden=False, + is_mandatory=False, + description="", + ): + return variable_string_with_options_from_task( + task, + name=name, + label=label, + regex=regex, + validate_regex=validate_regex, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + description=description, + ) + + string = variable_string_with_options_from_task + int = variable_int_with_options_from_task + date = variable_date_with_options_from_task + time = variable_time_with_options_from_task + datetime = variable_datetime_with_options_from_task + multiline = variable_multiline_with_options_from_task + + class Array: + def __new__( + cls, + task, + name=None, + label=None, + regex=None, + validate_regex=False, + is_hidden=False, + is_mandatory=False, + description="", + ): + return variable_string_with_options_from_task_array( + task, + name=name, + label=label, + regex=regex, + validate_regex=validate_regex, + is_hidden=is_hidden, + is_mandatory=is_mandatory, + description=description, + ) + + string = variable_string_with_options_from_task_array + int = variable_int_with_options_from_task_array + date = variable_date_with_options_from_task_array + time = variable_time_with_options_from_task_array + datetime = variable_datetime_with_options_from_task_array + multiline = variable_multiline_with_options_from_task_array + + +class RunbookVariable(CalmVariable): + class TaskInput: + def __new__(cls, *args, **kwargs): + return _task_input(*args, **kwargs) diff --git a/framework/calm/dsl/builtins/models/vm_blueprint.py b/framework/calm/dsl/builtins/models/vm_blueprint.py new file mode 100644 index 0000000..0df4c97 --- /dev/null +++ b/framework/calm/dsl/builtins/models/vm_blueprint.py @@ -0,0 +1,92 @@ +import sys + +from .entity import EntityType, Entity +from .validator import PropertyValidator + +from .ref import ref +from .action import action as Action +from .blueprint import blueprint +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + +# Simple Blueprint + + +class VmBlueprintType(EntityType): + 
__schema_name__ = "VmBlueprint" + __openapi_type__ = "app_vm_blueprint" + __has_dag_target__ = False + + def get_task_target(cls): + return + + def make_bp_obj(cls): + """returns blueprint object""" + + # Extracting Vm Profiles + vm_profiles = getattr(cls, "profiles", []) + + # create blueprint credentials + bp_credentials = cls.credentials + + bp_name = getattr(cls, "name", None) or cls.__name__ + if not vm_profiles: + LOG.error("No vm profile provided for blueprint") + sys.exit(-1) + + bp_services = [] + bp_packages = [] + bp_substrates = [] + bp_profiles = [] + + # Extracting blueprint entities + + # Extracting service, as it should be same across profiles + vp_data = vm_profiles[0].get_bp_classes() + bp_svc = vp_data["service"] + bp_services = [bp_svc] + + for vp in vm_profiles: + vp_data = vp.get_bp_classes() + + pkg = vp_data["package"] + pkg.services = [ref(bp_svc)] + + subt = vp_data["substrate"] + + pfl = vp_data["profile"] + + # Set service as reference to profile actions + for k, v in pfl.__dict__.items(): + if isinstance(v, Action): + v.task_target = ref(bp_svc) + + bp_packages.append(pkg) + bp_substrates.append(subt) + bp_profiles.append(pfl) + + bp_obj = blueprint( + name=bp_name, + packages=bp_packages, + services=bp_services, + substrates=bp_substrates, + credentials=bp_credentials, + profiles=bp_profiles, + ) + + return bp_obj + + +class VmBlueprintValidator(PropertyValidator, openapi_type="app_vm_blueprint"): + __default__ = None + __kind__ = VmBlueprintType + + +def vm_blueprint(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return VmBlueprintType(name, bases, kwargs) + + +VmBlueprint = vm_blueprint() diff --git a/framework/calm/dsl/builtins/models/vm_disk_package.py b/framework/calm/dsl/builtins/models/vm_disk_package.py new file mode 100644 index 0000000..da5c814 --- /dev/null +++ b/framework/calm/dsl/builtins/models/vm_disk_package.py @@ -0,0 +1,181 @@ +from .provider_spec import read_spec +from .package import PackageType +from .validator import PropertyValidator +from .entity import Entity +from .utils import get_valid_identifier +from calm.dsl.log import get_logging_handle + + +# Downloadable Image + +# Constants +ImageType = "DISK_IMAGE" +ImageArchitecture = "X86_64" +ProductVersion = "1.0" +ConfigSections = ["image", "product", "checksum"] +LOG = get_logging_handle(__name__) + + +class VmDiskPackageType(PackageType): + __schema_name__ = "VmDiskPackage" + __openapi_type__ = "app_vm_disk_package" + + def get_ref(cls, kind=None): + """Note: app_package kind to be used for downloadable image""" + return super().get_ref(kind=PackageType.__openapi_type__) + + def get_dict(cls): + + attrs = cls.get_all_attrs() + # convert keys to api schema + cdict = {} + display_map = getattr(type(cls), "__display_map__") + for k, v in attrs.items(): + if getattr(v, "__is_object__", False): + cdict.setdefault(display_map[k], v.get_dict()) + cdict.setdefault(display_map[k], v) + + # Add name & description if present + if "name" in cdict and cdict["name"] == "": + cdict["name"] = cls.__name__ + + if "description" in cdict and cdict["description"] == "": + cdict["description"] = cls.__doc__ if cls.__doc__ else "" + + return cdict + + def compile(cls): + config = super().compile() + + pkg_name = cls.__name__ + pkg_doc = config["description"] + + kwargs = { + "type": "SUBSTRATE_IMAGE", + "name": config["name"], + "options": { + "name": config["image"].get("name") or pkg_name, + "description": "", + "resources": { + "image_type": config["image"].get("type") or ImageType, 
+ "source_uri": config["image"].get("source_uri") or "", + "version": { + "product_version": config["product"].get("version") + or "" + or ProductVersion, + "product_name": config["product"].get("name") or pkg_name, + }, + "architecture": config["image"].get( + "architecture", ImageArchitecture + ), + }, + }, + "description": pkg_doc, + } + + # If image is ISO type, search for checksum data + if kwargs["options"]["resources"]["image_type"] == "ISO_IMAGE": + kwargs["options"]["resources"]["checksum"] = { + "checksum_algorithm": config["checksum"].get("algorithm", ""), + "checksum_value": config["checksum"].get("value", ""), + } + + pkg = PackageType(pkg_name, (Entity,), kwargs) + + # return the compile version of package + return pkg.compile() + + @classmethod + def decompile(mcls, cdict, context=[], prefix=""): + """decompile method for downloadble images""" + + name = cdict.get("name") or "" + description = cdict.get("description") or "" + + options = cdict["options"] + resources = options.get("resources", {}) + + img_type = resources["image_type"] + config = { + "image": { + "name": options.get("name", ""), + "type": resources.get("image_type", ""), + "source": resources.get("source_uri", ""), + "architecture": resources.get("architecture", ""), + } + } + + if resources.get("version", None): + config["product"] = { + "name": resources["version"].get("product_name") or "", + "version": resources["version"].get("product_version") or "", + } + + if img_type == "ISO_IMAGE" and resources.get("checksum", None): + config["checksum"] = { + "algorithm": resources["checksum"].get("checksum_algorithm") or "", + "value": resources["checksum"].get("checksum_value") or "", + } + + config["description"] = description + config["name"] = name + + pkg_name = "{}{}".format(prefix, config["name"]) + pkg_name = get_valid_identifier(pkg_name) + + # vm_disk_package name should neither be `Package` nor `VmDiskPackage` + if pkg_name == PackageType.__schema_name__: + raise TypeError("{} is a reserved name for this entity".format(pkg_name)) + + cls = mcls(pkg_name, (Entity,), config) + cls.__doc__ = description + + return cls + + +class VmDiskPackageValidator(PropertyValidator, openapi_type="app_vm_disk_package"): + __default__ = None + __kind__ = VmDiskPackageType + + +def vm_disk_package(name="", description="", config_file=None, config={}): + + if not (config_file or config): + raise ValueError("Downloadable image configuration not found !!!") + + if not config: + config = read_spec(filename=config_file, depth=2) + + if not isinstance(config, dict): + LOG.debug("Downloadable Image Config: {}".format(config)) + raise TypeError("Downloadable image configuration is not of type dict") + + config["description"] = description or config.get("description", "") + name = name or config.get("name") or getattr(VmDiskPackageType, "__schema_name__") + bases = (Entity,) + + # Check for given sections, if not present add an empty one + for section in ConfigSections: + if section not in config: + config[section] = {} + + # Convert product version and checksum value to string + config["product"]["version"] = str(config["product"].get("version", "")) + config["checksum"]["value"] = str(config["checksum"].get("value", "")) + + return VmDiskPackageType(name, bases, config) + + +def ahv_vm_disk_package(name="", description="", config_file=None, config_data={}): + + if not (config_file or config_data): + raise ValueError("Downloadable image configuration not found !!!") + + if not config_data: + config_data = 
read_spec(filename=config_file, depth=2) + + if not isinstance(config_data, dict): + LOG.debug("Downloadable Image Config: {}".format(config_data)) + raise TypeError("Downloadable image configuration is not of type dict") + + return vm_disk_package(name=name, description=description, config=config_data) diff --git a/framework/calm/dsl/builtins/models/vm_profile.py b/framework/calm/dsl/builtins/models/vm_profile.py new file mode 100644 index 0000000..22d2573 --- /dev/null +++ b/framework/calm/dsl/builtins/models/vm_profile.py @@ -0,0 +1,102 @@ +from .entity import EntityType, Entity +from .validator import PropertyValidator + +from .profile import profile +from .deployment import deployment +from .substrate import substrate +from .service import service +from .package import package +from .ref import ref +from .action import action as Action +from .variable import VariableType as Variable +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + +# Simple Blueprint + + +class VmProfileType(EntityType): + __schema_name__ = "VmProfile" + __openapi_type__ = "app_vm_profile" + __has_dag_target__ = False + + def get_task_target(cls): + return + + def get_bp_classes(cls): + + profile_name = getattr(cls, "name", None) or cls.__name__ + + bp_service = service(name=profile_name + "Service") + + # create blueprint package + bp_pkg = package(name=profile_name + "Package", services=[ref(bp_service)]) + + # create blueprint substrate + bp_sub = substrate( + name=profile_name + "Substrate", + provider_type=cls.provider_type, + provider_spec=cls.provider_spec, + readiness_probe=cls.readiness_probe, + os_type=cls.os_type, + ) + + # create blueprint deployment + bp_dep = deployment( + name=profile_name + "Deployment", + min_replicas=cls.min_replicas, + max_replicas=cls.max_replicas, + packages=[ref(bp_pkg)], + substrate=ref(bp_sub), + ) + + # create blueprint profile + pfl_kwargs = {"name": profile_name, "deployments": [bp_dep]} + + environments = getattr(cls, "environments", None) + if environments: + pfl_kwargs["environments"] = environments + + bp_profile = profile(**pfl_kwargs) + + # Traverse over mro dict of class + cls_data = cls.get_default_attrs() + for klass in reversed(cls.mro()): + cls_data = {**cls_data, **klass.__dict__} + + # Separate class action under packages, substrates and profile + for k, v in cls_data.items(): + if isinstance(v, Action): + if k in ["__install__", "__uninstall__"]: + setattr(bp_pkg, k, v) + + elif k in ["__pre_create__", "__post_delete__"]: + setattr(bp_sub, k, v) + + else: + setattr(bp_profile, k, v) + + elif isinstance(v, Variable): + setattr(bp_profile, k, v) + + return { + "service": bp_service, + "package": bp_pkg, + "substrate": bp_sub, + "profile": bp_profile, + } + + +class VmProfileValidator(PropertyValidator, openapi_type="app_vm_profile"): + __default__ = None + __kind__ = VmProfileType + + +def vm_profile(**kwargs): + name = kwargs.get("name", None) + bases = (Entity,) + return VmProfileType(name, bases, kwargs) + + +VmProfile = vm_profile() diff --git a/framework/calm/dsl/cli/__init__.py b/framework/calm/dsl/cli/__init__.py new file mode 100644 index 0000000..a83d9eb --- /dev/null +++ b/framework/calm/dsl/cli/__init__.py @@ -0,0 +1,32 @@ +from .main import main +from .bp_commands import * # NoQA +from .app_commands import * # NoQA +from .runbook_commands import * # NoQA +from .library_tasks_commands import * # NoQA +from .endpoint_commands import * # NoQA +from .config_commands import * # NoQA +from .account_commands import * # 
NoQA +from .project_commands import * # NoQA +from .secret_commands import * # NoQA +from .cache_commands import * # NoQA +from .completion_commands import * # NoQA +from .init_command import * # NoQA +from calm.dsl.api import get_api_client +from .marketplace_bp_commands import * # NoQA +from .marketplace_item_commands import * # NoQA +from .marketplace_runbook_commands import * # NoQA +from .app_icon_commands import * # NoQA +from .user_commands import * # NoQA +from .group_commands import * # NoQA +from .role_commands import * # NoQA +from .directory_service_commands import * # NoQA +from .acp_commands import * # NoQA +from .task_commands import * # NoQA +from .brownfield_commands import * # NoQA +from .environment_commands import * # NoQA +from .protection_policy_commands import * # NoQA +from .vm_recovery_point_commands import * # NoQA +from .scheduler_commands import * # NoQA +from .network_group_commands import * # NoQA + +__all__ = ["main", "get_api_client"] diff --git a/framework/calm/dsl/cli/account_commands.py b/framework/calm/dsl/cli/account_commands.py new file mode 100644 index 0000000..cb64d77 --- /dev/null +++ b/framework/calm/dsl/cli/account_commands.py @@ -0,0 +1,60 @@ +import click + +from .accounts import get_accounts, delete_account, describe_account, sync_account +from .main import get, delete, describe, sync + + +@get.command("accounts") +@click.option("--name", "-n", default=None, help="Search for provider account by name") +@click.option( + "--filter", "filter_by", "-f", default=None, help="Filter projects by this string" +) +@click.option("--limit", "-l", default=20, help="Number of results to return") +@click.option( + "--offset", "-s", default=0, help="Offset results by the specified amount" +) +@click.option( + "--quiet", "-q", is_flag=True, default=False, help="Show only account names" +) +@click.option( + "--all-items", "-a", is_flag=True, help="Get all items, including deleted ones" +) +@click.option( + "--type", + "account_type", + default=None, + multiple=True, + help="Search for accounts of specific provider", + type=click.Choice( + ["aws", "k8s", "vmware", "azure", "gcp", "nutanix", "custom_provider"] + ), +) +def _get_accounts(name, filter_by, limit, offset, quiet, all_items, account_type): + """Get accounts, optionally filtered by a string""" + + get_accounts(name, filter_by, limit, offset, quiet, all_items, account_type) + + +@delete.command("account") +@click.argument("account_names", nargs=-1) +def _delete_account(account_names): + """Deletes a account from settings""" + + delete_account(account_names) + + +@describe.command("account") +@click.argument("account_name") +def _describe_account(account_name): + """Describe a account""" + + describe_account(account_name) + + +@sync.command("account", feature_min_version="3.0.0") +@click.argument("account_name") +def _sync_account(account_name): + """Sync a platform account + Args: account_name (string): name of the account to sync""" + + sync_account(account_name) diff --git a/framework/calm/dsl/cli/accounts.py b/framework/calm/dsl/cli/accounts.py new file mode 100644 index 0000000..2ba6fa7 --- /dev/null +++ b/framework/calm/dsl/cli/accounts.py @@ -0,0 +1,482 @@ +import time +import click +import arrow +import sys +from prettytable import PrettyTable +from distutils.version import LooseVersion as LV + +from calm.dsl.api import get_resource_api, get_api_client +from calm.dsl.config import get_context + +from .utils import get_name_query, get_states_filter, highlight_text +from .constants import ACCOUNT 
+from calm.dsl.store import Version +from calm.dsl.log import get_logging_handle +from calm.dsl.store import Cache +from calm.dsl.constants import CACHE + +LOG = get_logging_handle(__name__) + + +def get_accounts(name, filter_by, limit, offset, quiet, all_items, account_type): + """Get the accounts, optionally filtered by a string""" + + client = get_api_client() + calm_version = Version.get_version("Calm") + + params = {"length": limit, "offset": offset} + filter_query = "" + if name: + filter_query = get_name_query([name]) + if filter_by: + filter_query = filter_query + ";(" + filter_by + ")" + if account_type: + filter_query += ";(type=={})".format(",type==".join(account_type)) + if all_items: + filter_query += get_states_filter(ACCOUNT.STATES) + + # Remove PE accounts for versions >= 2.9.0 (TODO move to constants) + if LV(calm_version) >= LV("2.9.0"): + filter_query += ";type!=nutanix" + + if filter_query.startswith(";"): + filter_query = filter_query[1:] + + if filter_query: + params["filter"] = filter_query + + res, err = client.account.list(params) + + if err: + ContextObj = get_context() + server_config = ContextObj.get_server_config() + pc_ip = server_config["pc_ip"] + + LOG.warning("Cannot fetch accounts from {}".format(pc_ip)) + return + + res = res.json() + total_matches = res["metadata"]["total_matches"] + if total_matches > limit: + LOG.warning( + "Displaying {} out of {} entities. Please use --limit and --offset option for more results.".format( + limit, total_matches + ) + ) + + json_rows = res["entities"] + if not json_rows: + click.echo(highlight_text("No account found !!!\n")) + return + + if quiet: + for _row in json_rows: + row = _row["status"] + click.echo(highlight_text(row["name"])) + return + + table = PrettyTable() + table.field_names = [ + "NAME", + "ACCOUNT TYPE", + "STATE", + "OWNER", + "CREATED ON", + "LAST UPDATED", + "UUID", + ] + + for _row in json_rows: + row = _row["status"] + metadata = _row["metadata"] + + creation_time = int(metadata["creation_time"]) // 1000000 + last_update_time = int(metadata["last_update_time"]) // 1000000 + if "owner_reference" in metadata: + owner_reference_name = metadata["owner_reference"]["name"] + else: + owner_reference_name = "-" + + table.add_row( + [ + highlight_text(row["name"]), + highlight_text(row["resources"]["type"]), + highlight_text(row["resources"]["state"]), + highlight_text(owner_reference_name), + highlight_text(time.ctime(creation_time)), + "{}".format(arrow.get(last_update_time).humanize()), + highlight_text(metadata["uuid"]), + ] + ) + click.echo(table) + + +def get_account(client, account_name): + + params = {"filter": "name=={}".format(account_name)} + res, err = client.account.list(params=params) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + response = res.json() + entities = response.get("entities", None) + account = None + if entities: + if len(entities) != 1: + raise Exception("More than one account found - {}".format(entities)) + + LOG.info("{} found ".format(account_name)) + account = entities[0] + else: + raise Exception("No account having name {} found".format(account_name)) + + account_id = account["metadata"]["uuid"] + LOG.info("Fetching account details") + res, err = client.account.read(account_id) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + account = res.json() + return account + + +def delete_account(account_names): + + client = get_api_client() + for account_name in account_names: + account = get_account(client, 
account_name) + account_id = account["metadata"]["uuid"] + _, err = client.account.delete(account_id) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + LOG.info("Account {} deleted".format(account_name)) + + # Update account related caches i.e. Account, AhvImage, AhvSubnet + LOG.info("Updating accounts cache ...") + Cache.sync_table( + cache_type=[ + CACHE.ENTITY.ACCOUNT, + CACHE.ENTITY.AHV_DISK_IMAGE, + CACHE.ENTITY.AHV_SUBNET, + ] + ) + LOG.info("[Done]") + + +def describe_showback_data(spec): + + cost_items = spec[0]["state_cost_list"] + + for cost_item in cost_items: + if cost_item["state"] == "ON": + cost_list = cost_item["cost_list"] + for item in cost_list: + name = item["name"] + value = item["value"] + click.echo("\t{}: ".format(name.upper()), nl=False) + click.echo(highlight_text(str(value))) + + +def describe_nutanix_pe_account(spec): + + cluster_id = spec["cluster_uuid"] + cluster_name = spec["cluster_name"] + + click.echo("Cluster Id: {}".format(highlight_text(cluster_id))) + click.echo("Cluster Name: {}".format(highlight_text(cluster_name))) + + +def describe_nutanix_pc_account(provider_data): + + client = get_api_client() + ContextObj = get_context() + server_config = ContextObj.get_server_config() + + pc_port = provider_data["port"] + host_pc = provider_data["host_pc"] + pc_ip = provider_data["server"] if not host_pc else server_config["pc_ip"] + + click.echo("Is Host PC: {}".format(highlight_text(host_pc))) + click.echo("PC IP: {}".format(highlight_text(pc_ip))) + click.echo("PC Port: {}".format(highlight_text(pc_port))) + + cluster_list = provider_data["cluster_account_reference_list"] + if cluster_list: + click.echo("\nCluster Accounts:\n-----------------") + + for index, cluster in enumerate(cluster_list): + cluster_data = cluster["resources"]["data"] + click.echo( + "\n{}. {} (uuid: {})\tPE Account UUID: {}".format( + str(index + 1), + highlight_text(cluster_data["cluster_name"]), + highlight_text(cluster_data["cluster_uuid"]), + highlight_text(cluster["uuid"]), + ) + ) + + res, err = client.showback.status() + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + res = res.json() + showback_status = res["current_status"] == "enabled" + if not showback_status: + click.echo("Showback Status: {}".format(highlight_text("Not Enabled"))) + else: + click.echo("Showback Status: {}".format(highlight_text("Enabled"))) + price_items = cluster["resources"].get("price_items", []) + click.echo("Resource Usage Costs:\n----------------------") + describe_showback_data(price_items) + + +def describe_aws_account(spec): + + click.echo("Access Key ID: {}".format(spec["access_key_id"])) + regions = spec["regions"] + + click.echo("\nRegions:\n-------------- ") + for index, region in enumerate(regions): + click.echo("\t{}. {}".format(str(index + 1), highlight_text(region["name"]))) + + click.echo("\nPublic Images:\n-------------- ") + image_present = False + for region in regions: + if region.get("images"): + click.echo("\nRegion: {}".format(region["name"])) + click.echo("Images: ") + for index, image in enumerate(region["images"]): + image_present = True + click.echo( + "\t{}. 
{}".format(str(index + 1), highlight_text(image["name"])) + ) + + if not image_present: + click.echo("\t{}".format(highlight_text("No images provided"))) + + +def describe_vmware_account(spec): + + click.echo("Server: {}".format(highlight_text(spec["server"]))) + click.echo("Username: {}".format(highlight_text(spec["username"]))) + click.echo("Port: {}".format(highlight_text(spec["port"]))) + click.echo("Datacenter: {}".format(highlight_text(spec["datacenter"]))) + + +def describe_gcp_account(client, spec, account_id): + + click.echo("Project Id: {}".format(highlight_text(spec["project_id"]))) + click.echo("Client Email: {}".format(highlight_text(spec["client_email"]))) + click.echo("Token URI: {}".format(highlight_text(spec["token_uri"]))) + + click.echo("\nRegions:\n--------------\n") + regions = spec["regions"] + for index, region in enumerate(regions): + click.echo("\t{}. {}".format(str(index + 1), highlight_text(region["name"]))) + + if not regions: + click.echo("\t{}".format(highlight_text("No regions provided"))) + + click.echo("\nPublic Images:\n--------------\n") + images = spec["public_images"] + + Obj = get_resource_api("gcp/v1/images", client.connection) + payload = {"filter": "account_uuid=={};public_only==true".format(account_id)} + + res, err = Obj.list(payload) # TODO move this to GCP specific method + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + public_images = res.json()["entities"] + image_selfLink_name_map = {} + + for image in public_images: + name = image["status"]["name"] + selfLink = image["status"]["resources"]["selfLink"] + image_selfLink_name_map[selfLink] = name + + for index, image in enumerate(images): + name = image_selfLink_name_map.get(image["selfLink"], None) + if name: + click.echo("\t{}. 
{}".format(str(index + 1), highlight_text(name))) + + if not regions: + click.echo(highlight_text("No regions provided")) + + click.echo("\nGKE Details:\n--------------\n") + gke_config = spec["gke_config"] + + if not gke_config: + click.echo("\t{}".format(highlight_text("GKE not enabled"))) + else: + click.echo("{}: {}".format("Port", highlight_text(str(gke_config["port"])))) + click.echo("{}: {}".format("Server", highlight_text(gke_config["server"]))) + + +def describe_azure_account(spec): + + click.echo("Subscription ID: {}".format(highlight_text(spec["subscription_id"]))) + click.echo("Tenant ID: {}".format(highlight_text(spec["tenant_id"]))) + click.echo("Client ID: {}".format(highlight_text(spec["client_id"]))) + click.echo( + "Cloud Environment: {}".format(highlight_text(spec["cloud_environment"])) + ) + + +def describe_k8s_account(spec): + + click.echo("Server IP: {}".format(highlight_text(spec["server"]))) + click.echo("Port: {}".format(highlight_text(spec["port"]))) + + click.echo("Authentication Type: ", nl=False) + auth_types = { + "basic": "Basic Auth", + "client_certificate": "Client Certificate", + "ca_certificate": "CA Certificate", + } + + auth = spec["authentication"] + auth_type = auth_types[auth["type"]] + click.echo(highlight_text(auth_type)) + + +def describe_custom_provider_account(client, spec): + provider_name = resource_type_name = spec["provider_reference"]["name"] + click.echo("Provider Name: {}".format(provider_name)) + + click.echo("Account Variables") + for variable in spec["variable_list"]: + click.echo("\t{}".format(highlight_text(variable["name"]))) + + Obj = client.resource_types + + params = {"filter": "name=={}".format(resource_type_name)} + res, err = Obj.list(params=params) + if err: + LOG.exception("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + response = res.json() + entities = response.get("entities", None) + resource_type = None + if entities: + if len(entities) != 1: + LOG.exception("More than one account found - {}".format(entities)) + sys.exit(-1) + + LOG.info("{} found ".format(resource_type_name)) + resource_type = entities[0] + else: + LOG.exception("No account having name {} found".format(resource_type_name)) + sys.exit(-1) + + click.echo("Resource Type Schema Variables") + for schema_variable in resource_type["status"]["resources"]["schema_list"]: + click.echo("\t{}".format(highlight_text(schema_variable["name"]))) + + click.echo("Resource Type Variables List") + for variable in resource_type["status"]["resources"]["variable_list"]: + click.echo("\t{}".format(highlight_text(variable["name"]))) + + +def describe_account(account_name): + + client = get_api_client() + account = get_account(client, account_name) + account_type = account["status"]["resources"]["type"] + account_id = account["metadata"]["uuid"] + + click.echo("\n----Account Summary----\n") + + click.echo("\t\t", nl=False) + click.secho("GENERAL DETAILS\n", bold=True, underline=True) + click.echo( + "Name: " + + highlight_text(account_name) + + " (uuid: " + + highlight_text(account_id) + + ")" + ) + click.echo("Status: " + highlight_text(account["status"]["resources"]["state"])) + click.echo("Account Type: " + highlight_text(account_type.upper())) + click.echo( + "Owner: " + highlight_text(account["metadata"]["owner_reference"]["name"]) + ) + created_on = int(account["metadata"]["creation_time"]) // 1000000 + past = arrow.get(created_on).humanize() + click.echo( + "Created: {} ({})".format( + highlight_text(time.ctime(created_on)), highlight_text(past) + ) + ) 
+ + provider_data = account["status"]["resources"]["data"] + + click.echo("\n\t\t", nl=False) + click.secho("PROVIDER SPECIFIC DETAILS\n", bold=True, underline=True) + + if account_type == "nutanix": + describe_nutanix_pe_account(provider_data) + + if account_type == "nutanix_pc": + describe_nutanix_pc_account(provider_data) + + elif account_type == "aws": + describe_aws_account(provider_data) + + elif account_type == "vmware": + describe_vmware_account(provider_data) + + elif account_type == "gcp": + describe_gcp_account(client, provider_data, account_id) + + elif account_type == "k8s": + describe_k8s_account(provider_data) + + elif account_type == "azure": + describe_azure_account(provider_data) + + elif account_type == "custom_provider": + describe_custom_provider_account(client, provider_data) + + else: + click.echo("Provider details not present") + + if account_type in ["nutanix", "vmware"]: + res, err = client.showback.status() + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + res = res.json() + showback_status = res["current_status"] == "enabled" + if not showback_status: + click.echo("Showback Status: {}".format(highlight_text("Not Enabled"))) + else: + price_items = account["status"]["resources"]["price_items"] + click.echo("Showback Status: {}".format(highlight_text("Enabled"))) + click.echo("Resource Usage Costs:\n----------------------") + describe_showback_data(price_items) + + click.echo("") + + +def sync_account(account_name): + """Sync account with corresponding account name""" + + client = get_api_client() + account_uuid = client.account.get_name_uuid_map().get(account_name, "") + + if not account_uuid: + LOG.error("Could not find the account {}".format(account_name)) + sys.exit(-1) + + res, err = client.account.platform_sync(account_uuid) + LOG.info(res.json()) + + if err: + LOG.exception("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + LOG.info("[DONE]") diff --git a/framework/calm/dsl/cli/acp_commands.py b/framework/calm/dsl/cli/acp_commands.py new file mode 100644 index 0000000..a40d396 --- /dev/null +++ b/framework/calm/dsl/cli/acp_commands.py @@ -0,0 +1,105 @@ +import click + +from .acps import ( + get_acps, + create_acp, + delete_acp, + update_acp, + describe_acp, +) +from .main import get, create, delete, update, describe + + +@get.command("acps") +@click.option("--name", "-n", default=None, help="Search for acps by name") +@click.option( + "--filter", "filter_by", "-f", default=None, help="Filter acps by this string" +) +@click.option("--project", "-p", required=True, help="ACP project name") +@click.option("--limit", "-l", default=20, help="Number of results to return") +@click.option( + "--offset", "-s", default=0, help="Offset results by the specified amount" +) +@click.option("--quiet", "-q", is_flag=True, default=False, help="Show only acp names") +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format", +) +def _get_acps(name, project, filter_by, limit, offset, quiet, out): + """Get acps, optionally filtered by a string""" + + get_acps(name, project, filter_by, limit, offset, quiet, out) + + +@create.command("acp") +@click.option( + "--role", + "-r", + required=True, + help="ACP role", +) +@click.option("--project", "-p", required=True, help="ACP project name") +@click.option("--user", "-u", "users", multiple=True, default=[]) +@click.option("--group", "-g", "groups", multiple=True, default=[]) +@click.option("--name", "-name", 
default=None) +def _create_acp(role, project, users, groups, name): + """Creates an acp""" + + create_acp(role, project, users, groups, name) + + +@delete.command("acp") +@click.argument("acp_name") +@click.option("--project", "-p", required=True, help="ACP project name") +def _delete_acp(acp_name, project): + """Deletes an acp""" + + delete_acp(acp_name, project) + + +@update.command("acp") +@click.argument("acp_name") +@click.option("--project", "-p", required=True, help="ACP project name") +@click.option("--add_user", "-au", "add_user_list", multiple=True, default=[]) +@click.option("--add_group", "-ag", "add_group_list", multiple=True, default=[]) +@click.option("--remove_user", "-ru", "remove_user_list", multiple=True, default=[]) +@click.option("--remove_group", "-rg", "remove_group_list", multiple=True, default=[]) +def _update_acp( + acp_name, + project, + add_user_list, + add_group_list, + remove_user_list, + remove_group_list, +): + """Updates an acp""" + + update_acp( + acp_name=acp_name, + project_name=project, + add_user_list=add_user_list, + add_group_list=add_group_list, + remove_user_list=remove_user_list, + remove_group_list=remove_group_list, + ) + + +@describe.command("acp") +@click.argument("acp_name") +@click.option("--project", "-p", required=True, help="ACP project name") +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format", +) +def _describe_acp(acp_name, project, out): + """Describe an acp""" + + describe_acp(acp_name=acp_name, project_name=project, out=out) diff --git a/framework/calm/dsl/cli/acps.py b/framework/calm/dsl/cli/acps.py new file mode 100644 index 0000000..98935d5 --- /dev/null +++ b/framework/calm/dsl/cli/acps.py @@ -0,0 +1,692 @@ +import click +import json +import sys +import uuid +from prettytable import PrettyTable + +from calm.dsl.api import get_api_client, get_resource_api +from calm.dsl.config import get_context +from calm.dsl.log import get_logging_handle +from calm.dsl.store import Cache +from calm.dsl.constants import CACHE +from calm.dsl.builtins import Ref + +from .constants import ACP +from .task_commands import watch_task +from .utils import get_name_query, highlight_text + + +LOG = get_logging_handle(__name__) + + +def get_acps_from_project(client, project_uuid, **kwargs): + """This routine gets acps from project using project uuid""" + + # get project details + projects_intermal_obj = get_resource_api("projects_internal", client.connection) + proj_info, err = projects_intermal_obj.read(project_uuid) + if err: + return None, err + + proj_info = proj_info.json() + + # construct acp info dict + acps = {} + acps["entities"] = [] + role_uuid = kwargs.get("role_uuid", None) + acp_name = kwargs.get("acp_name", None) + limit = kwargs.get("limit", 20) + offset = kwargs.get("offset", 0) + + terminate = False + for acp in proj_info["status"]["access_control_policy_list_status"]: + + # role uuid filter + if ( + role_uuid + and role_uuid + != acp["access_control_policy_status"]["resources"]["role_reference"][ + "uuid" + ] + ): + continue + + # acp name filter + if acp_name and acp_name != acp["access_control_policy_status"]["name"]: + continue + elif acp_name: + terminate = True + + (acps["entities"]).append( + {"status": acp["access_control_policy_status"], "metadata": acp["metadata"]} + ) + + if terminate: + break + + acps["metadata"] = {"total_matches": len(acps["entities"])} + + acps["entities"] = acps["entities"][offset : offset + limit] + return acps, None + + +def 
get_acps(name, project_name, filter_by, limit, offset, quiet, out): + """Get the acps, optionally filtered by a string""" + + client = get_api_client() + + params = {"length": 250, "filter": "name=={}".format(project_name)} + project_name_uuid_map = client.project.get_name_uuid_map(params) + + project_uuid = project_name_uuid_map.get(project_name, "") + if not project_uuid: + LOG.error("Project '{}' not found".format(project_name)) + sys.exit(-1) + + params = {"length": limit, "offset": offset} + filter_query = "" + if name: + filter_query = get_name_query([name]) + if filter_by: + filter_query = filter_query + ";(" + filter_by + ")" + if filter_query.startswith(";"): + filter_query = filter_query[1:] + if filter_query: + params["filter"] = filter_query + + if project_uuid: + res, err = get_acps_from_project( + client, project_uuid, limit=limit, offset=offset + ) + else: + res, err = client.acp.list(params=params) + res = res.json() + + if err: + ContextObj = get_context() + server_config = ContextObj.get_server_config() + pc_ip = server_config["pc_ip"] + + LOG.warning("Cannot fetch acps from {}".format(pc_ip)) + return + + total_matches = res["metadata"]["total_matches"] + if total_matches > limit: + LOG.warning( + "Displaying {} out of {} entities. Please use --limit and --offset option for more results.".format( + limit, total_matches + ) + ) + + if out == "json": + click.echo(json.dumps(res, indent=4, separators=(",", ": "))) + return + + json_rows = res["entities"] + if not json_rows: + click.echo(highlight_text("No acp found !!!\n")) + return + + if quiet: + for _row in json_rows: + row = _row["status"] + click.echo(highlight_text(row["name"])) + return + + table = PrettyTable() + table.field_names = [ + "NAME", + "REFERENCED_ROLE", + "REFERENCED_PROJECT", + "UUID", + ] + + for _row in json_rows: + row = _row["status"] + metadata = _row["metadata"] + + role_ref = row["resources"].get("role_reference", {}) + role = role_ref.get("name", "-") + + table.add_row( + [ + highlight_text(row["name"]), + highlight_text(role), + highlight_text(project_name), + highlight_text(metadata["uuid"]), + ] + ) + + click.echo(table) + + +def get_system_roles(): + + # 'Self-Service Admin', 'Prism Admin', 'Prism Viewer', 'Super Admin' are forbidden roles + return ["Project Admin", "Operator", "Consumer", "Developer"] + + +def create_acp(role, project, acp_users, acp_groups, name): + + if not (acp_users or acp_groups): + LOG.error("Atleast single user/group should be given") + sys.exit(-1) + + client = get_api_client() + acp_name = name or "nuCalmAcp-{}".format(str(uuid.uuid4())) + + # Check whether there is an existing acp with this name + params = {"filter": "name=={}".format(acp_name)} + res, err = client.acp.list(params=params) + if err: + return None, err + + response = res.json() + entities = response.get("entities", None) + + if entities: + LOG.error("ACP {} already exists.".format(acp_name)) + sys.exit(-1) + + params = {"length": 250} + project_name_uuid_map = client.project.get_name_uuid_map(params) + + project_uuid = project_name_uuid_map.get(project, "") + if not project_uuid: + LOG.error("Project '{}' not found".format(project)) + sys.exit(-1) + + LOG.info("Fetching project '{}' details".format(project)) + ProjectInternalObj = get_resource_api("projects_internal", client.connection) + res, err = ProjectInternalObj.read(project_uuid) + if err: + LOG.error(err) + sys.exit(-1) + + project_payload = res.json() + project_payload.pop("status", None) + project_resources = 
project_payload["spec"]["project_detail"].get("resources", "") + + # Check if users are present in project + project_users = [] + for user in project_resources.get("user_reference_list", []): + project_users.append(user["name"]) + + if not set(acp_users).issubset(set(project_users)): + LOG.error( + "Users {} are not registered in project".format( + set(acp_users).difference(set(project_users)) + ) + ) + sys.exit(-1) + + # Check if groups are present in project + project_groups = [] + for group in project_resources.get("external_user_group_reference_list", []): + project_groups.append(group["name"]) + + if not set(acp_groups).issubset(set(project_groups)): + LOG.error( + "Groups {} are not registered in project".format( + set(acp_groups).difference(set(project_groups)) + ) + ) + sys.exit(-1) + + role_cache_data = Cache.get_entity_data(entity_type=CACHE.ENTITY.ROLE, name=role) + if not role_cache_data.get("uuid"): + LOG.error("Role with name {} not found".format(role)) + sys.exit(-1) + role_uuid = role_cache_data.get("uuid") + + limit = 250 + res, err = get_acps_from_project( + client, project_uuid, role_uuid=role_uuid, limit=limit + ) + if err: + return None, err + + entities = res.get("entities", None) + if res["metadata"]["total_matches"] > 0: + LOG.error( + "ACP {} already exists for given role in project".format( + entities[0]["status"]["name"] + ) + ) + sys.exit(-1) + + # Constructing ACP payload -------- + + # Getting the cluster uuids for acp + whitelisted_subnets = [] + whiltelisted_clusters = [] + for subnet in project_resources.get("subnet_reference_list", []): + whitelisted_subnets.append(subnet["uuid"]) + + for subnet in project_resources.get("external_network_list", []): + whitelisted_subnets.append(subnet["uuid"]) + + for cluster in project_resources.get("cluster_reference_list", []): + whiltelisted_clusters.append(cluster["uuid"]) + + cluster_uuids = [] + for subnet_uuid in whitelisted_subnets: + subnet_cache_data = Cache.get_entity_data_using_uuid( + entity_type=CACHE.ENTITY.AHV_SUBNET, uuid=subnet_uuid + ) + + if subnet_cache_data.get("subnet_type", "VLAN") == "VLAN": + cluster_uuids.append(subnet_cache_data["cluster_uuid"]) + + cluster_uuids = list(set(whiltelisted_clusters) | set(cluster_uuids)) + # Default context for acp + default_context = ACP.DEFAULT_CONTEXT + + # Setting project uuid in default context + default_context["scope_filter_expression_list"][0]["right_hand_side"][ + "uuid_list" + ] = [project_uuid] + + # Role specific filters + entity_filter_expression_list = [] + if role == "Project Admin": + entity_filter_expression_list = ( + ACP.ENTITY_FILTER_EXPRESSION_LIST.PROJECT_ADMIN + ) # TODO remove index bases searching + entity_filter_expression_list[4]["right_hand_side"]["uuid_list"] = [ + project_uuid + ] + + elif role == "Developer": + entity_filter_expression_list = ACP.ENTITY_FILTER_EXPRESSION_LIST.DEVELOPER + + elif role == "Consumer": + entity_filter_expression_list = ACP.ENTITY_FILTER_EXPRESSION_LIST.CONSUMER + + elif role == "Operator" and cluster_uuids: + entity_filter_expression_list = ACP.ENTITY_FILTER_EXPRESSION_LIST.OPERATOR + + else: + entity_filter_expression_list = get_filters_custom_role(role_uuid, client) + + if cluster_uuids: + entity_filter_expression_list.append( + { + "operator": "IN", + "left_hand_side": {"entity_type": "cluster"}, + "right_hand_side": {"uuid_list": cluster_uuids}, + } + ) + + # TODO check these users are not present in project's other acps + user_references = [] + user_name_uuid_map = 
client.user.get_name_uuid_map({"length": 1000}) + for u in acp_users: + user_references.append( + {"kind": "user", "name": u, "uuid": user_name_uuid_map[u]} + ) + + usergroup_name_uuid_map = client.group.get_name_uuid_map({"length": 1000}) + group_references = [] + for g in acp_groups: + group_references.append( + {"kind": "user_group", "name": g, "uuid": usergroup_name_uuid_map[g]} + ) + + context_list = [default_context] + if entity_filter_expression_list: + context_list.append( + {"entity_filter_expression_list": entity_filter_expression_list} + ) + + acp_payload = { + "acp": { + "name": acp_name, + "resources": { + "role_reference": Ref.Role(role), + "user_reference_list": user_references, + "user_group_reference_list": group_references, + "filter_list": {"context_list": context_list}, + }, + }, + "metadata": {"kind": "access_control_policy"}, + "operation": "ADD", + } + + # Appending acp payload to project + acp_list = project_payload["spec"].get("access_control_policy_list", []) + for _acp in acp_list: + _acp["operation"] = "UPDATE" + + acp_list.append(acp_payload) + project_payload["spec"]["access_control_policy_list"] = acp_list + + LOG.info("Creating acp {}".format(acp_name)) + res, err = ProjectInternalObj.update(project_uuid, project_payload) + if err: + LOG.error(err) + sys.exit(-1) + + res = res.json() + stdout_dict = { + "name": acp_name, + "execution_context": res["status"]["execution_context"], + } + click.echo(json.dumps(stdout_dict, indent=4, separators=(",", ": "))) + LOG.info("Polling on acp creation task") + watch_task(res["status"]["execution_context"]["task_uuid"]) + + +def get_filters_custom_role(role_uuid, client): + + role, err = client.role.read(id=role_uuid) + if err: + LOG.error("Couldn't fetch role with uuid {}, error: {}".format(role_uuid, err)) + sys.exit(-1) + role = role.json() + permissions_list = ( + role.get("status", {}).get("resources", {}).get("permission_reference_list", []) + ) + permission_names = set() + for perm in permissions_list: + if perm: + perm_name = perm.get("name", "") + if perm_name: + permission_names.add(perm_name.lower()) + entity_filter_expression_list = [] + for perm_filter in ACP.CUSTOM_ROLE_PERMISSIONS_FILTERS: + if perm_filter.get("permission") in permission_names: + entity_filter_expression_list.append(perm_filter.get("filter")) + return entity_filter_expression_list + + +def delete_acp(acp_name, project_name): + + client = get_api_client() + + params = {"length": 250, "filter": "name=={}".format(project_name)} + project_name_uuid_map = client.project.get_name_uuid_map(params) + + project_uuid = project_name_uuid_map.get(project_name, "") + if not project_uuid: + LOG.error("Project '{}' not found.".format(project_name)) + sys.exit(-1) + + LOG.info("Fetching project '{}' details".format(project_name)) + ProjectInternalObj = get_resource_api("projects_internal", client.connection) + res, err = ProjectInternalObj.read(project_uuid) + if err: + LOG.error(err) + sys.exit(-1) + + project_payload = res.json() + project_payload.pop("status", None) + + is_acp_present = False + for _row in project_payload["spec"].get("access_control_policy_list", []): + if _row["acp"]["name"] == acp_name: + _row["operation"] = "DELETE" + is_acp_present = True + else: + _row["operation"] = "UPDATE" + + if not is_acp_present: + LOG.error( + "ACP({}) is not associated with project({})".format(acp_name, project_name) + ) + sys.exit(-1) + + LOG.info( + "Deleting acp '{}' associated with project '{}'".format(acp_name, project_name) + ) + res, err = 
ProjectInternalObj.update(project_uuid, project_payload) + if err: + LOG.error(err) + sys.exit(-1) + + res = res.json() + stdout_dict = { + "name": acp_name, + "execution_context": res["status"]["execution_context"], + } + click.echo(json.dumps(stdout_dict, indent=4, separators=(",", ": "))) + LOG.info("Polling on acp deletion task") + watch_task(res["status"]["execution_context"]["task_uuid"]) + + +def describe_acp(acp_name, project_name, out): + + client = get_api_client() + + params = {"length": 250, "filter": "name=={}".format(project_name)} + project_name_uuid_map = client.project.get_name_uuid_map(params) + + project_uuid = project_name_uuid_map.get(project_name, "") + if not project_uuid: + LOG.error("Project '{}' not found".format(project_name)) + sys.exit(-1) + + limit = 250 + res, err = get_acps_from_project( + client, project_uuid, acp_name=acp_name, limit=limit + ) + + if err: + return None, err + + if res["metadata"]["total_matches"] == 0: + LOG.error( + "No ACP found with name '{}' and project '{}'".format( + acp_name, project_name + ) + ) + sys.exit(-1) + + acp_uuid = res["entities"][0]["metadata"]["uuid"] + LOG.info("Fetching acp {} details".format(acp_name)) + res, err = client.acp.read(acp_uuid) + if err: + LOG.error(err) + sys.exit(-1) + + acp = res.json() + if out == "json": + click.echo(json.dumps(acp, indent=4, separators=(",", ": "))) + return + + click.echo("\n----ACP Summary----\n") + click.echo("Name: " + highlight_text(acp_name) + " (uuid: " + acp_uuid + ")") + click.echo("Status: " + highlight_text(acp["status"]["state"])) + click.echo("Project: " + highlight_text(project_name)) + + acp_users = acp["status"]["resources"].get("user_reference_list", []) + acp_groups = acp["status"]["resources"].get("user_group_reference_list", []) + acp_role = acp["status"]["resources"].get("role_reference", []) + + if acp_role: + role_data = Cache.get_entity_data_using_uuid( + entity_type=CACHE.ENTITY.ROLE, uuid=acp_role["uuid"] + ) + if not role_data: + LOG.error( + "Role ({}) details not present. 
Please update cache".format( + acp_role["uuid"] + ) + ) + sys.exit(-1) + click.echo("Role: " + highlight_text(role_data["name"])) + + if acp_users: + user_uuid_name_map = client.user.get_uuid_name_map({"length": 1000}) + click.echo("Users [{}]:".format(highlight_text(len(acp_users)))) + for user in acp_users: + click.echo("\t" + highlight_text(user_uuid_name_map[user["uuid"]])) + + if acp_groups: + usergroup_uuid_name_map = client.group.get_uuid_name_map({"length": 1000}) + click.echo("Groups [{}]:".format(highlight_text(len(acp_groups)))) + for group in acp_groups: + click.echo("\t" + highlight_text(usergroup_uuid_name_map[group["uuid"]])) + + +def update_acp( + acp_name, + project_name, + add_user_list, + add_group_list, + remove_user_list, + remove_group_list, +): + + if not (add_user_list or add_group_list or remove_user_list or remove_group_list): + LOG.error("Atleast single user/group should be given for add/remove operations") + sys.exit(-1) + + client = get_api_client() + + params = {"length": 250, "filter": "name=={}".format(project_name)} + project_name_uuid_map = client.project.get_name_uuid_map(params) + + project_uuid = project_name_uuid_map.get(project_name, "") + if not project_uuid: + LOG.error("Project '{}' not found".format(project_name)) + sys.exit(-1) + + LOG.info("Fetching project '{}' details".format(project_name)) + ProjectInternalObj = get_resource_api("projects_internal", client.connection) + res, err = ProjectInternalObj.read(project_uuid) + if err: + LOG.error(err) + sys.exit(-1) + + project_payload = res.json() + project_payload.pop("status", None) + + project_resources = project_payload["spec"]["project_detail"]["resources"] + project_users = [] + project_groups = [] + for user in project_resources.get("user_reference_list", []): + project_users.append(user["name"]) + + for group in project_resources.get("external_user_group_reference_list", []): + project_groups.append(group["name"]) + + # Checking if to be added users/groups are registered in project + if not set(add_user_list).issubset(set(project_users)): + LOG.error( + "Users {} are not registered in project".format( + set(add_user_list).difference(set(project_users)) + ) + ) + sys.exit(-1) + + if not set(add_group_list).issubset(set(project_groups)): + LOG.error( + "Groups {} are not registered in project".format( + set(add_group_list).difference(set(project_groups)) + ) + ) + sys.exit(-1) + + # Raise error if same user/group is present in both add/remove list + common_users = set(add_user_list).intersection(set(remove_user_list)) + if common_users: + LOG.error("Users {} are both in add_user and remove_user".format(common_users)) + sys.exit(-1) + + common_groups = set(add_group_list).intersection(set(remove_group_list)) + if common_groups: + LOG.error( + "Groups {} are present both in add_groups and remove_groups".format( + common_groups + ) + ) + sys.exit(-1) + + # Flag to check whether given acp is present in project or not + is_acp_present = False + for _row in project_payload["spec"].get("access_control_policy_list", []): + _row["operation"] = "UPDATE" + + if _row["acp"]["name"] == acp_name: + is_acp_present = True + acp_resources = _row["acp"]["resources"] + updated_user_reference_list = [] + updated_group_reference_list = [] + + acp_users = [] + acp_groups = [] + for user in acp_resources.get("user_reference_list", []): + acp_users.append(user["name"]) + + for group in acp_resources.get("user_group_reference_list", []): + acp_groups.append(group["name"]) + + if not 
set(remove_user_list).issubset(set(acp_users)): + LOG.error( + "Users {} are not registered in acp".format( + set(remove_user_list).difference(set(acp_users)) + ) + ) + sys.exit(-1) + + if not set(remove_group_list).issubset(set(acp_groups)): + LOG.error( + "Groups {} are not registered in acp".format( + set(remove_group_list).difference(set(acp_groups)) + ) + ) + sys.exit(-1) + + for user in acp_resources.get("user_reference_list", []): + if user["name"] not in remove_user_list: + updated_user_reference_list.append(user) + + for group in acp_resources.get("user_group_reference_list", []): + if group["name"] not in remove_group_list: + updated_group_reference_list.append(group) + + # TODO check these users are not present in project's other acps + user_name_uuid_map = client.user.get_name_uuid_map({"length": 1000}) + for user in add_user_list: + updated_user_reference_list.append( + {"kind": "user", "name": user, "uuid": user_name_uuid_map[user]} + ) + + usergroup_name_uuid_map = client.group.get_name_uuid_map({"length": 1000}) + for group in add_group_list: + updated_group_reference_list.append( + { + "kind": "user_group", + "name": group, + "uuid": usergroup_name_uuid_map[group], + } + ) + + acp_resources["user_reference_list"] = updated_user_reference_list + acp_resources["user_group_reference_list"] = updated_group_reference_list + + if not is_acp_present: + LOG.error( + "No ACP with name '{}' exists in project '{}'".format( + acp_name, project_name + ) + ) + sys.exit(-1) + + LOG.info( + "Updating acp '{}' associated with project '{}'".format(acp_name, project_name) + ) + res, err = ProjectInternalObj.update(project_uuid, project_payload) + if err: + LOG.error(err) + sys.exit(-1) + + res = res.json() + stdout_dict = { + "name": acp_name, + "execution_context": res["status"]["execution_context"], + } + click.echo(json.dumps(stdout_dict, indent=4, separators=(",", ": "))) + LOG.info("Polling on acp updation task") + watch_task(res["status"]["execution_context"]["task_uuid"]) diff --git a/framework/calm/dsl/cli/app_commands.py b/framework/calm/dsl/cli/app_commands.py new file mode 100644 index 0000000..6bace98 --- /dev/null +++ b/framework/calm/dsl/cli/app_commands.py @@ -0,0 +1,321 @@ +import click + +from calm.dsl.api import get_api_client + +from .main import main, get, describe, delete, run, watch, download, create, update +from .utils import Display, FeatureFlagGroup +from .apps import ( + get_apps, + describe_app, + run_actions, + run_patches, + watch_patch_or_action, + watch_app, + delete_app, + download_runlog, + create_app, +) +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +@create.command("app") +@click.option( + "--file", + "-f", + "bp_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + required=True, + help="Path of Blueprint file to upload", +) +@click.option( + "--brownfield_deployments", + "-b", + "brownfield_deployment_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + help="Path of Brownfield Deployment file", +) +@click.option( + "--name", "-n", "app_name", default=None, help="Application name (Optional)" +) +@click.option( + "--ignore_runtime_variables", + "-i", + is_flag=True, + default=False, + help="Ignore runtime variables and use defaults while launching blueprint", +) +@click.option( + "--launch_params", + "-l", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + help="Path to python file for runtime editables", +) +def 
_create_app( + app_name, + bp_file, + brownfield_deployment_file, + ignore_runtime_variables, + launch_params, +): + """Creates an application. + + \b + This command consumes a DSL blueprint file and creates a blueprint from it. + If the created blueprint is in ACTIVE state, it is launched to create an application. + The blueprint is deleted after it is launched. + """ + + create_app( + app_name=app_name, + bp_file=bp_file, + patch_editables=not ignore_runtime_variables, + launch_params=launch_params, + brownfield_deployment_file=brownfield_deployment_file, + ) + + +@get.command("apps") +@click.option("--name", "-n", default=None, help="Search for apps by name") +@click.option( + "--filter", "filter_by", "-f", default=None, help="Filter apps by this string" +) +@click.option("--limit", "-l", default=20, help="Number of results to return") +@click.option( + "--offset", "-s", default=0, help="Offset results by the specified amount" +) +@click.option( + "--quiet", "-q", is_flag=True, default=False, help="Show only application names" +) +@click.option( + "--all-items", "-a", is_flag=True, help="Get all items, including deleted ones" +) +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format", +) +def _get_apps(name, filter_by, limit, offset, quiet, all_items, out): + """Get apps, optionally filtered by a string""" + get_apps(name, filter_by, limit, offset, quiet, all_items, out) + + +@describe.command("app") +@click.argument("app_name") +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format", +) +def _describe_app(app_name, out): + """Describe an app""" + describe_app(app_name, out) + + +@run.command("action") +@click.argument("action_name") +@click.option( + "--app", + "app_name", + "-a", + default=None, + required=True, + help="Name of the application on which to run the action", +) +@click.option( + "--ignore_runtime_variables", + "-i", + is_flag=True, + default=False, + help="Ignore runtime variables and use defaults", +) +@click.option( + "--runtime_params", + "-r", + "runtime_params_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + help="Path to python file for runtime editables", +) +@click.option("--watch/--no-watch", "-w", default=False, help="Watch scrolling output") +def _run_actions( + app_name, action_name, watch, ignore_runtime_variables, runtime_params_file +): + """App LCM actions. + All runtime variables will be prompted by default. When the 'ignore_runtime_variables' flag is passed, no variables will be prompted and all default values will be used. + The action default values can be overridden by passing a Python file via 'runtime_params'. Any variable not defined in the Python file will keep the default value defined in the blueprint. When passing a Python file, no variables will be prompted. 
+ + \b + >: runtime_params: Python file consisting of variables 'variable_list' + Ex: variable_list = [ + { + "value": {"value": }, + "name": "" + } + ] + """ + + run_actions( + app_name=app_name, + action_name=action_name, + watch=watch, + patch_editables=not ignore_runtime_variables, + runtime_params_file=runtime_params_file, + ) + + +@watch.command("action_runlog") +@click.argument("runlog_uuid") +@click.option( + "--app", + "app_name", + "-a", + default=None, + required=True, + help="Watch action run in an app", +) +@click.option( + "--poll-interval", + "poll_interval", + "-p", + type=int, + default=10, + show_default=True, + help="Give polling interval", +) +def _watch_action_runlog(runlog_uuid, app_name, poll_interval): + """Watch an app""" + + def display_action(screen): + watch_patch_or_action( + runlog_uuid, app_name, get_api_client(), screen, poll_interval + ) + screen.wait_for_input(10.0) + + Display.wrapper(display_action, watch=True) + LOG.info("Action run {} completed for app {}".format(runlog_uuid, app_name)) + + +@watch.command("app") +@click.argument("app_name") +@click.option( + "--poll-interval", + "poll_interval", + "-p", + type=int, + default=10, + show_default=True, + help="Give polling interval", +) +def _watch_app(app_name, poll_interval): + """Watch an app""" + + def display_action(screen): + watch_app(app_name, screen) + screen.wait_for_input(10.0) + + Display.wrapper(display_action, watch=True) + LOG.info("Action runs completed for app {}".format(app_name)) + + +@download.command("action_runlog") +@click.argument("runlog_uuid") +@click.option( + "--app", "app_name", "-a", required=True, help="App the action belongs to" +) +@click.option("--file", "file_name", "-f", help="How to name the downloaded file") +def _download_runlog(runlog_uuid, app_name, file_name): + """Download runlogs, given runlog uuid and app name""" + download_runlog(runlog_uuid, app_name, file_name) + + +@delete.command("app") +@click.argument("app_names", nargs=-1) +@click.option("--soft", "-s", is_flag=True, default=False, help="Soft delete app") +def _delete_app(app_names, soft): + """Deletes an application""" + + delete_app(app_names, soft) + + +@main.group(cls=FeatureFlagGroup) +def start(): + """Start entities""" + pass + + +@main.group(cls=FeatureFlagGroup) +def stop(): + """Stop entities""" + pass + + +@main.group(cls=FeatureFlagGroup) +def restart(): + """Restart entities""" + pass + + +@start.command("app") +@click.argument("app_name") +@click.option("--watch/--no-watch", "-w", default=False, help="Watch scrolling output") +def start_app(app_name, watch): + """Starts an application""" + + run_actions(app_name=app_name, action_name="start", watch=watch) + + +@stop.command("app") +@click.argument("app_name") +@click.option("--watch/--no-watch", "-w", default=False, help="Watch scrolling output") +def stop_app(app_name, watch): + """Stops an application""" + + run_actions(app_name=app_name, action_name="stop", watch=watch) + + +@restart.command("app") +@click.argument("app_name") +@click.option("--watch/--no-watch", "-w", default=False, help="Watch scrolling output") +def restart_app(app_name, watch): + """Restarts an application""" + + run_actions(app_name=app_name, action_name="restart", watch=watch) + + +@update.command("app", feature_min_version="3.3.0") +@click.argument("app_name") +@click.argument("patch_name") +@click.option( + "--ignore_runtime_variables", + "-i", + is_flag=True, + default=False, + help="Ignore runtime variables and use defaults", +) +@click.option( + 
"--runtime_params", + "-r", + "runtime_params_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + help="Path to python file for runtime editables", +) +@click.option("--watch/--no-watch", "-w", default=False, help="Watch scrolling output") +def update_app( + app_name, patch_name, watch, ignore_runtime_variables, runtime_params_file +): + """Updates an application""" + + run_patches( + app_name=app_name, + patch_name=patch_name, + watch=watch, + ignore_runtime_variables=ignore_runtime_variables, + runtime_params_file=runtime_params_file, + ) diff --git a/framework/calm/dsl/cli/app_icon_commands.py b/framework/calm/dsl/cli/app_icon_commands.py new file mode 100644 index 0000000..d79a51e --- /dev/null +++ b/framework/calm/dsl/cli/app_icon_commands.py @@ -0,0 +1,49 @@ +import click + +from .main import create, delete, get +from .app_icons import create_app_icon, delete_app_icon, get_app_icon_list + + +@create.command("app_icon") +@click.option( + "--file", + "-f", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + required=True, + help="Path of Blueprint file to upload", +) +@click.option("--name", "-n", default=None, help="icon name") +def _create_app_icon(file, name): + """Creates a marketplace app icon""" + + create_app_icon(name, file) + + +@delete.command("app_icon") +@click.argument("icon_names", nargs=-1) +def _delete_app_icon(icon_names): + """Deletes a marketplace app icon""" + + delete_app_icon(icon_names) + + +@get.command("app_icons") +@click.option("--name", "-n", default=None, help="Search for app icons by name") +@click.option("--limit", "-l", default=20, help="Number of results to return") +@click.option( + "--offset", "-s", default=0, help="Offset results by the specified amount" +) +@click.option( + "--quiet", "-q", is_flag=True, default=False, help="Show only app icon names." 
+) +@click.option( + "--marketplace_use", + "-m", + is_flag=True, + default=False, + help="Show whether used for marketplace icon or not", +) +def _get_app_icon_list(name, limit, offset, quiet, marketplace_use): + """Get the list of app_icons""" + + get_app_icon_list(name, limit, offset, quiet, marketplace_use) diff --git a/framework/calm/dsl/cli/app_icons.py b/framework/calm/dsl/cli/app_icons.py new file mode 100644 index 0000000..d30b856 --- /dev/null +++ b/framework/calm/dsl/cli/app_icons.py @@ -0,0 +1,66 @@ +import click +import sys +from prettytable import PrettyTable + +from calm.dsl.api import get_api_client +from calm.dsl.log import get_logging_handle +from .utils import highlight_text, get_name_query + +LOG = get_logging_handle(__name__) + + +def create_app_icon(name, file): + """creates app icon""" + + client = get_api_client() + client.app_icon.upload(name, file) + + +def delete_app_icon(icon_names): + """deletes app_icons in icon_names""" + + client = get_api_client() + app_icon_name_uuid_map = client.app_icon.get_name_uuid_map() + + for icon_name in icon_names: + app_icon_uuid = app_icon_name_uuid_map.get(icon_name, None) + if not app_icon_uuid: + LOG.error("APP icon: {} not found") + sys.exit(-1) + client.app_icon.delete(app_icon_uuid) + LOG.info("App Icon {} deleted".format(icon_name)) + + +def get_app_icon_list(name, limit, offset, quiet, marketplace_use=False): + """Get list of app icons""" + + client = get_api_client() + params = {"length": limit, "offset": offset} + if name: + params["filter"] = get_name_query([name]) + + app_icon_name_uuid_map = client.app_icon.get_name_uuid_map(params) + if quiet: + for name in app_icon_name_uuid_map.keys(): + click.echo(highlight_text(name)) + return + + table = PrettyTable() + field_names = ["NAME", "UUID"] + if marketplace_use: + field_names.append("IS_MARKETPLACE_ICON") + + table.field_names = field_names + for name, uuid in app_icon_name_uuid_map.items(): + data_row = [highlight_text(name), highlight_text(uuid)] + if marketplace_use: + res, err = client.app_icon.is_marketplace_icon(uuid) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + res = res.json() + data_row.append(highlight_text(res["is_marketplaceicon"])) + + table.add_row(data_row) + + click.echo(table) diff --git a/framework/calm/dsl/cli/apps.py b/framework/calm/dsl/cli/apps.py new file mode 100644 index 0000000..f52463e --- /dev/null +++ b/framework/calm/dsl/cli/apps.py @@ -0,0 +1,1564 @@ +import os +import sys +import time +import json +import re +import uuid +from json import JSONEncoder + +import arrow +import click +from prettytable import PrettyTable +from anytree import NodeMixin, RenderTree + +from calm.dsl.api import get_api_client +from calm.dsl.config import get_context + +from .utils import get_name_query, get_states_filter, highlight_text, Display +from .constants import APPLICATION, RUNLOG, SYSTEM_ACTIONS +from .bps import ( + launch_blueprint_simple, + compile_blueprint, + create_blueprint, + get_app, + parse_launch_runtime_vars, + parse_launch_params_attribute, +) +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +def get_apps(name, filter_by, limit, offset, quiet, all_items, out): + client = get_api_client() + + params = {"length": limit, "offset": offset} + filter_query = "" + if name: + filter_query = get_name_query([name]) + if filter_by: + filter_query = filter_query + ";(" + filter_by + ")" + if all_items: + filter_query += get_states_filter(APPLICATION.STATES, 
state_key="_state") + if filter_query.startswith(";"): + filter_query = filter_query[1:] + + if filter_query: + params["filter"] = filter_query + + res, err = client.application.list(params=params) + + if err: + ContextObj = get_context() + server_config = ContextObj.get_server_config() + pc_ip = server_config["pc_ip"] + + LOG.warning("Cannot fetch applications from {}".format(pc_ip)) + return + + res = res.json() + total_matches = res["metadata"]["total_matches"] + if total_matches > limit: + LOG.warning( + "Displaying {} out of {} entities. Please use --limit and --offset option for more results.".format( + limit, total_matches + ) + ) + + if out == "json": + click.echo(json.dumps(res, indent=4, separators=(",", ": "))) + return + + json_rows = res["entities"] + if not json_rows: + click.echo(highlight_text("No application found !!!\n")) + return + + if quiet: + for _row in json_rows: + row = _row["status"] + click.echo(highlight_text(row["name"])) + return + + table = PrettyTable() + table.field_names = [ + "NAME", + "SOURCE BLUEPRINT", + "STATE", + "PROJECT", + "OWNER", + "CREATED ON", + "LAST UPDATED", + "UUID", + ] + for _row in json_rows: + row = _row["status"] + metadata = _row["metadata"] + + project = ( + metadata["project_reference"]["name"] + if "project_reference" in metadata + else None + ) + + creation_time = int(metadata["creation_time"]) // 1000000 + last_update_time = int(metadata["last_update_time"]) // 1000000 + + table.add_row( + [ + highlight_text(row["name"]), + highlight_text(row["resources"]["app_blueprint_reference"]["name"]), + highlight_text(row["state"]), + highlight_text(project), + highlight_text(metadata["owner_reference"]["name"]), + highlight_text(time.ctime(creation_time)), + "{}".format(arrow.get(last_update_time).humanize()), + highlight_text(row["uuid"]), + ] + ) + click.echo(table) + + +def _get_app(client, app_name, screen=Display(), all=False): + # 1. Get app_uuid from list api + params = {"filter": "name=={}".format(app_name)} + if all: + params["filter"] += get_states_filter(APPLICATION.STATES, state_key="_state") + + res, err = client.application.list(params=params) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + response = res.json() + entities = response.get("entities", None) + app = None + if entities: + app = entities[0] + if len(entities) != 1: + # If more than one item found, check if an exact name match is present. Else raise. + found = False + for ent in entities: + if ent["metadata"]["name"] == app_name: + app = ent + found = True + break + if not found: + raise Exception("More than one app found - {}".format(entities)) + + screen.clear() + LOG.info("App {} found".format(app_name)) + screen.refresh() + app = entities[0] + else: + raise Exception("No app found with name {} found".format(app_name)) + app_id = app["metadata"]["uuid"] + + # 2. 
Get app details + screen.clear() + LOG.info("Fetching app details") + screen.refresh() + res, err = client.application.read(app_id) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + app = res.json() + return app + + +def describe_app(app_name, out): + client = get_api_client() + app = _get_app(client, app_name, all=True) + + if out == "json": + click.echo(json.dumps(app, indent=4, separators=(",", ": "))) + return + + click.echo("\n----Application Summary----\n") + app_name = app["metadata"]["name"] + click.echo( + "Name: " + + highlight_text(app_name) + + " (uuid: " + + highlight_text(app["metadata"]["uuid"]) + + ")" + ) + click.echo("Status: " + highlight_text(app["status"]["state"])) + click.echo( + "Owner: " + highlight_text(app["metadata"]["owner_reference"]["name"]), nl=False + ) + click.echo( + " Project: " + highlight_text(app["metadata"]["project_reference"]["name"]) + ) + + click.echo( + "Blueprint: " + + highlight_text(app["status"]["resources"]["app_blueprint_reference"]["name"]) + ) + + created_on = int(app["metadata"]["creation_time"]) // 1000000 + past = arrow.get(created_on).humanize() + click.echo( + "Created: {} ({})".format( + highlight_text(time.ctime(created_on)), highlight_text(past) + ) + ) + + click.echo( + "Application Profile: " + + highlight_text( + app["status"]["resources"]["app_profile_config_reference"]["name"] + ) + ) + + deployment_list = app["status"]["resources"]["deployment_list"] + click.echo("Deployments [{}]:".format(highlight_text((len(deployment_list))))) + for deployment in deployment_list: + click.echo( + "\t {} {}".format( + highlight_text(deployment["name"]), highlight_text(deployment["state"]) + ) + ) + + action_list = app["status"]["resources"]["action_list"] + click.echo("App Actions [{}]:".format(highlight_text(len(action_list)))) + for action in action_list: + action_name = action["name"] + if action_name.startswith("action_"): + prefix_len = len("action_") + action_name = action_name[prefix_len:] + click.echo("\t" + highlight_text(action_name)) + + patch_list = app["status"]["resources"]["patch_list"] + click.echo("App Patches [{}]:".format(highlight_text(len(patch_list)))) + for patch in patch_list: + patch_name = patch["name"] + if patch_name.startswith("patch_"): + prefix_len = len("patch_") + patch_name = patch_name[prefix_len:] + click.echo("\t" + highlight_text(patch_name)) + + variable_list = app["status"]["resources"]["variable_list"] + click.echo("App Variables [{}]".format(highlight_text(len(variable_list)))) + for variable in variable_list: + click.echo( + "\t{}: {} # {}".format( + highlight_text(variable["name"]), + highlight_text(variable["value"]), + highlight_text(variable["label"]), + ) + ) + + click.echo("App Runlogs:") + + def display_runlogs(screen): + watch_app(app_name, screen, app) + + Display.wrapper(display_runlogs, watch=False) + + click.echo( + "# Hint: You can run actions on the app using: calm run action --app {}".format( + app_name + ) + ) + + +def create_app( + bp_file, + brownfield_deployment_file=None, + app_name=None, + profile_name=None, + patch_editables=True, + launch_params=None, +): + client = get_api_client() + + # Compile blueprint + bp_payload = compile_blueprint( + bp_file, brownfield_deployment_file=brownfield_deployment_file + ) + if bp_payload is None: + LOG.error("User blueprint not found in {}".format(bp_file)) + sys.exit(-1) + + # Check if give app name exists or generate random app name + if app_name: + res = get_app(app_name) + if res: + LOG.debug(res) + 
LOG.error("Application Name ({}) is already used.".format(app_name)) + sys.exit(-1) + else: + app_name = "App{}".format(str(uuid.uuid4())[:10]) + + # Get the blueprint type + bp_type = bp_payload["spec"]["resources"].get("type", "") + + # Create blueprint from dsl file + bp_name = "Blueprint{}".format(str(uuid.uuid4())[:10]) + LOG.info("Creating blueprint {}".format(bp_name)) + res, err = create_blueprint(client=client, bp_payload=bp_payload, name=bp_name) + if err: + LOG.error(err["error"]) + return + + bp = res.json() + bp_state = bp["status"].get("state", "DRAFT") + bp_uuid = bp["metadata"].get("uuid", "") + + if bp_state != "ACTIVE": + LOG.debug("message_list: {}".format(bp["status"].get("message_list", []))) + LOG.error("Blueprint {} went to {} state".format(bp_name, bp_state)) + sys.exit(-1) + + LOG.info( + "Blueprint {}(uuid={}) created successfully.".format( + highlight_text(bp_name), highlight_text(bp_uuid) + ) + ) + + # Creating an app + LOG.info("Creating app {}".format(app_name)) + launch_blueprint_simple( + blueprint_name=bp_name, + app_name=app_name, + profile_name=profile_name, + patch_editables=patch_editables, + launch_params=launch_params, + is_brownfield=True if bp_type == "BROWNFIELD" else False, + skip_app_name_check=True, + ) + + if bp_type != "BROWNFIELD": + # Delete the blueprint + res, err = client.blueprint.delete(bp_uuid) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + +class RunlogNode(NodeMixin): + def __init__(self, runlog, parent=None, children=None): + self.runlog = runlog + self.parent = parent + if children: + self.children = children + + +class RunlogJSONEncoder(JSONEncoder): + def default(self, obj): + + if not isinstance(obj, RunlogNode): + return super().default(obj) + + metadata = obj.runlog["metadata"] + status = obj.runlog["status"] + state = status["state"] + + if status["type"] == "task_runlog": + name = status["task_reference"]["name"] + elif status["type"] == "runbook_runlog": + if "call_runbook_reference" in status: + name = status["call_runbook_reference"]["name"] + else: + name = status["runbook_reference"]["name"] + elif status["type"] == "action_runlog" and "action_reference" in status: + name = status["action_reference"]["name"] + elif status["type"] == "app": + return status["name"] + else: + return "root" + + # TODO - Fix KeyError for action_runlog + """ + elif status["type"] == "action_runlog": + name = status["action_reference"]["name"] + elif status["type"] == "app": + return status["name"] + """ + + creation_time = int(metadata["creation_time"]) // 1000000 + username = ( + status["userdata_reference"]["name"] + if "userdata_reference" in status + else None + ) + last_update_time = int(metadata["last_update_time"]) // 1000000 + + encodedStringList = [] + encodedStringList.append("{} (Status: {})".format(name, state)) + if status["type"] == "action_runlog": + encodedStringList.append("\tRunlog UUID: {}".format(metadata["uuid"])) + encodedStringList.append("\tStarted: {}".format(time.ctime(creation_time))) + + if username: + encodedStringList.append("\tRun by: {}".format(username)) + if state in RUNLOG.TERMINAL_STATES: + encodedStringList.append( + "\tFinished: {}".format(time.ctime(last_update_time)) + ) + else: + encodedStringList.append( + "\tLast Updated: {}".format(time.ctime(last_update_time)) + ) + + return "\n".join(encodedStringList) + + +def get_completion_func(screen): + def is_action_complete(response): + + entities = response["entities"] + if len(entities): + + # Sort entities based on 
creation time + sorted_entities = sorted( + entities, key=lambda x: int(x["metadata"]["creation_time"]) + ) + + # Create nodes of runlog tree and a map based on uuid + root = None + nodes = {} + for runlog in sorted_entities: + # Create root node + # TODO - Get details of root node + if not root: + root_uuid = runlog["status"]["root_reference"]["uuid"] + root_runlog = { + "metadata": {"uuid": root_uuid}, + "status": {"type": "action_runlog", "state": ""}, + } + root = RunlogNode(root_runlog) + nodes[str(root_uuid)] = root + + uuid = runlog["metadata"]["uuid"] + nodes[str(uuid)] = RunlogNode(runlog, parent=root) + + # Attach parent to nodes + for runlog in sorted_entities: + uuid = runlog["metadata"]["uuid"] + parent_uuid = runlog["status"]["parent_reference"]["uuid"] + node = nodes[str(uuid)] + node.parent = nodes[str(parent_uuid)] + + # Show Progress + # TODO - Draw progress bar + total_tasks = 0 + completed_tasks = 0 + for runlog in sorted_entities: + runlog_type = runlog["status"]["type"] + if runlog_type == "task_runlog": + total_tasks += 1 + state = runlog["status"]["state"] + if state in RUNLOG.STATUS.SUCCESS: + completed_tasks += 1 + + if total_tasks: + screen.clear() + progress = "{0:.2f}".format(completed_tasks / total_tasks * 100) + screen.print_at("Progress: {}%".format(progress), 0, 0) + + # Render Tree on next line + line = 1 + for pre, fill, node in RenderTree(root): + lines = json.dumps(node, cls=RunlogJSONEncoder).split("\\n") + for linestr in lines: + tabcount = linestr.count("\\t") + if not tabcount: + screen.print_at("{}{}".format(pre, linestr), 0, line) + else: + screen.print_at( + "{}{}".format(fill, linestr.replace("\\t", "")), 0, line + ) + line += 1 + screen.refresh() + + for runlog in sorted_entities: + state = runlog["status"]["state"] + if state in RUNLOG.FAILURE_STATES: + msg = "Action failed." + screen.print_at(msg, 0, line) + screen.refresh() + return (True, msg) + if state not in RUNLOG.TERMINAL_STATES: + return (False, "") + + msg = "Action ran successfully." + if os.isatty(sys.stdout.fileno()): + msg += " Exit screen? 
" + screen.print_at(msg, 0, line) + screen.refresh() + + return (True, msg) + return (False, "") + + return is_action_complete + + +def watch_patch_or_action(runlog_uuid, app_name, client, screen, poll_interval=10): + app = _get_app(client, app_name, screen=screen) + app_uuid = app["metadata"]["uuid"] + + url = client.application.ITEM.format(app_uuid) + "/app_runlogs/list" + payload = {"filter": "root_reference=={}".format(runlog_uuid)} + + def poll_func(): + return client.application.poll_action_run(url, payload) + + poll_runnnable(poll_func, get_completion_func(screen), poll_interval) + + +def watch_app(app_name, screen, app=None, poll_interval=10): + """Watch an app""" + + client = get_api_client() + is_app_describe = False + + if not app: + app = _get_app(client, app_name, screen=screen) + else: + is_app_describe = True + app_id = app["metadata"]["uuid"] + url = client.application.ITEM.format(app_id) + "/app_runlogs/list" + + payload = { + "filter": "application_reference=={};(type==action_runlog,type==audit_runlog,type==ngt_runlog,type==clone_action_runlog)".format( + app_id + ) + } + + def poll_func(): + # screen.echo("Polling app status...") + return client.application.poll_action_run(url, payload) + + def is_complete(response): + entities = response["entities"] + + if len(entities): + + # Sort entities based on creation time + sorted_entities = sorted( + entities, key=lambda x: int(x["metadata"]["creation_time"]) + ) + + # Create nodes of runlog tree and a map based on uuid + root = RunlogNode( + { + "metadata": {"uuid": app_id}, + "status": {"type": "app", "state": "", "name": app_name}, + } + ) + nodes = {} + nodes[app_id] = root + for runlog in sorted_entities: + uuid = runlog["metadata"]["uuid"] + nodes[str(uuid)] = RunlogNode(runlog, parent=root) + + # Attach parent to nodes + for runlog in sorted_entities: + uuid = runlog["metadata"]["uuid"] + parent_uuid = runlog["status"]["application_reference"]["uuid"] + node = nodes[str(uuid)] + node.parent = nodes[str(parent_uuid)] + + # Show Progress + # TODO - Draw progress bar + total_tasks = 0 + completed_tasks = 0 + for runlog in sorted_entities: + runlog_type = runlog["status"]["type"] + if runlog_type == "action_runlog": + total_tasks += 1 + state = runlog["status"]["state"] + if state in RUNLOG.STATUS.SUCCESS: + completed_tasks += 1 + + if not is_app_describe and total_tasks: + screen.clear() + progress = "{0:.2f}".format(completed_tasks / total_tasks * 100) + screen.print_at("Progress: {}%".format(progress), 0, 0) + + # Render Tree on next line + line = 1 + for pre, fill, node in RenderTree(root): + lines = json.dumps(node, cls=RunlogJSONEncoder).split("\\n") + for linestr in lines: + tabcount = linestr.count("\\t") + if not tabcount: + screen.print_at("{}{}".format(pre, linestr), 0, line) + else: + screen.print_at( + "{}{}".format(fill, linestr.replace("\\t", "")), 0, line + ) + line += 1 + screen.refresh() + + msg = "" + is_complete = True + if not is_app_describe: + for runlog in sorted_entities: + state = runlog["status"]["state"] + if state in RUNLOG.FAILURE_STATES: + msg = "Action failed." + is_complete = True + if state not in RUNLOG.TERMINAL_STATES: + is_complete = False + + if is_complete: + if not msg: + msg = "Action ran successfully." + + if os.isatty(sys.stdout.fileno()): + msg += " Exit screen? 
" + if not is_app_describe: + screen.print_at(msg, 0, line) + screen.refresh() + time.sleep(10) + return (is_complete, msg) + return (False, "") + + poll_runnnable(poll_func, is_complete, poll_interval=poll_interval) + + +def delete_app(app_names, soft=False): + client = get_api_client() + + for app_name in app_names: + app = _get_app(client, app_name) + app_id = app["metadata"]["uuid"] + action_label = "Soft Delete" if soft else "Delete" + LOG.info("Triggering {}".format(action_label)) + res, err = client.application.delete(app_id, soft_delete=soft) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + LOG.info("{} action triggered".format(action_label)) + response = res.json() + runlog_id = response["status"]["runlog_uuid"] + LOG.info("Action runlog uuid: {}".format(runlog_id)) + + +def get_action_var_val_from_launch_params(launch_vars, var_name): + """Returns variable value from launch params""" + + filtered_launch_vars = list( + filter( + lambda e: e["name"] == var_name, + launch_vars, + ) + ) + + if len(filtered_launch_vars) > 1: + LOG.error( + "Unable to populate runtime editables: Multiple matches for value of variable '{}'".format( + var_name + ) + ) + sys.exit(-1) + + if len(filtered_launch_vars) == 1: + return filtered_launch_vars[0].get("value", {}).get("value", None) + + return None + + +def get_patch_runtime_args( + app_uuid, deployments, patch_payload, ignore_runtime_variables, runtime_params_file +): + """Returns patch arguments or variable data""" + + patch_name = patch_payload["name"] + + patch_args = {} + patch_args["patch"] = patch_payload + patch_args["variables"] = [] + + attrs_list = patch_payload["attrs_list"] + + if ignore_runtime_variables: + return patch_args + + def disk_in_use(substrate, disk): + boot_disk = substrate["create_spec"]["resources"]["boot_config"]["boot_device"] + return ( + disk["disk_address"]["adapter_type"] + == boot_disk["disk_address"]["adapter_type"] + and disk["disk_address"]["device_index"] + == boot_disk["disk_address"]["device_index"] + ) + + def nic_name(nic): + return nic["subnet_reference"]["name"] if nic["subnet_reference"] else "" + + def disk_name(disk): + return "{}-{}".format( + disk["device_properties"]["disk_address"]["adapter_type"], + disk["device_properties"]["disk_address"]["device_index"], + ) + + nic_index_pattern = r".+?\[([0-9]*)\]" + + # If file is supplied for launch params + if runtime_params_file: + click.echo("Patching values for runtime variables under patch action ...") + + for attrs in attrs_list: + patch_items = attrs["data"] + + target_deployment_uuid = attrs["target_any_local_reference"]["uuid"] + target_deployment = next( + filter( + lambda deployment: deployment["uuid"] == target_deployment_uuid, + deployments, + ), + None, + ) + if target_deployment == None: + LOG.info( + "Target deployment with uuid {} not found. 
Skipping patch attributes editables".format( + target_deployment_uuid + ) + ) + continue + + substrate = target_deployment["substrate"] + + nic_in_use = -1 + nic_address = substrate["readiness_probe"]["address"] + readiness_probe_disabled = substrate["readiness_probe"][ + "disable_readiness_probe" + ] + if nic_address: + matches = re.search(nic_index_pattern, nic_address) + if matches != None and not readiness_probe_disabled: + nic_in_use = int(matches.group(1)) + + # Skip nics that are being used by the vm + nics = ( + patch_items["pre_defined_nic_list"] + if nic_in_use == -1 + else patch_items["pre_defined_nic_list"][nic_in_use + 1 :] + ) + + disks = patch_items["pre_defined_disk_list"] + + patch_attrs_editables = parse_launch_params_attribute( + launch_params=runtime_params_file, parse_attribute="patch_attrs" + ) + + editables = next( + filter( + lambda patch_attrs: patch_attrs["patch_attributes_uuid"] + == attrs["uuid"], + patch_attrs_editables, + ), + None, + ) + + if editables == None: + LOG.info( + "No patch editables found for patch attributes with uuid {}".format( + attrs["uuid"] + ) + ) + continue + + vm_config_editables = editables.get("vm_config", {}) + nic_editables = editables.get("nics", {}) + disk_editables = editables.get("disks", {}) + category_editables = editables.get("categories", {}) + + # VM config editables + for key, value in vm_config_editables.items(): + patch_item = patch_items[key + "_ruleset"] + if ( + patch_item["editable"] + and patch_item["min_value"] <= value <= patch_item["max_value"] + ): + if patch_item["value"] != value: + LOG.info( + "Attribute {} marked for modify with value {}".format( + key, value + ) + ) + patch_item["value"] = value + + # NIC delete + if patch_items["nic_delete_allowed"]: + for i, nic in enumerate(nics): + nic_index = i if nic_in_use == -1 else i + nic_in_use + if nic_index in nic_editables.get("delete", []): + LOG.info('NIC "{}" marked for deletion'.format(nic_name(nic))) + nic["operation"] = "delete" + + nics_not_added = [] + + # NIC add + for i, nic in enumerate(nics): + if nic["operation"] == "add" and nic["editable"]: + nic_edit = next( + filter( + lambda n: n["identifier"] == nic["identifier"], + nic_editables.get("add", []), + ), + None, + ) + if ( + nic_edit + and nic["subnet_reference"]["uuid"] + != nic_edit["subnet_reference"]["uuid"] + ): + LOG.info( + "NIC with identifier {} marked for modify with subnet {}".format( + nic["identifier"], nic_name(nic_edit) + ) + ) + nic["subnet_reference"] = nic_edit["subnet_reference"] + + if nic["operation"] == "add" and i in nic_editables.get("delete", []): + LOG.info( + "NIC with identifier {} skipped from addition".format( + nic["identifier"] + ) + ) + nics_not_added.append(i) + + # Skip adding nics that are deleted + nics = [nic for i, nic in enumerate(nics) if i not in nics_not_added] + + patch_items["pre_defined_nic_list"] = nics + + # Disk delete + if patch_items["disk_delete_allowed"]: + for i, disk in enumerate(disks): + if i in disk_editables.get("delete", []) and not disk_in_use( + substrate, disk["device_properties"] + ): + LOG.info("Disk {} marked for deletion".format(disk_name(disk))) + disk["operation"] = "delete" + + # Disk modify + for disk in disks: + if ( + disk["operation"] == "modify" + and disk["disk_size_mib"] + and disk["disk_size_mib"]["editable"] + ): + disk_edit = next( + filter( + lambda d: disk_name(d) == disk_name(disk), + disk_editables.get("modify", []), + ), + None, + ) + if ( + disk_edit + and disk["disk_size_mib"]["min_value"] + <= 
disk_edit["disk_size_mib"]["value"] + <= disk["disk_size_mib"]["max_value"] + ): + if ( + disk["disk_size_mib"]["value"] + != disk_edit["disk_size_mib"]["value"] + ): + LOG.info( + "Disk {} marked for modify with size {}".format( + disk_name(disk), disk_edit["disk_size_mib"]["value"] + ) + ) + disk["disk_size_mib"]["value"] = disk_edit["disk_size_mib"][ + "value" + ] + + disks_not_added = [] + + # Disk add + for i, disk in enumerate(disks): + if ( + disk["operation"] == "add" + and disk["disk_size_mib"] + and disk["disk_size_mib"]["editable"] + ): + disk_edit = next( + filter( + lambda d: i == d["index"], + disk_editables.get("add", []), + ), + None, + ) + if ( + disk_edit + and disk["disk_size_mib"]["min_value"] + <= disk_edit["disk_size_mib"]["value"] + <= disk["disk_size_mib"]["max_value"] + ): + if ( + disk["disk_size_mib"]["value"] + != disk_edit["disk_size_mib"]["value"] + ): + LOG.info( + "Disk {} marked for addition with size {}".format( + disk_name(disk), disk_edit["disk_size_mib"]["value"] + ) + ) + disk["disk_size_mib"]["value"] = disk_edit["disk_size_mib"][ + "value" + ] + if disk["operation"] == "add" and i in disk_editables.get("delete", []): + LOG.info("Disk {} skipped from addition".format(disk_name(disk))) + disks_not_added.append(i) + + # Skip adding disks that are deleted + disks = [disk for i, disk in enumerate(disks) if i not in disks_not_added] + + patch_items["pre_defined_disk_list"] = disks + + categories = patch_items["pre_defined_categories"] + + # Category delete + if patch_items["categories_delete_allowed"]: + for i, category in enumerate(categories): + if i in category_editables.get("delete", []): + LOG.info( + "Category {} marked for deletion".format(category["value"]) + ) + category["operation"] = "delete" + + # Category add + if patch_items["categories_add_allowed"]: + for category in category_editables.get("add", []): + LOG.info("Category {} marked for addition".format(category)) + patch_items["pre_defined_categories"].append( + {"operation": "add", "value": category} + ) + + return patch_args + + # Else prompt for runtime variable values + + click.echo("Please provide values for runtime variables in the patch action") + + for attrs in attrs_list: + + patch_items = attrs["data"] + target_deployment_uuid = attrs["target_any_local_reference"]["uuid"] + + click.echo( + "Patch editables targeted at deployment {} are as follows \n {}".format( + target_deployment_uuid, + json.dumps(patch_items, indent=4, separators=(",", ": ")), + ) + ) + + nic_in_use = -1 + disk_in_use = "" + + # find out which nic and disk is currently used + for deployment in deployments: + if deployment["uuid"] == target_deployment_uuid: + substrate = deployment["substrate"] + + nic_address = substrate["readiness_probe"]["address"] + readiness_probe_disabled = substrate["readiness_probe"][ + "disable_readiness_probe" + ] + if nic_address: + matches = re.search(nic_index_pattern, nic_address) + if matches != None and not readiness_probe_disabled: + nic_in_use = int(matches.group(1)) + + disk_address = substrate["create_spec"]["resources"]["boot_config"][ + "boot_device" + ]["disk_address"] + disk = "{}-{}".format( + disk_address["adapter_type"], disk_address["device_index"] + ) + disk_in_use = disk + + def prompt_value(patch_item, display_message): + min_value = ( + patch_item["value"] + if patch_item["operation"] == "increase" + else patch_item["min_value"] + ) + max_value = ( + patch_item["value"] + if patch_item["operation"] == "decrease" + else patch_item["max_value"] + ) + click.echo() + 
return click.prompt( + display_message, + default=highlight_text(patch_item["value"]), + type=click.IntRange(min=min_value, max=max_value), + ) + + def prompt_bool(display_message): + click.echo() + return click.prompt( + display_message, + default=highlight_text("n"), + type=click.Choice(["y", "n"]), + ) + + click.echo("\n\t\t\t", nl=False) + click.secho("VM CONFIGURATION", underline=True, bold=True) + + # Sockets, cores and memory modify + display_names = { + "num_sockets_ruleset": "vCPUs", + "num_vcpus_per_socket_ruleset": "Cores per vCPU", + "memory_size_mib_ruleset": "Memory (MiB)", + } + for ruleset in display_names: + patch_item = patch_items[ruleset] + if patch_item["editable"]: + new_val = prompt_value( + patch_item, + "Enter value for {}".format(display_names[ruleset]), + ) + patch_item["value"] = new_val + + nics = ( + patch_items["pre_defined_nic_list"] + if nic_in_use == -1 + else patch_items["pre_defined_nic_list"][nic_in_use + 1 :] + ) + + click.echo("\n\t\t\t", nl=False) + click.secho("NETWORK CONFIGURATION", underline=True, bold=True) + + # NIC add + nics_not_added = [] + for i, nic in enumerate(nics): + if nic["operation"] == "add": + to_add = prompt_bool( + 'Do you want to add the NIC "{}" with identifier {}'.format( + nic["subnet_reference"]["name"], nic["identifier"] + ) + ) + if to_add == "n": + nics_not_added.append(i) + + # remove NICs not added from patch list + nics = [nic for i, nic in enumerate(nics) if i not in nics_not_added] + + # NIC delete + if patch_items["nic_delete_allowed"] and len(nics) > 0: + to_delete = prompt_bool("Do you want to delete a NIC") + + if to_delete == "y": + click.echo() + click.echo("Choose from following options") + for i, nic in enumerate(nics): + click.echo( + "\t{}. NIC-{} {}".format( + highlight_text(i), i + 1, nic["subnet_reference"]["name"] + ) + ) + + click.echo() + nic_to_delete = click.prompt( + "Choose nic to delete", + default=0, + type=click.IntRange(max=len(nics)), + ) + + nics[nic_to_delete]["operation"] = "delete" + LOG.info( + "Delete NIC-{} {}".format( + nic_to_delete + 1, + nics[nic_to_delete]["subnet_reference"]["name"], + ) + ) + patch_items["pre_defined_nic_list"] = nics + + click.echo("\n\t\t\t", nl=False) + click.secho("STORAGE CONFIGURATION", underline=True, bold=True) + + # Disk delete + disks = list( + filter( + lambda disk: disk_name(disk) != disk_in_use, + patch_items["pre_defined_disk_list"], + ) + ) + if patch_items["disk_delete_allowed"] and len(disks) > 0: + to_delete = prompt_bool("Do you want to delete a disk") + if to_delete == "y": + click.echo() + click.echo("Choose from following options") + for i, disk in enumerate(disks): + click.echo( + "\t{}. 
DISK-{} {} {}".format( + highlight_text(i), + i + 1, + disk_name(disk), + disk["disk_size_mib"]["value"], + ) + ) + click.echo() + disk_to_delete = click.prompt( + "Choose disk to delete", + default=0, + type=click.IntRange(max=len(disks)), + ) + disks[disk_to_delete]["operation"] = "delete" + LOG.info( + "Delete DISK-{} {}".format( + disk_to_delete + 1, disk_name(disks[disk_to_delete]) + ) + ) + + # Disk modify + for disk in disks: + disk_size = disk["disk_size_mib"] + if disk_size != None and disk_size["editable"]: + new_val = prompt_value( + disk_size, + "Enter size for disk {}".format(disk_name(disk)), + ) + disk_size["value"] = new_val + patch_items["pre_defined_disk_list"] = disks + + click.echo("\n\t\t\t", nl=False) + click.secho("CATEGORIES", underline=True, bold=True) + + # Category delete + categories = patch_items["pre_defined_categories"] + if patch_items["categories_delete_allowed"] and len(categories) > 0: + to_delete = prompt_bool("Do you want to delete a category") + if to_delete == "y": + click.echo() + click.echo("Choose from following options") + for i, category in enumerate(categories): + click.echo("\t{}. {}".format(highlight_text(i), category["value"])) + click.echo() + category_to_delete = click.prompt( + "Choose category to delete", + default=0, + type=click.IntRange(max=len(categories)), + ) + categories[category_to_delete]["operation"] = "delete" + LOG.info( + "Delete category {}".format(categories[category_to_delete]["value"]) + ) + + # Category add + if patch_items["categories_add_allowed"]: + to_add = prompt_bool("Add a category?") + while to_add == "y": + click.echo() + new_val = click.prompt( + "Enter value for category", default="", show_default=False + ) + patch_items["pre_defined_categories"].append( + {"operation": "add", "value": new_val} + ) + to_add = prompt_bool("Add another category?") + + return patch_args + + +def get_action_runtime_args( + app_uuid, action_payload, patch_editables, runtime_params_file +): + """Returns action arguments or variable data""" + + action_name = action_payload["name"] + + runtime_vars = {} + runbook_vars = action_payload["runbook"].get("variable_list", None) or [] + for _var in runbook_vars: + editable_dict = _var.get("editables", None) or {} + if editable_dict.get("value", False): + runtime_vars[_var["name"]] = _var + + client = get_api_client() + res, err = client.application.action_variables( + app_id=app_uuid, action_name=action_name + ) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + action_args = res.json() + + # If no need to patch editable or there is not runtime var, return action args received from api + if not (patch_editables and runtime_vars): + return action_args or [] + + # If file is supplied for launch params + if runtime_params_file: + click.echo("Patching values for runtime variables under action ...") + + parsed_runtime_vars = parse_launch_runtime_vars( + launch_params=runtime_params_file + ) + for _arg in action_args: + var_name = _arg["name"] + if var_name in runtime_vars: + + new_val = get_action_var_val_from_launch_params( + launch_vars=parsed_runtime_vars, var_name=var_name + ) + if new_val is not None: + _arg["value"] = new_val + + return action_args + + # Else prompt for runtime variable values + click.echo( + "Found runtime variables in action. 
Please provide values for runtime variables" + ) + + for _arg in action_args: + if _arg["name"] in runtime_vars: + + _var = runtime_vars[_arg["name"]] + options = _var.get("options", {}) + choices = options.get("choices", []) + click.echo("") + if choices: + click.echo("Choose from given choices: ") + for choice in choices: + click.echo("\t{}".format(highlight_text(repr(choice)))) + + default_val = _arg["value"] + is_secret = _var.get("type") == "SECRET" + + new_val = click.prompt( + "Value for variable '{}' [{}]".format( + _arg["name"], + highlight_text(default_val if not is_secret else "*****"), + ), + default=default_val, + show_default=False, + hide_input=is_secret, + type=click.Choice(choices) if choices else type(default_val), + show_choices=False, + ) + + _arg["value"] = new_val + + return action_args + + +def run_patches( + app_name, + patch_name, + watch, + ignore_runtime_variables=False, + runtime_params_file=None, +): + client = get_api_client() + + app = _get_app(client, app_name) + app_spec = app["spec"] + app_id = app["metadata"]["uuid"] + + calm_patch_name = "patch_" + patch_name.lower() + patch_payload = next( + ( + patch + for patch in app_spec["resources"]["patch_list"] + if patch["name"] == calm_patch_name or patch["name"] == patch_name + ), + None, + ) + if not patch_payload: + LOG.error("No patch found matching name {}".format(patch_name)) + sys.exit(-1) + + patch_id = patch_payload["uuid"] + + deployments = app_spec["resources"]["deployment_list"] + + patch_args = get_patch_runtime_args( + app_uuid=app_id, + deployments=deployments, + patch_payload=patch_payload, + ignore_runtime_variables=ignore_runtime_variables, + runtime_params_file=runtime_params_file, + ) + + # Hit action run api (with metadata and minimal spec: [args, target_kind, target_uuid]) + app.pop("status") + app["spec"] = { + "args": patch_args, + "target_kind": "Application", + "target_uuid": app_id, + } + res, err = client.application.run_patch(app_id, patch_id, app) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + response = res.json() + runlog_uuid = response["status"]["runlog_uuid"] + click.echo( + "Patch is triggered. Got Runlog uuid: {}".format(highlight_text(runlog_uuid)) + ) + + if watch: + + def display_patch(screen): + screen.clear() + screen.print_at( + "Fetching runlog tree for patch '{}'".format(patch_name), 0, 0 + ) + screen.refresh() + watch_patch_or_action( + runlog_uuid, + app_name, + get_api_client(), + screen, + ) + screen.wait_for_input(10.0) + + Display.wrapper(display_patch, watch=True) + + else: + click.echo("") + click.echo( + "# Hint1: You can run patch in watch mode using: calm run patch {} --app {} --watch".format( + patch_name, app_name + ) + ) + click.echo( + "# Hint2: You can watch patch runlog on the app using: calm watch action_runlog {} --app {}".format( + runlog_uuid, app_name + ) + ) + + +def get_snapshot_name_arg(config, config_task_id): + default_value = next( + ( + var["value"] + for var in config["variable_list"] + if var["name"] == "snapshot_name" + ), + "", + ) + val = click.prompt( + "Value for Snapshot Name [{}]".format(highlight_text(repr(default_value))), + default=default_value, + show_default=False, + ) + return {"name": "snapshot_name", "value": val, "task_uuid": config_task_id} + + +def get_recovery_point_group_arg(config, config_task_id, recovery_groups): + choices = {} + for i, rg in enumerate(recovery_groups): + choices[i + 1] = { + "label": "{}. 
{} [Created On: {} Expires On: {}]".format( + i + 1, + rg["status"]["name"], + time.strftime( + "%Y-%m-%d %H:%M:%S", + time.gmtime( + rg["status"]["recovery_point_info_list"][0]["creation_time"] + // 1000000 + ), + ), + time.strftime( + "%Y-%m-%d %H:%M:%S", + time.gmtime( + rg["status"]["recovery_point_info_list"][0]["expiration_time"] + // 1000000 + ), + ), + ), + "uuid": rg["status"]["uuid"], + } + if not choices: + LOG.error( + "No recovery group found. Please take a snapshot before running restore action" + ) + sys.exit(-1) + default_idx = 1 + + click.echo("Choose from given choices: ") + for choice in choices.values(): + click.echo("\t{}".format(highlight_text(repr(choice["label"])))) + selected_val = click.prompt( + "Selected Recovery Group [{}]".format(highlight_text(repr(default_idx))), + default=default_idx, + show_default=False, + ) + if selected_val not in choices: + LOG.error( + "Invalid value {}, not present in choices: {}".format( + selected_val, choices.keys() + ) + ) + sys.exit(-1) + return { + "name": "recovery_point_group_uuid", + "value": choices[selected_val]["uuid"], + "task_uuid": config_task_id, + } + + +def run_actions( + app_name, action_name, watch, patch_editables=False, runtime_params_file=None +): + client = get_api_client() + if action_name.lower() == SYSTEM_ACTIONS.CREATE: + click.echo( + "The Create Action is triggered automatically when you deploy a blueprint. It cannot be run separately." + ) + return + if action_name.lower() == SYSTEM_ACTIONS.DELETE: + delete_app([app_name]) # Because Delete requires a different API workflow + return + if action_name.lower() == SYSTEM_ACTIONS.SOFT_DELETE: + delete_app( + [app_name], soft=True + ) # Because Soft Delete also requires a different API workflow + return + + app = _get_app(client, app_name) + app_spec = app["spec"] + app_id = app["metadata"]["uuid"] + + calm_action_name = "action_" + action_name.lower() + action_payload = next( + ( + action + for action in app_spec["resources"]["action_list"] + if action["name"] == calm_action_name or action["name"] == action_name + ), + None, + ) + if not action_payload: + LOG.error("No action found matching name {}".format(action_name)) + sys.exit(-1) + + action_id = action_payload["uuid"] + + action_args = get_action_runtime_args( + app_uuid=app_id, + action_payload=action_payload, + patch_editables=patch_editables, + runtime_params_file=runtime_params_file, + ) + + # Hit action run api (with metadata and minimal spec: [args, target_kind, target_uuid]) + status = app.pop("status") + config_list = status["resources"]["snapshot_config_list"] + config_list.extend(status["resources"]["restore_config_list"]) + for task in action_payload["runbook"]["task_definition_list"]: + if task["type"] == "CALL_CONFIG": + config = next( + config + for config in config_list + if config["uuid"] == task["attrs"]["config_spec_reference"]["uuid"] + ) + if config["type"] == "AHV_SNAPSHOT": + action_args.append(get_snapshot_name_arg(config, task["uuid"])) + elif config["type"] == "AHV_RESTORE": + substrate_id = next( + ( + dep["substrate_configuration"]["uuid"] + for dep in status["resources"]["deployment_list"] + if dep["uuid"] + == config["attrs_list"][0]["target_any_local_reference"]["uuid"] + ), + None, + ) + api_filter = "" + if substrate_id: + api_filter = "substrate_reference==" + substrate_id + res, err = client.application.get_recovery_groups(app_id, api_filter) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + action_args.append( + 
get_recovery_point_group_arg( + config, task["uuid"], res.json()["entities"] + ) + ) + + app["spec"] = { + "args": action_args, + "target_kind": "Application", + "target_uuid": app_id, + } + res, err = client.application.run_action(app_id, action_id, app) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + response = res.json() + runlog_uuid = response["status"]["runlog_uuid"] + click.echo( + "Action is triggered. Got Action Runlog uuid: {}".format( + highlight_text(runlog_uuid) + ) + ) + + if watch: + + def display_action(screen): + screen.clear() + screen.print_at( + "Fetching runlog tree for action '{}'".format(action_name), 0, 0 + ) + screen.refresh() + watch_patch_or_action( + runlog_uuid, + app_name, + get_api_client(), + screen, + ) + screen.wait_for_input(10.0) + + Display.wrapper(display_action, watch=True) + + else: + click.echo("") + click.echo( + "# Hint1: You can run action in watch mode using: calm run action {} --app {} --watch".format( + action_name, app_name + ) + ) + click.echo( + "# Hint2: You can watch action runlog on the app using: calm watch action_runlog {} --app {}".format( + runlog_uuid, app_name + ) + ) + + +def poll_runnnable(poll_func, completion_func, poll_interval=10): + # Poll every 10 seconds on the app status, for 5 mins + maxWait = 5 * 60 + count = 0 + while count < maxWait: + # call status api + res, err = poll_func() + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + response = res.json() + (completed, msg) = completion_func(response) + if completed: + # click.echo(msg) + break + count += poll_interval + time.sleep(poll_interval) + + +def download_runlog(runlog_id, app_name, file_name): + """Download runlogs, given runlog uuid and app name""" + + client = get_api_client() + app = _get_app(client, app_name) + app_id = app["metadata"]["uuid"] + + if not file_name: + file_name = "runlog_{}.zip".format(runlog_id) + + res, err = client.application.download_runlog(app_id, runlog_id) + if not err: + with open(file_name, "wb") as fw: + fw.write(res.content) + click.echo("Runlogs saved as {}".format(highlight_text(file_name))) + else: + LOG.error("[{}] - {}".format(err["code"], err["error"])) diff --git a/framework/calm/dsl/cli/bp_commands.py b/framework/calm/dsl/cli/bp_commands.py new file mode 100644 index 0000000..85ff007 --- /dev/null +++ b/framework/calm/dsl/cli/bp_commands.py @@ -0,0 +1,406 @@ +import json +import time +import sys +import click + +from calm.dsl.api import get_api_client +from calm.dsl.config import get_context +from calm.dsl.log import get_logging_handle + +from .utils import Display +from .main import get, compile, describe, create, launch, delete, decompile, format +from .bps import ( + get_blueprint_list, + describe_bp, + format_blueprint_command, + compile_blueprint_command, + launch_blueprint_simple, + patch_bp_if_required, + delete_blueprint, + decompile_bp, + create_blueprint_from_json, + create_blueprint_from_dsl, +) +from .apps import watch_app +from .utils import FeatureDslOption + +LOG = get_logging_handle(__name__) + + +@get.command("bps") +@click.option("--name", "-n", default=None, help="Search for blueprints by name") +@click.option( + "--filter", "filter_by", "-f", default=None, help="Filter blueprints by this string" +) +@click.option("--limit", "-l", default=20, help="Number of results to return") +@click.option( + "--offset", "-s", default=0, help="Offset results by the specified amount" +) +@click.option( + "--quiet", "-q", is_flag=True, default=False, help="Show 
only blueprint names." +) +@click.option( + "--all-items", "-a", is_flag=True, help="Get all items, including deleted ones" +) +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format", +) +def _get_blueprint_list(name, filter_by, limit, offset, quiet, all_items, out): + """Get the blueprints, optionally filtered by a string""" + + get_blueprint_list(name, filter_by, limit, offset, quiet, all_items, out) + + +def _get_nested_messages(path, obj, message_list): + """Get nested message list objects from the blueprint""" + if isinstance(obj, list): + for index, sub_obj in enumerate(obj): + _get_nested_messages(path, sub_obj, message_list) + elif isinstance(obj, dict): + name = obj.get("name", "") + if name and isinstance(name, str): + path = path + ("." if path else "") + name + for key in obj: + sub_obj = obj[key] + if key == "message_list": + for message in sub_obj: + message["path"] = path + message_list.append(message) + continue + _get_nested_messages(path, sub_obj, message_list) + + +@describe.command("bp") +@click.argument("bp_name") +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format", +) +def _describe_bp(bp_name, out): + """Describe a blueprint""" + + describe_bp(bp_name, out) + + +@format.command("bp") +@click.option( + "--file", + "-f", + "bp_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + required=True, + help="Path of Blueprint file to format", +) +def _format_blueprint_command(bp_file): + """Formats blueprint file using black""" + + format_blueprint_command(bp_file) + + +@compile.command("bp") +@click.option( + "--file", + "-f", + "bp_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + required=True, + help="Path of Blueprint file to upload", +) +@click.option( + "--brownfield_deployments", + "-b", + "brownfield_deployment_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + help="Path of Brownfield Deployment file", +) +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["json", "yaml"]), + default="json", + help="output format", +) +def _compile_blueprint_command(bp_file, brownfield_deployment_file, out): + """Compiles a DSL (Python) blueprint into JSON or YAML""" + compile_blueprint_command(bp_file, brownfield_deployment_file, out) + + +@decompile.command("bp", experimental=True) +@click.argument("name", required=False) +@click.option( + "--file", + "-f", + "bp_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + help="Path to Blueprint file", +) +@click.option( + "--with_secrets", + "-w", + is_flag=True, + default=False, + help="Interactive Mode to provide the value for secrets", +) +@click.option( + "--prefix", + "-p", + default="", + help="Prefix used for appending to entities name(Reserved name cases)", +) +@click.option( + "--dir", + "-d", + "bp_dir", + default=None, + help="Blueprint directory location used for placing decompiled entities", +) +def _decompile_bp(name, bp_file, with_secrets, prefix, bp_dir): + """Decompiles blueprint present on server or json file""" + + decompile_bp(name, bp_file, with_secrets, prefix, bp_dir) + + +@create.command("bp") +@click.option( + "--file", + "-f", + "bp_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + required=True, + help="Path of Blueprint file to upload", +) +@click.option("--name", "-n", 
default=None, help="Blueprint name (Optional)") +@click.option( + "--description", "-d", default=None, help="Blueprint description (Optional)" +) +@click.option( + "--force", + "-fc", + is_flag=True, + default=False, + help="Deletes existing blueprint with the same name before create.", +) +def create_blueprint_command(bp_file, name, description, force): + """Creates a blueprint""" + + client = get_api_client() + + if bp_file.endswith(".json"): + res, err = create_blueprint_from_json( + client, bp_file, name=name, description=description, force_create=force + ) + elif bp_file.endswith(".py"): + res, err = create_blueprint_from_dsl( + client, bp_file, name=name, description=description, force_create=force + ) + else: + LOG.error("Unknown file format {}".format(bp_file)) + return + + if err: + LOG.error(err["error"]) + return + + bp = res.json() + bp_uuid = bp["metadata"]["uuid"] + bp_name = bp["metadata"]["name"] + bp_status = bp.get("status", {}) + bp_state = bp_status.get("state", "DRAFT") + LOG.debug("Blueprint {} has state: {}".format(bp_name, bp_state)) + + if bp_state != "ACTIVE": + msg_list = [] + _get_nested_messages("", bp_status, msg_list) + + if not msg_list: + LOG.error("Blueprint {} created with errors.".format(bp_name)) + LOG.debug(json.dumps(bp_status)) + sys.exit(-1) + + msgs = [] + for msg_dict in msg_list: + msg = "" + path = msg_dict.get("path", "") + if path: + msg = path + ": " + msgs.append(msg + msg_dict.get("message", "")) + + LOG.error( + "Blueprint {} created with {} error(s):".format(bp_name, len(msg_list)) + ) + click.echo("\n".join(msgs)) + sys.exit(-1) + + LOG.info("Blueprint {} created successfully.".format(bp_name)) + + context = get_context() + server_config = context.get_server_config() + pc_ip = server_config["pc_ip"] + pc_port = server_config["pc_port"] + link = "https://{}:{}/console/#page/explore/calm/blueprints/{}".format( + pc_ip, pc_port, bp_uuid + ) + stdout_dict = {"name": bp_name, "link": link, "state": bp_state} + click.echo(json.dumps(stdout_dict, indent=4, separators=(",", ": "))) + + +@launch.command("bp") +@click.argument("blueprint_name") +@click.option( + "--with_secrets", + "-ws", + is_flag=True, + default=False, + help="Preserve secrets while launching the blueprint", +) +@click.option( + "--environment", "-e", default=None, help="Environment for the application" +) +@click.option("--app_name", "-a", default=None, help="Name of your app") +@click.option( + "--profile_name", + "-p", + default=None, + help="Name of app profile to be used for blueprint launch", +) +@click.option( + "--ignore_runtime_variables", + "-i", + is_flag=True, + default=False, + help="Ignore runtime variables and use defaults", +) +@click.option( + "--launch_params", + "-l", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + help="Path to python file for runtime editables", +) +@click.option("--watch/--no-watch", "-w", default=False, help="Watch scrolling output") +@click.option( + "--poll-interval", + "poll_interval", + "-pi", + type=int, + default=10, + show_default=True, + help="Give polling interval", +) +@click.option( + "--brownfield_deployments", + "-b", + "brownfield_deployment_file", + type=FeatureDslOption(feature_min_version="3.3.0"), + help="Path of Brownfield Deployment file (Added in 3.3)", +) +def launch_blueprint_command( + blueprint_name, + environment, + with_secrets, + app_name, + ignore_runtime_variables, + profile_name, + launch_params, + watch, + poll_interval, + blueprint=None, + brownfield_deployment_file=None, 
+): + """Launches a blueprint. + All runtime variables will be prompted by default. When passing the 'ignore_runtime_variables' flag, no variables will be prompted and all default values will be used. + The blueprint default values can be overridden by passing a Python file via 'launch_params'. Any variable not defined in the Python file will keep the default value defined in the blueprint. When passing a Python file, no variables will be prompted. + + \b + Note: Dynamic variables will not have a default value. Users have to select an option during launch. + + \b + >: launch_params: Python file consisting of variables 'variable_list' and 'substrate_list' + Ex: variable_list = [ + { + "value": {"value": <value>}, + "context": <variable context>, + "name": "<variable name>" + } + ] + substrate_list = [ + { + "value": { + <attrs> + }, + "name": <substrate name>, + } + ] + deployment_list = [ + { + "value": { + <attrs> + }, + "name": <deployment name>, + } + ] + credential_list = [ + { + "value": { + <attrs> + }, + "name": <credential name>, + } + ] + snapshot_config_list = [ + { + "value": { + "attrs_list": [ + <attrs> + ] + }, + "name": <snapshot config name>, + } + ] + Sample context for variables: + 1. context = "" # For variable under profile + 2. context = "" # For variable under service + + \b + >: brownfield_deployments: Python file containing brownfield deployments + """ + + app_name = app_name or "App-{}-{}".format(blueprint_name, int(time.time())) + blueprint_name, blueprint = patch_bp_if_required( + with_secrets, environment, blueprint_name, profile_name + ) + + launch_blueprint_simple( + blueprint_name, + app_name, + blueprint=blueprint, + profile_name=profile_name, + patch_editables=not ignore_runtime_variables, + launch_params=launch_params, + brownfield_deployment_file=brownfield_deployment_file, + ) + if watch: + + def display_action(screen): + watch_app(app_name, screen, poll_interval=poll_interval) + screen.wait_for_input(10.0) + + Display.wrapper(display_action, watch=True) + LOG.info("Action runs completed for app {}".format(app_name)) + + +@delete.command("bp") +@click.argument("blueprint_names", nargs=-1) +def _delete_blueprint(blueprint_names): + """Deletes a blueprint""" + + delete_blueprint(blueprint_names) diff --git a/framework/calm/dsl/cli/bps.py b/framework/calm/dsl/cli/bps.py new file mode 100644 index 0000000..4de809f --- /dev/null +++ b/framework/calm/dsl/cli/bps.py @@ -0,0 +1,1855 @@ +from re import sub +import time +import json +import sys +import os +import uuid +from pprint import pprint +import pathlib + +from ruamel import yaml +import arrow +import click +from prettytable import PrettyTable +from copy import deepcopy +from black import format_file_in_place, WriteBack, FileMode + +from calm.dsl.builtins import ( + Blueprint, + SimpleBlueprint, + VmBlueprint, + create_blueprint_payload, + BlueprintType, + MetadataType, + get_valid_identifier, + file_exists, + get_dsl_metadata_map, + init_dsl_metadata_map, +) +from calm.dsl.builtins.models.metadata_payload import get_metadata_payload +from calm.dsl.config import get_context +from calm.dsl.api import get_api_client +from calm.dsl.store import Cache +from calm.dsl.decompile.decompile_render import create_bp_dir +from calm.dsl.decompile.file_handler import get_bp_dir + +from .utils import ( + get_name_query, + get_states_filter, + highlight_text, + import_var_from_file, +) +from .secrets import find_secret, create_secret +from .constants import BLUEPRINT +from .environments import get_project_environment +from calm.dsl.tools import get_module_from_file +from calm.dsl.builtins import Brownfield as BF +from calm.dsl.providers import get_provider +from 
calm.dsl.providers.plugins.ahv_vm.main import AhvNew +from calm.dsl.constants import CACHE +from calm.dsl.log import get_logging_handle +from calm.dsl.builtins.models.calm_ref import Ref + +LOG = get_logging_handle(__name__) + + +def get_blueprint_list(name, filter_by, limit, offset, quiet, all_items, out): + """Get the blueprints, optionally filtered by a string""" + + client = get_api_client() + + params = {"length": limit, "offset": offset} + filter_query = "" + if name: + filter_query = get_name_query([name]) + if filter_by: + filter_query = filter_query + ";(" + filter_by + ")" + if all_items: + filter_query += get_states_filter(BLUEPRINT.STATES) + if filter_query.startswith(";"): + filter_query = filter_query[1:] + + if filter_query: + params["filter"] = filter_query + + res, err = client.blueprint.list(params=params) + + if err: + context = get_context() + server_config = context.get_server_config() + pc_ip = server_config["pc_ip"] + + LOG.warning("Cannot fetch blueprints from {}".format(pc_ip)) + return + + res = res.json() + total_matches = res["metadata"]["total_matches"] + if total_matches > limit: + LOG.warning( + "Displaying {} out of {} entities. Please use --limit and --offset option for more results.".format( + limit, total_matches + ) + ) + + if out == "json": + click.echo(json.dumps(res, indent=4, separators=(",", ": "))) + return + + json_rows = res["entities"] + if not json_rows: + click.echo(highlight_text("No blueprint found !!!\n")) + return + + if quiet: + for _row in json_rows: + row = _row["status"] + click.echo(highlight_text(row["name"])) + return + + table = PrettyTable() + table.field_names = [ + "NAME", + "BLUEPRINT TYPE", + "APPLICATION COUNT", + "PROJECT", + "STATE", + "CREATED ON", + "LAST UPDATED", + "UUID", + ] + for _row in json_rows: + row = _row["status"] + metadata = _row["metadata"] + bp_type = ( + "Single VM" + if "categories" in metadata + and "TemplateType" in metadata["categories"] + and metadata["categories"]["TemplateType"] == "Vm" + else "Multi VM/Pod" + ) + + project = ( + metadata["project_reference"]["name"] + if "project_reference" in metadata + else None + ) + + creation_time = int(metadata["creation_time"]) // 1000000 + last_update_time = int(metadata["last_update_time"]) // 1000000 + + table.add_row( + [ + highlight_text(row["name"]), + highlight_text(bp_type), + highlight_text(row["application_count"]), + highlight_text(project), + highlight_text(row["state"]), + highlight_text(time.ctime(creation_time)), + "{}".format(arrow.get(last_update_time).humanize()), + highlight_text(row["uuid"]), + ] + ) + click.echo(table) + + +def describe_bp(blueprint_name, out): + """Displays blueprint data""" + + client = get_api_client() + bp = get_blueprint(blueprint_name, all=True) + + if out == "json": + bp.pop("status", None) + click.echo(json.dumps(bp, indent=4, separators=(",", ": "))) + return + + click.echo("\n----Blueprint Summary----\n") + click.echo( + "Name: " + + highlight_text(blueprint_name) + + " (uuid: " + + highlight_text(bp["metadata"]["uuid"]) + + ")" + ) + click.echo("Description: " + highlight_text(bp["status"]["description"])) + click.echo("Status: " + highlight_text(bp["status"]["state"])) + click.echo( + "Owner: " + highlight_text(bp["metadata"]["owner_reference"]["name"]), nl=False + ) + click.echo( + " Project: " + highlight_text(bp["metadata"]["project_reference"]["name"]) + ) + + created_on = int(bp["metadata"]["creation_time"]) // 1000000 + past = arrow.get(created_on).humanize() + click.echo( + "Created: {} ({})".format( 
+ highlight_text(time.ctime(created_on)), highlight_text(past) + ) + ) + bp_resources = bp.get("status").get("resources", {}) + profile_list = bp_resources.get("app_profile_list", []) + click.echo("Application Profiles [{}]:".format(highlight_text(len(profile_list)))) + for profile in profile_list: + profile_name = profile["name"] + click.echo("\t" + highlight_text(profile_name)) + + bp_deployments = profile.get("deployment_create_list", []) + click.echo("\tDeployments[{}]:".format(highlight_text(len(bp_deployments)))) + for dep in bp_deployments: + click.echo("\t\t{}".format(highlight_text(dep["name"]))) + + dep_substrate = None + for sub in bp_resources.get("substrate_definition_list"): + if sub.get("uuid") == dep.get("substrate_local_reference", {}).get( + "uuid" + ): + dep_substrate = sub + + sub_type = dep_substrate.get("type", "") + account = None + if sub_type != "EXISTING_VM": + account_uuid = dep_substrate["create_spec"]["resources"]["account_uuid"] + account_cache_data = Cache.get_entity_data_using_uuid( + entity_type=CACHE.ENTITY.ACCOUNT, uuid=account_uuid + ) + if sub_type == "AHV_VM": + account_uuid = account_cache_data["data"]["pc_account_uuid"] + account_cache_data = Cache.get_entity_data_using_uuid( + entity_type=CACHE.ENTITY.ACCOUNT, uuid=account_uuid + ) + + account = account_cache_data["name"] + + click.echo("\t\tSubstrate:") + click.echo("\t\t\t{}".format(highlight_text(dep_substrate["name"]))) + click.echo("\t\t\tType: {}".format(highlight_text(sub_type))) + if account: + click.echo("\t\t\tAccount: {}".format(highlight_text(account))) + + click.echo("\tActions[{}]:".format(highlight_text(len(profile["action_list"])))) + for action in profile["action_list"]: + action_name = action["name"] + if action_name.startswith("action_"): + prefix_len = len("action_") + action_name = action_name[prefix_len:] + click.echo("\t\t" + highlight_text(action_name)) + + service_list = ( + bp.get("status").get("resources", {}).get("service_definition_list", []) + ) + click.echo("Services [{}]:".format(highlight_text(len(service_list)))) + for service in service_list: + service_name = service["name"] + click.echo("\t" + highlight_text(service_name)) + # click.echo("\tActions:") + + +def get_blueprint_module_from_file(bp_file): + """Returns Blueprint module given a user blueprint dsl file (.py)""" + return get_module_from_file("calm.dsl.user_bp", bp_file) + + +def get_blueprint_class_from_module(user_bp_module): + """Returns blueprint class given a module""" + + UserBlueprint = None + for item in dir(user_bp_module): + obj = getattr(user_bp_module, item) + if isinstance(obj, (type(Blueprint), type(SimpleBlueprint), type(VmBlueprint))): + if obj.__bases__[0] in (Blueprint, SimpleBlueprint, VmBlueprint): + UserBlueprint = obj + + return UserBlueprint + + +def get_brownfield_deployment_classes(brownfield_deployment_file=None): + """Get brownfield deployment classes""" + + bf_deployments = [] + if not brownfield_deployment_file: + return [] + + bd_module = get_module_from_file( + "calm.dsl.brownfield_deployment", brownfield_deployment_file + ) + for item in dir(bd_module): + obj = getattr(bd_module, item) + if isinstance(obj, type(BF.Deployment)): + if obj.__bases__[0] == (BF.Deployment): + bf_deployments.append(obj) + + return bf_deployments + + +def compile_blueprint(bp_file, brownfield_deployment_file=None): + + # Constructing metadata payload + # Note: This should be constructed before loading bp module. 
As metadata will be used while getting bp_payload + metadata_payload = get_metadata_payload(bp_file) + + user_bp_module = get_blueprint_module_from_file(bp_file) + UserBlueprint = get_blueprint_class_from_module(user_bp_module) + if UserBlueprint is None: + return None + + # Fetching bf_deployments + bf_deployments = get_brownfield_deployment_classes(brownfield_deployment_file) + if bf_deployments: + bf_dep_map = {bd.__name__: bd for bd in bf_deployments} + for pf in UserBlueprint.profiles: + for ind, dep in enumerate(pf.deployments): + if dep.__name__ in bf_dep_map: + bf_dep = bf_dep_map[dep.__name__] + # Add the packages and substrates from deployment + bf_dep.packages = dep.packages + bf_dep.substrate = dep.substrate + + # If name attribute not exists in brownfield deployment file and given in blueprint file, + # Use the one that is given in blueprint file + if dep.name and (not bf_dep.name): + bf_dep.name = dep.name + + # Replacing new deployment in profile.deployments + pf.deployments[ind] = bf_dep + + ContextObj = get_context() + project_config = ContextObj.get_project_config() + + bp_payload = None + if isinstance(UserBlueprint, type(SimpleBlueprint)): + bp_payload = UserBlueprint.make_bp_dict() + if "project_reference" in metadata_payload: + bp_payload["metadata"]["project_reference"] = metadata_payload[ + "project_reference" + ] + else: + project_name = project_config["name"] + bp_payload["metadata"]["project_reference"] = Ref.Project(project_name) + else: + if isinstance(UserBlueprint, type(VmBlueprint)): + UserBlueprint = UserBlueprint.make_bp_obj() + + UserBlueprintPayload, _ = create_blueprint_payload( + UserBlueprint, metadata=metadata_payload + ) + bp_payload = UserBlueprintPayload.get_dict() + + # Adding the display map to client attr + display_name_map = get_dsl_metadata_map() + bp_payload["spec"]["resources"]["client_attrs"] = {"None": display_name_map} + + # Note - Install/Uninstall runbooks are not actions in Packages. + # Remove package actions after compiling. 
+ cdict = bp_payload["spec"]["resources"] + for package in cdict["package_definition_list"]: + if "action_list" in package: + del package["action_list"] + + return bp_payload + + +def create_blueprint( + client, bp_payload, name=None, description=None, force_create=False +): + + bp_payload.pop("status", None) + + credential_list = bp_payload["spec"]["resources"]["credential_definition_list"] + for cred in credential_list: + if cred["secret"].get("secret", None): + secret = cred["secret"].pop("secret") + + try: + value = find_secret(secret) + + except ValueError: + click.echo( + "\nNo secret corresponding to '{}' found !!!\n".format(secret) + ) + value = click.prompt("Please enter its value", hide_input=True) + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to store it locally")), + default="n", + ) + if choice[0] == "y": + create_secret(secret, value) + + cred["secret"]["value"] = value + + if name: + bp_payload["spec"]["name"] = name + bp_payload["metadata"]["name"] = name + + if description: + bp_payload["spec"]["description"] = description + + bp_resources = bp_payload["spec"]["resources"] + bp_name = bp_payload["spec"]["name"] + bp_desc = bp_payload["spec"]["description"] + bp_metadata = bp_payload["metadata"] + + return client.blueprint.upload_with_secrets( + bp_name, + bp_desc, + bp_resources, + bp_metadata=bp_metadata, + force_create=force_create, + ) + + +def create_blueprint_from_json( + client, path_to_json, name=None, description=None, force_create=False +): + """ + creates blueprint from the bp json supplied. + NOTE: Project mentioned in the json file remains unchanged + """ + + with open(path_to_json, "r") as f: + bp_payload = json.loads(f.read()) + + ContextObj = get_context() + project_config = ContextObj.get_project_config() + configured_project = project_config["name"] + + # If no project is given in payload, it is created with default project + bp_project_name = "default" + + if ( + bp_payload.get("metadata") + and bp_payload["metadata"].get("project_reference") + and bp_payload["metadata"]["project_reference"].get("uuid") + ): + bp_project_uuid = bp_payload["metadata"]["project_reference"]["uuid"] + if bp_project_uuid: + bp_project_data = Cache.get_entity_data_using_uuid( + entity_type=CACHE.ENTITY.PROJECT, uuid=bp_project_uuid + ) + if bp_project_data: + bp_project_name = bp_project_data["name"] + + if bp_project_name != configured_project: + LOG.warning( + "Project in supplied json is different from configured project('{}')".format( + configured_project + ) + ) + + return create_blueprint( + client, + bp_payload, + name=name, + description=description, + force_create=force_create, + ) + + +def create_blueprint_from_dsl( + client, bp_file, name=None, description=None, force_create=False +): + + bp_payload = compile_blueprint(bp_file) + if bp_payload is None: + err_msg = "User blueprint not found in {}".format(bp_file) + err = {"error": err_msg, "code": -1} + return None, err + + # Brownfield blueprints creation should be blocked using dsl file + if bp_payload["spec"]["resources"].get("type", "") == "BROWNFIELD": + LOG.error( + "Command not allowed for brownfield blueprints. 
Please use 'calm create app -f ' for creating brownfield application" + ) + sys.exit(-1) + + return create_blueprint( + client, + bp_payload, + name=name, + description=description, + force_create=force_create, + ) + + +def decompile_bp(name, bp_file, with_secrets=False, prefix="", bp_dir=None): + """helper to decompile blueprint""" + + if name and bp_file: + LOG.error( + "Please provide either blueprint file location or server blueprint name" + ) + sys.exit(-1) + + if name: + decompile_bp_from_server( + name=name, with_secrets=with_secrets, prefix=prefix, bp_dir=bp_dir + ) + + elif bp_file: + decompile_bp_from_file( + filename=bp_file, with_secrets=with_secrets, prefix=prefix, bp_dir=bp_dir + ) + + else: + LOG.error( + "Please provide either blueprint file location or server blueprint name" + ) + sys.exit(-1) + + +def decompile_bp_from_server(name, with_secrets=False, prefix="", bp_dir=None): + """decompiles the blueprint by fetching it from server""" + + client = get_api_client() + blueprint = get_blueprint(name) + bp_uuid = blueprint["metadata"]["uuid"] + + res, err = client.blueprint.export_file(bp_uuid) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + _decompile_bp( + bp_payload=res, with_secrets=with_secrets, prefix=prefix, bp_dir=bp_dir + ) + + +def decompile_bp_from_file(filename, with_secrets=False, prefix="", bp_dir=None): + """decompile blueprint from local blueprint file""" + + # ToDo - Fix this + bp_payload = json.loads(open(filename).read()) + # bp_payload = read_spec(filename) + _decompile_bp( + bp_payload=bp_payload, with_secrets=with_secrets, prefix=prefix, bp_dir=bp_dir + ) + + +def _decompile_bp(bp_payload, with_secrets=False, prefix="", bp_dir=None): + """decompiles the blueprint from payload""" + + blueprint = bp_payload["spec"]["resources"] + blueprint_name = bp_payload["spec"].get("name", "DslBlueprint") + blueprint_description = bp_payload["spec"].get("description", "") + + blueprint_metadata = bp_payload["metadata"] + + # POP unnecessary keys + blueprint_metadata.pop("creation_time", None) + blueprint_metadata.pop("last_update_time", None) + + metadata_obj = MetadataType.decompile(blueprint_metadata) + + # Copying dsl_name_map to global client_attrs + if bp_payload["spec"]["resources"]["client_attrs"].get("None", {}): + init_dsl_metadata_map(bp_payload["spec"]["resources"]["client_attrs"]["None"]) + + LOG.info("Decompiling blueprint {}".format(blueprint_name)) + + for sub_obj in blueprint.get("substrate_definition_list"): + sub_type = sub_obj.get("type", "") or "AHV_VM" + if sub_type == "K8S_POD": + raise NotImplementedError( + "Decompilation for k8s pod is not supported right now" + ) + elif sub_type != "AHV_VM": + LOG.warning( + "Decompilation support for providers other than AHV is experimental." + ) + break + + prefix = get_valid_identifier(prefix) + bp_cls = BlueprintType.decompile(blueprint, prefix=prefix) + bp_cls.__name__ = get_valid_identifier(blueprint_name) + bp_cls.__doc__ = blueprint_description + + create_bp_dir( + bp_cls=bp_cls, + with_secrets=with_secrets, + metadata_obj=metadata_obj, + bp_dir=bp_dir, + ) + click.echo( + "\nSuccessfully decompiled. Directory location: {}. 
Blueprint location: {}".format( + get_bp_dir(), os.path.join(get_bp_dir(), "blueprint.py") + ) + ) + + +def compile_blueprint_command(bp_file, brownfield_deployment_file, out): + + bp_payload = compile_blueprint( + bp_file, brownfield_deployment_file=brownfield_deployment_file + ) + if bp_payload is None: + LOG.error("User blueprint not found in {}".format(bp_file)) + return + + credential_list = bp_payload["spec"]["resources"]["credential_definition_list"] + is_secret_avl = False + for cred in credential_list: + if cred["secret"].get("secret", None): + cred["secret"].pop("secret") + is_secret_avl = True + # At compile time, value will be empty + cred["secret"]["value"] = "" + + if is_secret_avl: + LOG.warning("Secrets are not shown in payload !!!") + + if out == "json": + click.echo(json.dumps(bp_payload, indent=4, separators=(",", ": "))) + elif out == "yaml": + click.echo(yaml.dump(bp_payload, default_flow_style=False)) + else: + LOG.error("Unknown output format {} given".format(out)) + + +def format_blueprint_command(bp_file): + path = pathlib.Path(bp_file) + LOG.debug("Formatting blueprint {} using black".format(path)) + if format_file_in_place( + path, fast=False, mode=FileMode(), write_back=WriteBack.DIFF + ): + LOG.info("Patching above diff to blueprint - {}".format(path)) + format_file_in_place( + path, fast=False, mode=FileMode(), write_back=WriteBack.YES + ) + LOG.info("All done!") + else: + LOG.info("Blueprint {} left unchanged.".format(path)) + + +def get_blueprint_uuid(name, all=False, is_brownfield=False): + """returns blueprint uuid if present else raises error""" + + client = get_api_client() + params = {"filter": "name=={}".format(name)} + if not all: + params["filter"] += ";state!=DELETED" + + if is_brownfield: + params["filter"] += ";type==BROWNFIELD" + + res, err = client.blueprint.list(params=params) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + response = res.json() + entities = response.get("entities", None) + blueprint = None + if entities: + if len(entities) != 1: + LOG.error("More than one blueprint found - {}".format(entities)) + sys.exit(-1) + + LOG.info("{} found ".format(name)) + blueprint = entities[0] + else: + LOG.error("No blueprint found with name {} found".format(name)) + sys.exit("No blueprint found with name {} found".format(name)) + + return blueprint["metadata"]["uuid"] + + +def get_blueprint(name, all=False, is_brownfield=False): + """returns blueprint get call data""" + + client = get_api_client() + bp_uuid = get_blueprint_uuid(name=name, all=all, is_brownfield=is_brownfield) + res, err = client.blueprint.read(bp_uuid) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + return res.json() + + +def get_blueprint_runtime_editables(client, blueprint): + + bp_uuid = blueprint.get("metadata", {}).get("uuid", None) + if not bp_uuid: + LOG.debug("Blueprint UUID not present in metadata") + raise Exception("Invalid blueprint provided {} ".format(blueprint)) + res, err = client.blueprint._get_editables(bp_uuid) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + response = res.json() + return response.get("resources", []) + + +def get_variable_value(variable, bp_data, launch_runtime_vars): + """return variable value from launch_params/cli_prompt""" + + var_context = variable["context"] + var_name = variable.get("name", "") + + # If launch_runtime_vars is given, return variable vlaue from it + if launch_runtime_vars: + return get_val_launch_runtime_var( + 
launch_runtime_vars=launch_runtime_vars, + field="value", # Only 'value' attribute is editable + path=var_name, + context=var_context, + ) + + # Fetch the options for value of dynamic variables + if variable["type"] in ["HTTP_LOCAL", "EXEC_LOCAL", "HTTP_SECRET", "EXEC_SECRET"]: + choices, err = get_variable_value_options( + bp_uuid=bp_data["metadata"]["uuid"], var_uuid=variable["uuid"] + ) + if err: + click.echo("") + LOG.warning( + "Exception occured while fetching value of variable '{}': {}".format( + var_name, err + ) + ) + + # Stripping out new line character from options + choices = [_c.strip() for _c in choices] + + else: + # Extract options for predefined variables from bp payload + var_data = get_variable_data( + bp_data=bp_data["status"]["resources"], + context_data=bp_data["status"]["resources"], + var_context=var_context, + var_name=var_name, + ) + choices = var_data.get("options", {}).get("choices", []) + + click.echo("") + if choices: + click.echo("Choose from given choices: ") + for choice in choices: + click.echo("\t{}".format(highlight_text(repr(choice)))) + + # CASE for `type` in ['SECRET', 'EXEC_SECRET', 'HTTP_SECRET'] + hide_input = variable.get("type").split("_")[-1] == "SECRET" + var_default_val = variable["value"].get("value", None) + new_val = click.prompt( + "Value for '{}' in {} [{}]".format( + var_name, var_context, highlight_text(repr(var_default_val)) + ), + default=var_default_val, + show_default=False, + hide_input=hide_input, + ) + + return new_val + + +def get_variable_data(bp_data, context_data, var_context, var_name): + """return variable data from blueprint payload""" + + context_map = { + "app_profile": "app_profile_list", + "deployment": "deployment_create_list", + "package": "package_definition_list", + "service": "service_definition_list", + "substrate": "substrate_definition_list", + "action": "action_list", + "runbook": "runbook", + } + + # Converting to list + context_list = var_context.split(".") + i = 0 + + # Iterate the list + while i < len(context_list): + entity_type = context_list[i] + + if entity_type in context_map: + entity_type_val = context_map[entity_type] + if entity_type_val in context_data: + context_data = context_data[entity_type_val] + else: + context_data = bp_data[entity_type_val] + elif entity_type == "variable": + break + + else: + LOG.error("Unknown entity type {}".format(entity_type)) + sys.exit(-1) + + entity_name = context_list[i + 1] + if isinstance(context_data, list): + for entity in context_data: + if entity["name"] == entity_name: + context_data = entity + break + + # Increment iterator by two positions + i = i + 2 + + # Checking for the variable data + for var in context_data["variable_list"]: + if var_name == var["name"]: + return var + + LOG.error("No data found with variable name {}".format(var_name)) + sys.exit(-1) + + +def get_val_launch_runtime_var(launch_runtime_vars, field, path, context): + """Returns value of variable from launch_runtime_vars(Non-interactive)""" + + filtered_launch_runtime_vars = list( + filter( + lambda e: is_launch_runtime_vars_context_matching(e["context"], context) + and e["name"] == path, + launch_runtime_vars, + ) + ) + if len(filtered_launch_runtime_vars) > 1: + LOG.error( + "Unable to populate runtime editables: Multiple matches for name {} and context {}".format( + path, context + ) + ) + sys.exit(-1) + if len(filtered_launch_runtime_vars) == 1: + return filtered_launch_runtime_vars[0].get("value", {}).get(field, None) + return None + + +def 
get_val_launch_runtime_substrate(launch_runtime_substrates, path, context=None): + """Returns value of substrate from launch_runtime_substrates(Non-interactive)""" + + filtered_launch_runtime_substrates = list( + filter(lambda e: e["name"] == path, launch_runtime_substrates) + ) + if len(filtered_launch_runtime_substrates) > 1: + LOG.error( + "Unable to populate runtime editables: Multiple matches for name {} and context {}".format( + path, context + ) + ) + sys.exit(-1) + if len(filtered_launch_runtime_substrates) == 1: + return filtered_launch_runtime_substrates[0].get("value", {}) + return None + + +def get_val_launch_runtime_deployment(launch_runtime_deployments, path, context=None): + """Returns value of deployment from launch_runtime_deployments(Non-interactive)""" + + launch_runtime_deployments = list( + filter(lambda e: e["name"] == path, launch_runtime_deployments) + ) + if len(launch_runtime_deployments) > 1: + LOG.error( + "Unable to populate runtime editables: Multiple matches for name {} and context {}".format( + path, context + ) + ) + sys.exit(-1) + if len(launch_runtime_deployments) == 1: + return launch_runtime_deployments[0].get("value", {}) + return None + + +def get_val_launch_runtime_credential(launch_runtime_credentials, path, context=None): + """Returns value of credential from launch_runtime_credentials(Non-interactive)""" + + launch_runtime_credentials = list( + filter(lambda e: e["name"] == path, launch_runtime_credentials) + ) + if len(launch_runtime_credentials) > 1: + LOG.error( + "Unable to populate runtime editables: Multiple matches for name {} and context {}".format( + path, context + ) + ) + sys.exit(-1) + if len(launch_runtime_credentials) == 1: + return launch_runtime_credentials[0].get("value", {}) + return None + + +def is_launch_runtime_vars_context_matching(launch_runtime_var_context, context): + """Used for matching context of variables""" + + context_list = context.split(".") + if len(context_list) > 1 and context_list[-1] == "variable": + return context_list[-2] == launch_runtime_var_context or ( + is_launch_runtime_var_action_match(launch_runtime_var_context, context_list) + ) + return False + + +def is_launch_runtime_var_action_match(launch_runtime_var_context, context_list): + """Used for matching context of variable under action""" + + launch_runtime_var_context_list = launch_runtime_var_context.split(".") + + # Note: As variables under profile level actions can be marked as runtime_editable only + # Context ex: app_profile..action..runbook..variable + if len(launch_runtime_var_context_list) == 2 and len(context_list) >= 4: + if ( + context_list[1] == launch_runtime_var_context_list[0] + and context_list[3] == launch_runtime_var_context_list[1] + ): + return True + return False + + +def parse_launch_params_attribute(launch_params, parse_attribute): + """Parses launch params and return value of parse_attribute i.e. variable_list, substrate_list, deployment_list, credenetial_list in file""" + + if launch_params: + if file_exists(launch_params) and launch_params.endswith(".py"): + return import_var_from_file(launch_params, parse_attribute, []) + else: + LOG.error( + "Invalid launch_params passed! Must be a valid and existing.py file!" 
+ ) + sys.exit(-1) + return [] + + +def parse_launch_runtime_vars(launch_params): + """Returns variable_list object from launch_params file""" + + return parse_launch_params_attribute( + launch_params=launch_params, parse_attribute="variable_list" + ) + + +def parse_launch_runtime_substrates(launch_params): + """Returns substrate_list object from launch_params file""" + + return parse_launch_params_attribute( + launch_params=launch_params, parse_attribute="substrate_list" + ) + + +def parse_launch_runtime_deployments(launch_params): + """Returns deployment_list object from launch_params file""" + + return parse_launch_params_attribute( + launch_params=launch_params, parse_attribute="deployment_list" + ) + + +def parse_launch_runtime_credentials(launch_params): + """Returns credential_list object from launch_params file""" + + return parse_launch_params_attribute( + launch_params=launch_params, parse_attribute="credential_list" + ) + + +def parse_launch_runtime_configs(launch_params, config_type): + """Returns snapshot or restore config_list obj frorm launch_params file""" + return parse_launch_params_attribute( + launch_params=launch_params, parse_attribute=config_type + "_config_list" + ) + + +def get_variable_value_options(bp_uuid, var_uuid, poll_interval=10): + """returns dynamic variable values and api exception if occured""" + + client = get_api_client() + res, _ = client.blueprint.variable_values(uuid=bp_uuid, var_uuid=var_uuid) + + var_task_data = res.json() + + # req_id and trl_id are necessary + req_id = var_task_data["request_id"] + trl_id = var_task_data["trl_id"] + + # Poll till completion of epsilon task + maxWait = 5 * 60 + count = 0 + while count < maxWait: + res, err = client.blueprint.variable_values_from_trlid( + uuid=bp_uuid, var_uuid=var_uuid, req_id=req_id, trl_id=trl_id + ) + + # If there is exception during variable api call, it would be silently ignored + if err: + return list(), err + + var_val_data = res.json() + if var_val_data["state"] == "SUCCESS": + return var_val_data["values"], None + + count += poll_interval + time.sleep(poll_interval) + + LOG.error("Waited for 5 minutes for dynamic variable evaludation") + sys.exit(-1) + + +def get_protection_policy_rule( + protection_policy_uuid, + protection_rule_uuid, + snapshot_config_uuid, + app_profile, + protection_policies, + subnet_cluster_map, + substrate_list, +): + """returns protection policy, protection rule tuple from cli_prompt""" + + snapshot_config = next( + ( + config + for config in app_profile["snapshot_config_list"] + if config["uuid"] == snapshot_config_uuid + ), + None, + ) + if not snapshot_config: + LOG.err( + "No snapshot config with uuid {} found in App Profile {}".format( + snapshot_config_uuid, app_profile["name"] + ) + ) + sys.exit("Snapshot config {} not found".format(snapshot_config_uuid)) + is_local_snapshot = ( + snapshot_config["attrs_list"][0]["snapshot_location_type"].lower() == "local" + ) + config_target = snapshot_config["attrs_list"][0]["target_any_local_reference"] + target_substrate_reference = next( + ( + deployment["substrate_local_reference"] + for deployment in app_profile["deployment_create_list"] + if deployment["uuid"] == config_target["uuid"] + ), + None, + ) + if not target_substrate_reference: + LOG.error( + "No deployment with uuid {} found under app profile {}".format( + config_target, app_profile["name"] + ) + ) + sys.exit("Deployment {} not found".format(config_target)) + target_subnet_uuid = next( + ( + 
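+            # Subnet attached to the first NIC of the substrate that the snapshot
+            # config targets; it is mapped to its cluster via subnet_cluster_map below.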
substrate["create_spec"]["resources"]["nic_list"][0]["subnet_reference"][ + "uuid" + ] + for substrate in substrate_list + if substrate["uuid"] == target_substrate_reference["uuid"] + ), + None, + ) + if not target_subnet_uuid: + LOG.error( + "No substrate {} with uuid {} found".format( + target_substrate_reference.get("name", ""), + target_substrate_reference["uuid"], + ) + ) + sys.exit("Substrate {} not found".format(target_substrate_reference["uuid"])) + + default_policy = "" + policy_choices = {} + for policy in protection_policies: + if ( + is_local_snapshot and policy["resources"]["rule_type"].lower() != "remote" + ) or ( + not is_local_snapshot + and policy["resources"]["rule_type"].lower() != "local" + ): + policy_choices[policy["name"]] = policy + if (not default_policy and policy["resources"]["is_default"]) or ( + protection_policy_uuid == policy["uuid"] + ): + default_policy = policy["name"] + if not policy_choices: + LOG.error( + "No protection policy found under this project. Please add one from the UI" + ) + sys.exit("No protection policy found") + if not default_policy or default_policy not in policy_choices: + default_policy = list(policy_choices.keys())[0] + click.echo("") + click.echo("Choose from given choices: ") + for choice in policy_choices.keys(): + click.echo("\t{}".format(highlight_text(repr(choice)))) + + selected_policy_name = click.prompt( + "Protection Policy for '{}' [{}]".format( + snapshot_config["name"], highlight_text(repr(default_policy)) + ), + default=default_policy, + show_default=False, + ) + if selected_policy_name not in policy_choices: + LOG.error( + "Invalid value '{}' for protection policy".format(selected_policy_name) + ) + sys.exit("Invalid protection policy") + selected_policy = policy_choices[selected_policy_name] + ordered_site_list = selected_policy["resources"]["ordered_availability_site_list"] + cluster_uuid = [ + sc["cluster_uuid"] + for sc in subnet_cluster_map + if sc["subnet_uuid"] == target_subnet_uuid + ] + if not cluster_uuid: + LOG.error( + "Cannot find the cluster associated with the subnet having uuid {}".format( + target_subnet_uuid + ) + ) + sys.exit("Cluster not found") + cluster_uuid = cluster_uuid[0] + cluster_idx = -1 + for i, site in enumerate(ordered_site_list): + if ( + site["infra_inclusion_list"]["cluster_references"][0]["uuid"] + == cluster_uuid + ): + cluster_idx = i + break + if cluster_idx < 0: + LOG.error( + "Unable to find cluster with uuid {} in protection policy {}".format( + cluster_uuid, selected_policy_name + ) + ) + sys.exit("Cluster not found") + both_rules_present = selected_policy["resources"]["rule_type"].lower() == "both" + + def get_target_cluster_name(target_cluster_idx): + target_cluster_uuid = ordered_site_list[target_cluster_idx][ + "infra_inclusion_list" + ]["cluster_references"][0]["uuid"] + target_cluster_name = next( + ( + sc["cluster_name"] + for sc in subnet_cluster_map + if sc["cluster_uuid"] == target_cluster_uuid + ), + None, + ) + if not target_cluster_name: + LOG.error( + "Unable to find the cluster with uuid {}".format(target_cluster_uuid) + ) + sys.exit("Cluster not found") + return target_cluster_name + + default_rule_idx, i = 1, 1 + rule_choices = {} + for rule in selected_policy["resources"]["app_protection_rule_list"]: + source_cluster_idx = rule["first_availability_site_index"] + if source_cluster_idx == cluster_idx: + expiry, categories = {}, "" + if both_rules_present: + if ( + is_local_snapshot + and source_cluster_idx == rule["second_availability_site_index"] + ): + expiry 
= rule["local_snapshot_retention_policy"][ + "snapshot_expiry_policy" + ] + elif ( + not is_local_snapshot + and source_cluster_idx != rule["second_availability_site_index"] + ): + categories = ", ".join( + [ + "{}:{}".format(k, v[0]) + for k, v in rule["category_filter"]["params"].items() + ] + ) + expiry = rule["remote_snapshot_retention_policy"][ + "snapshot_expiry_policy" + ] + else: + if is_local_snapshot: + expiry = rule["local_snapshot_retention_policy"][ + "snapshot_expiry_policy" + ] + else: + categories = ", ".join( + [ + "{}:{}".format(k, v[0]) + for k, v in rule["category_filter"]["params"].items() + ] + ) + expiry = rule["remote_snapshot_retention_policy"][ + "snapshot_expiry_policy" + ] + if expiry: + target_cluster_name = get_target_cluster_name( + rule["second_availability_site_index"] + ) + if rule["uuid"] == protection_rule_uuid: + default_rule_idx = i + label = ( + "{}. Snapshot expires in {} {}. Target cluster: {}. Categories: {}".format( + i, + expiry["multiple"], + expiry["interval_type"].lower(), + target_cluster_name, + categories, + ) + if categories + else "{}. Snapshot expires in {} {}. Target cluster: {}".format( + i, + expiry["multiple"], + expiry["interval_type"].lower(), + target_cluster_name, + ) + ) + rule_choices[i] = {"label": label, "rule": rule} + i += 1 + + if not rule_choices: + LOG.error( + "No matching protection rules found under protection policy {}. Please add the rules using UI to continue".format( + selected_policy_name + ) + ) + sys.exit("No protection rules found") + click.echo("") + click.echo("Choose from given choices: ") + for choice in rule_choices.values(): + click.echo("\t{}".format(highlight_text(repr(choice["label"])))) + + selected_rule = click.prompt( + "Protection Rule for '{}' [{}]".format( + snapshot_config["name"], highlight_text(repr(default_rule_idx)) + ), + default=default_rule_idx, + show_default=False, + ) + if selected_rule not in rule_choices: + LOG.error("Invalid value '{}' for protection rule".format(selected_rule)) + sys.exit("Invalid protection rule") + return selected_policy, rule_choices[selected_rule]["rule"] + + +def get_app(app_name): + """ + This routine checks if app with give name exists or not. 
+ If exists then returns the app list resp + args: + app_name (str): app name + returns: + resp (dict): app response if app exists + """ + client = get_api_client() + + LOG.info("Searching for existing applications with name {}".format(app_name)) + + resp, err = client.application.list(params={"filter": "name=={}".format(app_name)}) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + resp = resp.json() + if resp["metadata"]["total_matches"] > 0: + LOG.info("Application found with name {}".format(app_name)) + return resp + + LOG.info("No existing application found with name {}".format(app_name)) + return None + + +def launch_blueprint_simple( + blueprint_name=None, + app_name=None, + blueprint=None, + profile_name=None, + patch_editables=True, + launch_params=None, + is_brownfield=False, + brownfield_deployment_file=None, + skip_app_name_check=False, +): + client = get_api_client() + + if app_name and not skip_app_name_check: + res = get_app(app_name) + if res: + LOG.debug(res) + LOG.error("Application Name ({}) is already used.".format(app_name)) + sys.exit(-1) + + if not blueprint: + blueprint = get_blueprint(blueprint_name, is_brownfield=is_brownfield) + + bp_metadata = blueprint.get("metadata", {}) + bp_status_data = blueprint.get("status", {}) + + blueprint_uuid = bp_metadata.get("uuid", "") + blueprint_name = blueprint_name or blueprint.get("metadata", {}).get("name", "") + + project_ref = bp_metadata.get("project_reference", {}) + project_uuid = project_ref.get("uuid") + bp_status = bp_status_data["state"] + if bp_status != "ACTIVE": + LOG.error("Blueprint is in {} state. Unable to launch it".format(bp_status)) + sys.exit(-1) + + LOG.info("Fetching runtime editables in the blueprint") + profiles = get_blueprint_runtime_editables(client, blueprint) + profile = None + if profile_name is None: + profile = profiles[0] + else: + for app_profile in profiles: + app_prof_ref = app_profile.get("app_profile_reference", {}) + if app_prof_ref.get("name") == profile_name: + profile = app_profile + + break + if not profile: + LOG.error("No profile found with name {}".format(profile_name)) + sys.exit(-1) + + runtime_bf_deployment_list = [] + if brownfield_deployment_file: + bp_metadata = blueprint.get("metadata", {}) + project_uuid = bp_metadata.get("project_reference", {}).get("uuid", "") + + # Set bp project in dsl context + ContextObj = get_context() + project_config = ContextObj.get_project_config() + project_name = project_config["name"] + + if project_uuid: + project_data = Cache.get_entity_data_using_uuid( + entity_type=CACHE.ENTITY.PROJECT, uuid=project_uuid + ) + bp_project = project_data.get("name") + + if bp_project and bp_project != project_name: + project_name = bp_project + ContextObj.update_project_context(project_name=project_name) + + bf_deployments = get_brownfield_deployment_classes(brownfield_deployment_file) + + bp_profile_data = {} + for _profile in bp_status_data["resources"]["app_profile_list"]: + if _profile["name"] == profile["app_profile_reference"]["name"]: + bp_profile_data = _profile + + # Get substrate-account map + bp_subs_uuid_account_uuid_map = {} + for _sub in bp_status_data["resources"]["substrate_definition_list"]: + if _sub.get("type", "") == "EXISTING_VM": + bp_subs_uuid_account_uuid_map[_sub["uuid"]] = "" + continue + + account_uuid = _sub["create_spec"]["resources"]["account_uuid"] + + if _sub.get("type", "") == "AHV_VM": + account_data = Cache.get_entity_data_using_uuid( + entity_type=CACHE.ENTITY.ACCOUNT, uuid=account_uuid + ) + # 
replace pe account uuid by pc account uuid + account_uuid = account_data["data"]["pc_account_uuid"] + + bp_subs_uuid_account_uuid_map[_sub["uuid"]] = account_uuid + + # Get dep name-uuid map and dep-account_uuid map + bp_dep_name_uuid_map = {} + bp_dep_name_account_uuid_map = {} + for _dep in bp_profile_data.get("deployment_create_list", []): + bp_dep_name_uuid_map[_dep["name"]] = _dep["uuid"] + + _dep_sub_uuid = _dep["substrate_local_reference"]["uuid"] + bp_dep_name_account_uuid_map[_dep["name"]] = bp_subs_uuid_account_uuid_map[ + _dep_sub_uuid + ] + + # Compile brownfield deployment after attaching valid account to instance + for _bf_dep in bf_deployments: + _bf_dep_name = getattr(_bf_dep, "name", "") or _bf_dep.__name__ + + # Attaching correct account to brownfield instances + for _inst in _bf_dep.instances: + _inst.account_uuid = bp_dep_name_account_uuid_map[_bf_dep_name] + + _bf_dep = _bf_dep.get_dict() + + if _bf_dep_name in list(bp_dep_name_uuid_map.keys()): + runtime_bf_deployment_list.append( + { + "uuid": bp_dep_name_uuid_map[_bf_dep_name], + "name": _bf_dep_name, + "value": { + "brownfield_instance_list": _bf_dep.get( + "brownfield_instance_list" + ) + or [] + }, + } + ) + + runtime_editables = profile.pop("runtime_editables", []) + + launch_payload = { + "spec": { + "app_name": app_name + if app_name + else "App-{}-{}".format(blueprint_name, int(time.time())), + "app_description": "", + "app_profile_reference": profile.get("app_profile_reference", {}), + "runtime_editables": runtime_editables, + } + } + + if runtime_editables and patch_editables: + runtime_editables_json = json.dumps( + runtime_editables, indent=4, separators=(",", ": ") + ) + click.echo("Blueprint editables are:\n{}".format(runtime_editables_json)) + + # Check user input + prompt_cli = bool(not launch_params) + launch_runtime_vars = parse_launch_runtime_vars(launch_params) + launch_runtime_substrates = parse_launch_runtime_substrates(launch_params) + launch_runtime_deployments = parse_launch_runtime_deployments(launch_params) + launch_runtime_credentials = parse_launch_runtime_credentials(launch_params) + launch_runtime_snapshot_configs = parse_launch_runtime_configs( + launch_params, "snapshot" + ) + + res, err = client.blueprint.read(blueprint_uuid) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + bp_data = res.json() + + substrate_list = runtime_editables.get("substrate_list", []) + if substrate_list: + if not launch_params: + click.echo("\n\t\t\t", nl=False) + click.secho("SUBSTRATE LIST DATA", underline=True, bold=True) + + substrate_definition_list = bp_data["status"]["resources"][ + "substrate_definition_list" + ] + package_definition_list = bp_data["status"]["resources"][ + "package_definition_list" + ] + substrate_name_data_map = {} + for substrate in substrate_definition_list: + substrate_name_data_map[substrate["name"]] = substrate + + vm_img_map = {} + for package in package_definition_list: + if package["type"] == "SUBSTRATE_IMAGE": + vm_img_map[package["name"]] = package["uuid"] + + for substrate in substrate_list: + if launch_params: + new_val = get_val_launch_runtime_substrate( + launch_runtime_substrates=launch_runtime_substrates, + path=substrate.get("name"), + context=substrate.get("context", None), + ) + if new_val: + substrate["value"] = new_val + + else: + provider_type = substrate["type"] + provider_cls = get_provider(provider_type) + provider_cls.get_runtime_editables( + substrate, + project_uuid, + substrate_name_data_map[substrate["name"]], + 
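+                        # vm_img_map: SUBSTRATE_IMAGE package name -> uuid, built above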
vm_img_map, + ) + + bp_runtime_variables = runtime_editables.get("variable_list", []) + + # POP out action variables(Day2 action variables) bcz they cann't be given at bp launch time + variable_list = [] + for _var in bp_runtime_variables: + _var_context = _var["context"] + context_list = _var_context.split(".") + + # If variable is defined under runbook(action), ignore it + if len(context_list) >= 3 and context_list[-3] == "runbook": + continue + + variable_list.append(_var) + + if variable_list: + if not launch_params: + click.echo("\n\t\t\t", nl=False) + click.secho("VARIABLE LIST DATA", underline=True, bold=True) + + # NOTE: We are expecting only value in variables is editable (Ideal case) + # If later any attribute added to editables, pls change here accordingly + LOG.warning( + "Values fetched from API/ESCRIPT will not have a default. User will have to select an option at launch." + ) + for variable in variable_list: + new_val = get_variable_value( + variable=variable, + bp_data=bp_data, + launch_runtime_vars=launch_runtime_vars, + ) + if new_val: + variable["value"]["value"] = new_val + + deployment_list = runtime_editables.get("deployment_list", []) + # deployment can be only supplied via non-interactive way for now + if deployment_list and launch_params: + for deployment in deployment_list: + new_val = get_val_launch_runtime_deployment( + launch_runtime_deployments=launch_runtime_deployments, + path=deployment.get("name"), + context=deployment.get("context", None), + ) + if new_val: + deployment["value"] = new_val + + credential_list = runtime_editables.get("credential_list", []) + # credential can be only supplied via non-interactive way for now + if credential_list and launch_params: + for credential in credential_list: + new_val = get_val_launch_runtime_credential( + launch_runtime_credentials=launch_runtime_credentials, + path=credential.get("name"), + context=credential.get("context", None), + ) + if new_val: + credential["value"] = new_val + + snapshot_config_list = runtime_editables.get("snapshot_config_list", []) + restore_config_list = runtime_editables.get("restore_config_list", []) + restore_config_map = {config["uuid"]: config for config in restore_config_list} + + if snapshot_config_list and restore_config_list: + click.echo("\n\t\t\t", nl=False) + click.secho("SNAPSHOT CONFIG LIST DATA", underline=True, bold=True) + app_profile = next( + ( + _profile + for _profile in bp_data["status"]["resources"]["app_profile_list"] + if _profile["uuid"] == profile["app_profile_reference"]["uuid"] + ), + None, + ) + if not app_profile: + LOG.error( + "App Profile {} with uuid {} not found".format( + profile.get("app_profile_reference", {}).get("name", ""), + profile.get("app_profile_reference", {}).get("uuid", ""), + ) + ) + sys.exit(-1) + env_uuids = app_profile["environment_reference_list"] + if not env_uuids: + LOG.error( + "Cannot launch a blueprint with snapshot-restore configs without selecting an environment" + ) + sys.exit("No environment selected") + substrate_list = bp_data["status"]["resources"]["substrate_definition_list"] + + for snapshot_config in snapshot_config_list: + if launch_runtime_snapshot_configs: + _config = next( + ( + config + for config in launch_runtime_snapshot_configs + if config["name"] == snapshot_config["name"] + ), + None, + ) + if _config: + snapshot_config_obj = next( + ( + config + for config in app_profile["snapshot_config_list"] + if config["uuid"] == snapshot_config["uuid"] + ), + None, + ) + snapshot_config["value"] = 
deepcopy(_config["value"]) + restore_config_id = snapshot_config_obj[ + "config_reference_list" + ][0]["uuid"] + restore_config = restore_config_map[restore_config_id] + restore_config["value"] = deepcopy(_config["value"]) + continue + + res, err = client.blueprint.protection_policies( + blueprint_uuid, + app_profile["uuid"], + snapshot_config["uuid"], + env_uuids[0], + ) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit("Unable to retrieve protection policies") + protection_policies = [p["status"] for p in res.json()["entities"]] + payload = {"filter": "uuid=={}".format(env_uuids[0])} + res, err = client.environment.list(payload) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit("Unable to retrieve environments") + infra_list = next( + ( + env["status"]["resources"]["infra_inclusion_list"] + for env in res.json()["entities"] + if env["metadata"]["uuid"] == env_uuids[0] + ), + None, + ) + if not infra_list: + LOG.error("Cannot find accounts associated with the environment") + sys.exit("Unable to retrieve accounts") + ntnx_acc = next( + (acc for acc in infra_list if acc["type"] == "nutanix_pc"), None + ) + if not ntnx_acc: + LOG.error( + "No nutanix account found associated with the environment" + ) + sys.exit("No nutanix account found in environment") + ahv_new = AhvNew(client.connection) + filter_query = "_entity_id_=in={}".format( + "|".join(subnet["uuid"] for subnet in ntnx_acc["subnet_references"]) + ) + subnets = ahv_new.subnets( + filter_query=filter_query, + account_uuid=ntnx_acc["account_reference"]["uuid"], + ) + subnet_cluster_map = [ + { + "cluster_name": subnet["status"]["cluster_reference"]["name"], + "cluster_uuid": subnet["status"]["cluster_reference"]["uuid"], + "subnet_name": subnet["status"]["name"], + "subnet_uuid": subnet["metadata"]["uuid"], + } + for subnet in subnets["entities"] + ] + protection_policy = snapshot_config["value"]["attrs_list"][0][ + "app_protection_policy_reference" + ] + protection_rule = snapshot_config["value"]["attrs_list"][0][ + "app_protection_rule_reference" + ] + + protection_policy, protection_rule = get_protection_policy_rule( + protection_policy, + protection_rule, + snapshot_config["uuid"], + app_profile, + protection_policies, + subnet_cluster_map, + substrate_list, + ) + + snapshot_config_obj = next( + ( + config + for config in app_profile["snapshot_config_list"] + if config["uuid"] == snapshot_config["uuid"] + ), + None, + ) + if not snapshot_config_obj: + LOG.err( + "No snapshot config with uuid {} found in App Profile {}".format( + snapshot_config["uuid"], app_profile["name"] + ) + ) + sys.exit("Invalid snapshot config") + updated_value = { + "attrs_list": [ + { + "app_protection_rule_reference": protection_rule["uuid"], + "app_protection_policy_reference": protection_policy[ + "uuid" + ], + } + ] + } + snapshot_config["value"] = updated_value + restore_config_id = snapshot_config_obj["config_reference_list"][0][ + "uuid" + ] + restore_config = restore_config_map[restore_config_id] + restore_config["value"] = updated_value + + runtime_editables_json = json.dumps( + runtime_editables, indent=4, separators=(",", ": ") + ) + LOG.info("Updated blueprint editables are:\n{}".format(runtime_editables_json)) + + if runtime_bf_deployment_list: + bf_dep_names = [bfd["name"] for bfd in runtime_bf_deployment_list] + runtime_deployments = launch_payload["spec"]["runtime_editables"].get( + "deployment_list", [] + ) + for _rd in runtime_deployments: + if _rd["name"] not in bf_dep_names: + 
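+                # Carry over runtime-editable deployments that are not overridden by a
+                # brownfield deployment, so the deployment_list set below contains both.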
runtime_bf_deployment_list.append(_rd) + + launch_payload["spec"]["runtime_editables"][ + "deployment_list" + ] = runtime_bf_deployment_list + + runtime_bf_deployment_list_json = json.dumps( + runtime_bf_deployment_list, indent=4, separators=(",", ": ") + ) + LOG.info( + "Updated blueprint deployment editables are:\n{}".format( + runtime_bf_deployment_list_json + ) + ) + + res, err = client.blueprint.launch(blueprint_uuid, launch_payload) + if not err: + LOG.info("Blueprint {} queued for launch".format(blueprint_name)) + else: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + response = res.json() + launch_req_id = response["status"]["request_id"] + + poll_launch_status(client, blueprint_uuid, launch_req_id) + + +def poll_launch_status(client, blueprint_uuid, launch_req_id): + # Poll every 10 seconds on the app status, for 5 mins + maxWait = 5 * 60 + count = 0 + while count < maxWait: + # call status api + LOG.info("Polling status of Launch") + res, err = client.blueprint.poll_launch(blueprint_uuid, launch_req_id) + response = res.json() + app_state = response["status"]["state"] + pprint(response) + if app_state == "success": + app_uuid = response["status"]["application_uuid"] + + context = get_context() + server_config = context.get_server_config() + pc_ip = server_config["pc_ip"] + pc_port = server_config["pc_port"] + + click.echo("Successfully launched. App uuid is: {}".format(app_uuid)) + + LOG.info( + "App url: https://{}:{}/console/#page/explore/calm/applications/{}".format( + pc_ip, pc_port, app_uuid + ) + ) + break + elif app_state == "failure": + LOG.debug("API response: {}".format(response)) + LOG.error("Failed to launch blueprint. Check API response above.") + break + elif err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + LOG.info(app_state) + count += 10 + time.sleep(10) + + +def delete_blueprint(blueprint_names): + + client = get_api_client() + + for blueprint_name in blueprint_names: + bp_uuid = get_blueprint_uuid(blueprint_name) + _, err = client.blueprint.delete(bp_uuid) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + LOG.info("Blueprint {} deleted".format(blueprint_name)) + + +def create_patched_blueprint( + blueprint, project_data, environment_data, profile_name=None, with_secrets=False +): + """Patch the blueprint with the given environment to create a new blueprint""" + client = get_api_client() + org_bp_name = blueprint["metadata"]["name"] + org_bp_uuid = blueprint["metadata"]["uuid"] + project_uuid = project_data["metadata"]["uuid"] + env_uuid = environment_data["metadata"]["uuid"] + + new_bp_name = "{}-{}".format(org_bp_name, str(uuid.uuid4())[:8]) + request_spec = { + "api_version": "3.0", + "metadata": { + "kind": "blueprint", + "project_reference": {"kind": "project", "uuid": project_uuid}, + }, + "spec": { + "environment_profile_pairs": [ + { + "environment": {"uuid": env_uuid}, + "app_profile": {"name": profile_name}, + "keep_secrets": with_secrets, + } + ], + "new_blueprint": {"name": new_bp_name}, + }, + } + + msg = ( + "Creating Patched blueprint with secrets preserved" + if with_secrets + else "Creating Patched blueprint" + ) + LOG.info(msg) + bp_res, err = client.blueprint.patch_with_environment(org_bp_uuid, request_spec) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + bp_res = bp_res.json() + bp_status = bp_res["status"]["state"] + if bp_status != "ACTIVE": + LOG.error("Blueprint went to {} state".format(bp_status)) + sys.exit(-1) + + return 
bp_res + + +def patch_bp_if_required( + with_secrets=False, environment_name=None, blueprint_name=None, profile_name=None +): + """Patch the blueprint with the given environment to create a new blueprint if the requested app profile + is not already linked to the given environment""" + if environment_name: + bp = get_blueprint(blueprint_name) + project_uuid = bp["metadata"]["project_reference"]["uuid"] + environment_data, project_data = get_project_environment( + name=environment_name, project_uuid=project_uuid + ) + env_uuid = environment_data["metadata"]["uuid"] + + app_profiles = bp["spec"]["resources"]["app_profile_list"] + if profile_name is None: + profile_name = app_profiles[0]["name"] + + found_profile = None + for app_profile in app_profiles: + if app_profile["name"] == profile_name: + found_profile = app_profile + break + + if not found_profile: + raise Exception("No profile found with name {}".format(profile_name)) + + ref_env_uuid = next( + iter(app_profile.get("environment_reference_list", [])), None + ) + if ref_env_uuid != env_uuid: + new_blueprint = create_patched_blueprint( + bp, project_data, environment_data, profile_name, with_secrets + ) + return new_blueprint["metadata"]["name"], new_blueprint + + return blueprint_name, None diff --git a/framework/calm/dsl/cli/brownfield_commands.py b/framework/calm/dsl/cli/brownfield_commands.py new file mode 100644 index 0000000..cdfb82d --- /dev/null +++ b/framework/calm/dsl/cli/brownfield_commands.py @@ -0,0 +1,52 @@ +import click + +from .main import get +from .brownfield_vms import get_brownfield_vms +from .utils import FeatureFlagGroup +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +@get.group("brownfield", cls=FeatureFlagGroup) +def brownfield_get(): + """Get brownfield items""" + + pass + + +@brownfield_get.command("vms") +@click.option("--limit", "-l", default=20, help="Number of results to return") +@click.option( + "--offset", "-s", default=0, help="Offset results by the specified amount" +) +@click.option("--quiet", "-q", is_flag=True, default=False, help="Show only vms names.") +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format", +) +@click.option("--project", "-p", help="Project name", required=True) +@click.option("--account", "-a", help="Account name", default=None) +@click.option( + "--type", + "-t", + "provider_type", + type=click.Choice(["AHV_VM", "AWS_VM", "AZURE_VM", "GCP_VM", "VMWARE_VM"]), + default="AHV_VM", + help="Provider type", +) +def _get_vm_list(limit, offset, quiet, out, project, provider_type, account): + """ + Get brownfield vms + + \b + >: If there are multiple accounts per provider_type in project, user need to supply the account name + other than provider type (added in 3.2.0) + + """ + + get_brownfield_vms(limit, offset, quiet, out, project, provider_type, account) diff --git a/framework/calm/dsl/cli/brownfield_vms.py b/framework/calm/dsl/cli/brownfield_vms.py new file mode 100644 index 0000000..c0d2a01 --- /dev/null +++ b/framework/calm/dsl/cli/brownfield_vms.py @@ -0,0 +1,338 @@ +import sys +import json +import click +import json +from prettytable import PrettyTable +from distutils.version import LooseVersion as LV + +from calm.dsl.api import get_api_client, get_resource_api +from calm.dsl.constants import PROVIDER_ACCOUNT_TYPE_MAP +from calm.dsl.log import get_logging_handle +from calm.dsl.store import Version +from .utils import highlight_text, get_account_details + +LOG = 
get_logging_handle(__name__) + + +def get_brownfield_ahv_vm_list(entity_rows): + """displays ahv brownfield vms""" + + table = PrettyTable() + table.field_names = [ + "NAME", + "CLUSTER", + "SUBNET", + "ADDRESS", + "MEMORY", + "SOCKETS", + "VCPU", + "ID", + ] + + for row in entity_rows: + + # Status section + st_resources = row["status"]["resources"] + + cluster = st_resources["cluster_name"] + subnet = st_resources["subnet_list"] + address = ",".join(st_resources["address_list"]) + memory = st_resources["memory_size_mib"] // 1024 + sockets = st_resources["num_sockets"] + vcpus = st_resources["num_vcpus_per_socket"] + instance_id = st_resources["instance_id"] + instance_name = st_resources["instance_name"] + + table.add_row( + [ + highlight_text(instance_name), + highlight_text(cluster), + highlight_text(subnet), + highlight_text(address), + highlight_text(memory), + highlight_text(sockets), + highlight_text(vcpus), + highlight_text(instance_id), + ] + ) + + click.echo(table) + + +def get_brownfield_aws_vm_list(entity_rows): + """displays aws brownfield vms""" + + table = PrettyTable() + table.field_names = [ + "NAME", + "PUBLIC IP ADDRESS", + "PRIVATE DNS", + "PUBLIC DNS", + "REGION", + "POWER STATE", + "ID", + ] + + for row in entity_rows: + + # Status section + st_resources = row["status"]["resources"] + + address = ",".join(st_resources["public_ip_address"]) + private_dns_name = st_resources["private_dns_name"] + public_dns_name = ",".join(st_resources["public_dns_name"]) + region = ",".join(st_resources["region"]) + power_state = st_resources["power_state"] + instance_id = st_resources["instance_id"] + instance_name = st_resources["instance_name"] + + table.add_row( + [ + highlight_text(instance_name), + highlight_text(address), + highlight_text(private_dns_name), + highlight_text(public_dns_name), + highlight_text(region), + highlight_text(power_state), + highlight_text(instance_id), + ] + ) + + click.echo(table) + + +def get_brownfield_azure_vm_list(entity_rows): + """displays azure brownfield vms""" + + table = PrettyTable() + table.field_names = [ + "NAME", + "RESOURCE GROUP", + "LOCATION", + "PUBLIC IP", + "PRIVATE IP", + "HARDWARE PROFILE", + "ID", + ] + + for row in entity_rows: + + # Status section + st_resources = row["status"]["resources"] + + instance_id = st_resources["instance_id"] + instance_name = st_resources["instance_name"] + resource_group = st_resources["resource_group"] + location = st_resources["location"] + public_ip = st_resources["public_ip_address"] + private_ip = st_resources["private_ip_address"] + hardwareProfile = ( + st_resources["properties"].get("hardwareProfile", {}).get("vmSize", "") + ) + + table.add_row( + [ + highlight_text(instance_name), + highlight_text(resource_group), + highlight_text(location), + highlight_text(public_ip), + highlight_text(private_ip), + highlight_text(hardwareProfile), + highlight_text(instance_id), + ] + ) + + click.echo(table) + + +def get_brownfield_gcp_vm_list(entity_rows): + """displays gcp brownfield vms""" + + table = PrettyTable() + table.field_names = [ + "NAME", + "ZONE", + "SUBNETS", + "NETWORK", + "NAT IP", + "NETWORK NAME", + "ID", + ] + + for row in entity_rows: + + # Status section + st_resources = row["status"]["resources"] + + instance_id = st_resources["id"] + instance_name = st_resources["instance_name"] + zone = st_resources["zone"] + subnetwork = st_resources["subnetwork"] + network = st_resources["network"] + natIP = ",".join(st_resources["natIP"]) + network_name = 
",".join(st_resources["network_name"]) + + table.add_row( + [ + highlight_text(instance_name), + highlight_text(zone), + highlight_text(subnetwork), + highlight_text(network), + highlight_text(natIP), + highlight_text(network_name), + highlight_text(instance_id), + ] + ) + + click.echo(table) + + +def get_vmware_vm_data_with_version_filtering(vm_data): + """returns instance_data_according_to_version_filter""" + + CALM_VERSION = Version.get_version("Calm") + + instance_id = vm_data["instance_id"] + instance_name = vm_data["instance_name"] + + if LV(CALM_VERSION) >= LV("3.3.0"): + hostname = vm_data["guest_hostname"] + address = ",".join(vm_data["guest_ipaddress"]) + vcpus = vm_data["cpu"] + sockets = vm_data["num_vcpus_per_socket"] + memory = int(vm_data["memory"]) // 1024 + guest_family = vm_data.get("guest_family", "") + template = vm_data.get("is_template", False) + + else: + hostname = vm_data["guest.hostName"] + address = ",".join(vm_data["guest.ipAddress"]) + vcpus = vm_data["config.hardware.numCPU"] + sockets = vm_data["config.hardware.numCoresPerSocket"] + memory = int(vm_data["config.hardware.memoryMB"]) // 1024 + guest_family = vm_data.get("guest.guestFamily", "") + template = vm_data.get("config.template", False) + + return ( + instance_id, + instance_name, + hostname, + address, + vcpus, + sockets, + memory, + guest_family, + template, + ) + + +def get_brownfield_vmware_vm_list(entity_rows): + """displays vmware brownfield vms""" + + table = PrettyTable() + table.field_names = [ + "NAME", + "HOSTNAME", + "IP ADDRESS", + "VCPU", + "CORES PER VCPU", + "MEMORY (GIB)", + "GUEST FAMILY", + "TEMPLATE", + "ID", + ] + + for row in entity_rows: + + # Status section + st_resources = row["status"]["resources"] + ( + instance_id, + instance_name, + hostname, + address, + vcpus, + sockets, + memory, + guest_family, + template, + ) = get_vmware_vm_data_with_version_filtering(st_resources) + + table.add_row( + [ + highlight_text(instance_name), + highlight_text(hostname), + highlight_text(address), + highlight_text(vcpus), + highlight_text(sockets), + highlight_text(memory), + highlight_text(guest_family), + highlight_text(template), + highlight_text(instance_id), + ] + ) + + click.echo(table) + + +def get_brownfield_vms( + limit, offset, quiet, out, project_name, provider_type, account_name +): + """displays brownfield vms for a provider""" + + client = get_api_client() + + account_detail = get_account_details( + project_name=project_name, + account_name=account_name, + provider_type=provider_type, + pe_account_needed=True, + ) + project_uuid = account_detail["project"]["uuid"] + account_name = account_detail["account"]["name"] + account_uuid = account_detail["account"]["uuid"] + + LOG.info("Using account '{}' for listing brownfield vms".format(account_name)) + + LOG.info("Fetching brownfield vms") + Obj = get_resource_api("blueprints/brownfield_import/vms", client.connection) + filter_query = "project_uuid=={};account_uuid=={}".format( + project_uuid, account_uuid + ) + params = {"length": limit, "offset": offset, "filter": filter_query} + res, err = Obj.list(params=params) + if err: + LOG.error(err) + sys.exit(-1) + + if out == "json": + click.echo(json.dumps(res.json(), indent=4, separators=(",", ": "))) + return + + json_rows = res.json()["entities"] + if not json_rows: + click.echo( + highlight_text( + "No brownfield {} found on account '{}' !!!\n".format( + provider_type, account_name + ) + ) + ) + return + + if quiet: + for _row in json_rows: + row = _row["status"] + 
click.echo(highlight_text(row["name"])) + return + + if provider_type == "AHV_VM": + get_brownfield_ahv_vm_list(json_rows) + elif provider_type == "AWS_VM": + get_brownfield_aws_vm_list(json_rows) + elif provider_type == "AZURE_VM": + get_brownfield_azure_vm_list(json_rows) + elif provider_type == "GCP_VM": + get_brownfield_gcp_vm_list(json_rows) + elif provider_type == "VMWARE_VM": + get_brownfield_vmware_vm_list(json_rows) diff --git a/framework/calm/dsl/cli/cache_commands.py b/framework/calm/dsl/cli/cache_commands.py new file mode 100644 index 0000000..8e5531e --- /dev/null +++ b/framework/calm/dsl/cli/cache_commands.py @@ -0,0 +1,59 @@ +import datetime +import click + +from calm.dsl.store import Cache +from calm.dsl.constants import CACHE + +from .main import show, update, clear +from .utils import highlight_text +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +def get_cache_table_types(): + """returns cache table types""" + + # Note do not use Cache.get_cache_tables().keys(), + # It will break, container initialization due to cyclic dependency + table_types = [] + for attr in CACHE.ENTITY.__dict__: + if not (attr.startswith("__")): + table_types.append(getattr(CACHE.ENTITY, attr)) + + return table_types + + +@show.command("cache") +def show_cache_command(): + """Display the cache data""" + + Cache.show_data() + + +@clear.command("cache") +def clear_cache(): + """Clear the entities stored in cache""" + + Cache.clear_entities() + LOG.info(highlight_text("Cache cleared at {}".format(datetime.datetime.now()))) + + +@update.command("cache") +@click.option( + "--entity", + "-e", + default=None, + help="Cache entity, if not given will update whole cache", + type=click.Choice(get_cache_table_types()), +) +def update_cache(entity): + """Update the data for dynamic entities stored in the cache""" + + if entity: + Cache.sync_table(entity) + Cache.show_table(entity) + else: + Cache.sync() + Cache.show_data() + LOG.info(highlight_text("Cache updated at {}".format(datetime.datetime.now()))) diff --git a/framework/calm/dsl/cli/click_options.py b/framework/calm/dsl/cli/click_options.py new file mode 100644 index 0000000..7944b57 --- /dev/null +++ b/framework/calm/dsl/cli/click_options.py @@ -0,0 +1,82 @@ +import click + +from calm.dsl.config import get_context +from calm.dsl.log import CustomLogging + + +def simple_verbosity_option(logging_mod=None, *names, **kwargs): + """A decorator that adds a `--verbose, -v` option to the decorated + command. + Name can be configured through ``*names``. Keyword arguments are passed to + the underlying ``click.option`` decorator. + """ + + if not names: + names = ["--verbose", "-v"] + + if not isinstance(logging_mod, CustomLogging): + raise TypeError("Logging object should be instance of CustomLogging.") + + log_level = "INFO" + try: + ContextObj = get_context() + log_config = ContextObj.get_log_config() + + if "level" in log_config: + log_level = log_config.get("level") or log_level + + except (FileNotFoundError, ValueError): + # At the time of initializing dsl, config file may not be present or incorrect + pass + + logging_levels = logging_mod.get_logging_levels() + if log_level not in logging_levels: + raise ValueError( + "Invalid log level in config. 
Select from {}".format(logging_levels) + ) + + log_level = logging_levels.index(log_level) + 1 + kwargs.setdefault("default", log_level) + kwargs.setdefault("expose_value", False) + kwargs.setdefault("help", "Verboses the output") + kwargs.setdefault("is_eager", True) + kwargs.setdefault("count", True) + + def decorator(f): + def _set_level(ctx, param, value): + logging_levels = logging_mod.get_logging_levels() + if value < 1 or value > len(logging_levels): + raise click.BadParameter( + "Should be atleast 1 and atmost {}".format(len(logging_levels)) + ) + + log_level = logging_levels[value - 1] + x = getattr(logging_mod, log_level, None) + CustomLogging.set_verbose_level(x) + + return click.option(*names, callback=_set_level, **kwargs)(f) + + return decorator + + +def show_trace_option(logging_mod=None, **kwargs): + """A decorator that add --show_trace/-st option to decorated command""" + + if not isinstance(logging_mod, CustomLogging): + raise TypeError("Logging object should be instance of CustomLogging.") + + names = ["--show_trace", "-st"] + kwargs.setdefault("is_flag", True) + kwargs.setdefault("default", False) + kwargs.setdefault("expose_value", False) + kwargs.setdefault("help", "Show the traceback for the exceptions") + kwargs.setdefault("is_eager", True) + + def decorator(f): + def _set_show_trace(ctx, param, value): + if value: + CustomLogging.enable_show_trace() + + return click.option(*names, callback=_set_show_trace, **kwargs)(f) + + return decorator diff --git a/framework/calm/dsl/cli/completion_commands.py b/framework/calm/dsl/cli/completion_commands.py new file mode 100644 index 0000000..9e7d9db --- /dev/null +++ b/framework/calm/dsl/cli/completion_commands.py @@ -0,0 +1,49 @@ +import click +import click_completion + +from .main import completion + + +@completion.command("show") +@click.option( + "-i", "--case-insensitive/--no-case-insensitive", help="Case insensitive completion" +) +@click.argument( + "shell", + required=False, + type=click_completion.DocumentedChoice(click_completion.core.shells), +) +def show(shell, case_insensitive): + """Show the click-completion-command completion code""" + extra_env = ( + {"_CLICK_COMPLETION_COMMAND_CASE_INSENSITIVE_COMPLETE": "ON"} + if case_insensitive + else {} + ) + click.echo(click_completion.core.get_code(shell, extra_env=extra_env)) + + +@completion.command("install") +@click.option( + "--append/--overwrite", help="Append the completion code to the file", default=None +) +@click.option( + "-i", "--case-insensitive/--no-case-insensitive", help="Case insensitive completion" +) +@click.argument( + "shell", + required=False, + type=click_completion.DocumentedChoice(click_completion.core.shells), +) +@click.argument("path", required=False) +def install(append, case_insensitive, shell, path): + """Install the click-completion-command completion""" + extra_env = ( + {"_CLICK_COMPLETION_COMMAND_CASE_INSENSITIVE_COMPLETE": "ON"} + if case_insensitive + else {} + ) + shell, path = click_completion.core.install( + shell=shell, path=path, append=append, extra_env=extra_env + ) + click.echo("%s completion installed in %s" % (shell, path)) diff --git a/framework/calm/dsl/cli/config_commands.py b/framework/calm/dsl/cli/config_commands.py new file mode 100644 index 0000000..3839307 --- /dev/null +++ b/framework/calm/dsl/cli/config_commands.py @@ -0,0 +1,10 @@ +from calm.dsl.config import get_context +from .main import show + + +@show.command("config") +def show_config(): + """Shows server configuration""" + + ContextObj = get_context() + 
ContextObj.print_config() diff --git a/framework/calm/dsl/cli/constants.py b/framework/calm/dsl/cli/constants.py new file mode 100644 index 0000000..f7ed0e0 --- /dev/null +++ b/framework/calm/dsl/cli/constants.py @@ -0,0 +1,503 @@ +class RUNLOG: + class STATUS: + SUCCESS = "SUCCESS" + PENDING = "PENDING" + RUNNING = "RUNNING" + FAILURE = "FAILURE" + WARNING = "WARNING" + ERROR = "ERROR" + APPROVAL = "APPROVAL" + APPROVAL_FAILED = "APPROVAL_FAILED" + ABORTED = "ABORTED" + ABORTING = "ABORTING" + SYS_FAILURE = "SYS_FAILURE" + SYS_ERROR = "SYS_ERROR" + SYS_ABORTED = "SYS_ABORTED" + ALREADY_RUN = "ALREADY_RUN" + TIMEOUT = "TIMEOUT" + INPUT = "INPUT" + CONFIRM = "CONFIRM" + PAUSED = "PAUSED" + + TERMINAL_STATES = [ + STATUS.SUCCESS, + STATUS.FAILURE, + STATUS.APPROVAL_FAILED, + STATUS.WARNING, + STATUS.ERROR, + STATUS.ABORTED, + STATUS.SYS_FAILURE, + STATUS.SYS_ERROR, + STATUS.SYS_ABORTED, + ] + FAILURE_STATES = [ + STATUS.FAILURE, + STATUS.APPROVAL_FAILED, + STATUS.WARNING, + STATUS.ERROR, + STATUS.ABORTED, + STATUS.SYS_FAILURE, + STATUS.SYS_ERROR, + STATUS.SYS_ABORTED, + ] + + +class JOBS: + class STATES: + ACTIVE = "ACTIVE" + DELETED = "DELETED" + INACTIVE = "INACTIVE" + + +class JOBINSTANCES: + class STATES: + SCHEDULED = "SCHEDULED" + RUNNING = "RUNNING" + SKIPPED = "SKIPPED" + FAILED = "FAILED" + + +class RUNBOOK: + class STATES: + ACTIVE = "ACTIVE" + DELETED = "DELETED" + DRAFT = "DRAFT" + ERROR = "ERROR" + + +class ENDPOINT: + class STATES: + ACTIVE = "ACTIVE" + DELETED = "DELETED" + DRAFT = "DRAFT" + ERROR = "ERROR" + + class TYPES: + HTTP = "HTTP" + WINDOWS = "Windows" + LINUX = "Linux" + + class VALUE_TYPES: + VM = "VM" + IP = "IP" + + +class BLUEPRINT: + class STATES: + ACTIVE = "ACTIVE" + DELETED = "DELETED" + DRAFT = "DRAFT" + ERROR = "ERROR" + + +class APPLICATION: + class STATES: + PROVISIONING = "provisioning" + STOPPED = "stopped" + RUNNING = "running" + ERROR = "error" + DELETED = "deleted" + DELETING = "deleting" + STARTING = "starting" + STOPPING = "stopping" + RESTARTING = "restarting" + BUSY = "busy" + TIMEOUT = "timeout" + RESTARTING = "restarting" + UPDATING = "updating" + + +class ACCOUNT: + class STATES: + DELETED = "DELETED" + VERIFIED = "VERIFIED" + NOT_VERIFIED = "NOT_VERIFIED" + VERIFY_FAILED = "VERIFY_FAILED" + DRAFT = "DRAFT" + ACTIVE = "ACTIVE" + UNSAVED = "UNSAVED" + + class TYPES: + AWS = "aws" + AHV = "nutanix" + KUBERNETES = "k8s" + AZURE = "azure" + GCP = "gcp" + VMWARE = "vmware" + + +class SINGLE_INPUT: + class TYPE: + TEXT = "text" + PASSWORD = "password" + CHECKBOX = "checkbox" + SELECT = "select" + SELECTMULTIPLE = "selectmultiple" + DATE = "date" + TIME = "time" + DATETIME = "datetime" + + VALID_TYPES = [ + TYPE.TEXT, + TYPE.PASSWORD, + TYPE.CHECKBOX, + TYPE.SELECT, + TYPE.SELECTMULTIPLE, + TYPE.DATE, + TYPE.TIME, + TYPE.DATETIME, + ] + + +class SYSTEM_ACTIONS: + CREATE = "create" + START = "start" + RESTART = "restart" + UPDATE = "update" + STOP = "stop" + DELETE = "delete" + SOFT_DELETE = "soft_delete" + + +class MARKETPLACE_ITEM: + class TYPES: + BLUEPRINT = "blueprint" + RUNBOOK = "runbook" + + class STATES: + PENDING = "PENDING" + ACCEPTED = "ACCEPTED" + REJECTED = "REJECTED" + PUBLISHED = "PUBLISHED" + + class SOURCES: + GLOBAL = "GLOBAL_STORE" + LOCAL = "LOCAL" + + +class TASKS: + class TASK_TYPES: + EXEC = "EXEC" + SET_VARIABLE = "SET_VARIABLE" + HTTP = "HTTP" + + class SCRIPT_TYPES: + POWERSHELL = "npsscript" + SHELL = "sh" + ESCRIPT = "static" + + class STATES: + ACTIVE = "ACTIVE" + DELETED = "DELETED" + DRAFT = "DRAFT" + + +class 
ERGON_TASK: + class STATUS: + QUEUED = "QUEUED" + RUNNING = "RUNNING" + ABORTED = "ABORTED" + SUCCEEDED = "SUCCEEDED" + SUSPENDED = "SUSPENDED" + FAILED = "FAILED" + + TERMINAL_STATES = [ + STATUS.SUCCEEDED, + STATUS.FAILED, + STATUS.ABORTED, + STATUS.SUSPENDED, + ] + + FAILURE_STATES = [STATUS.FAILED, STATUS.ABORTED, STATUS.SUSPENDED] + + +class ACP: + class ENTITY_FILTER_EXPRESSION_LIST: + DEVELOPER = [ + { + "operator": "IN", + "left_hand_side": {"entity_type": "image"}, + "right_hand_side": {"collection": "ALL"}, + }, + { + "operator": "IN", + "left_hand_side": {"entity_type": "marketplace_item"}, + "right_hand_side": {"collection": "SELF_OWNED"}, + }, + { + "operator": "IN", + "right_hand_side": {"collection": "ALL"}, + "left_hand_side": {"entity_type": "app_icon"}, + }, + { + "operator": "IN", + "right_hand_side": {"collection": "ALL"}, + "left_hand_side": {"entity_type": "category"}, + }, + { + "operator": "IN", + "left_hand_side": {"entity_type": "app_task"}, + "right_hand_side": {"collection": "SELF_OWNED"}, + }, + { + "operator": "IN", + "left_hand_side": {"entity_type": "app_variable"}, + "right_hand_side": {"collection": "SELF_OWNED"}, + }, + { + "operator": "IN", + "left_hand_side": {"entity_type": "virtual_network"}, + "right_hand_side": {"collection": "ALL"}, + }, + ] + + OPERATOR = [ + { + "operator": "IN", + "right_hand_side": {"collection": "ALL"}, + "left_hand_side": {"entity_type": "app_icon"}, + }, + { + "operator": "IN", + "right_hand_side": {"collection": "ALL"}, + "left_hand_side": {"entity_type": "category"}, + }, + ] + + CONSUMER = [ + { + "operator": "IN", + "left_hand_side": {"entity_type": "image"}, + "right_hand_side": {"collection": "ALL"}, + }, + { + "operator": "IN", + "left_hand_side": {"entity_type": "marketplace_item"}, + "right_hand_side": {"collection": "SELF_OWNED"}, + }, + { + "operator": "IN", + "right_hand_side": {"collection": "ALL"}, + "left_hand_side": {"entity_type": "app_icon"}, + }, + { + "operator": "IN", + "right_hand_side": {"collection": "ALL"}, + "left_hand_side": {"entity_type": "category"}, + }, + { + "operator": "IN", + "left_hand_side": {"entity_type": "app_task"}, + "right_hand_side": {"collection": "SELF_OWNED"}, + }, + { + "operator": "IN", + "left_hand_side": {"entity_type": "app_variable"}, + "right_hand_side": {"collection": "SELF_OWNED"}, + }, + { + "operator": "IN", + "left_hand_side": {"entity_type": "virtual_network"}, + "right_hand_side": {"collection": "ALL"}, + }, + ] + + PROJECT_ADMIN = [ + { + "operator": "IN", + "left_hand_side": {"entity_type": "image"}, + "right_hand_side": {"collection": "ALL"}, + }, + { + "operator": "IN", + "left_hand_side": {"entity_type": "marketplace_item"}, + "right_hand_side": {"collection": "SELF_OWNED"}, + }, + { + "operator": "IN", + "right_hand_side": {"collection": "ALL"}, + "left_hand_side": {"entity_type": "directory_service"}, + }, + { + "operator": "IN", + "right_hand_side": {"collection": "ALL"}, + "left_hand_side": {"entity_type": "role"}, + }, + { + "operator": "IN", + "right_hand_side": {"uuid_list": []}, + "left_hand_side": {"entity_type": "project"}, + }, + { + "operator": "IN", + "right_hand_side": {"collection": "ALL"}, + "left_hand_side": {"entity_type": "user"}, + }, + { + "operator": "IN", + "right_hand_side": {"collection": "ALL"}, + "left_hand_side": {"entity_type": "user_group"}, + }, + { + "operator": "IN", + "right_hand_side": {"collection": "SELF_OWNED"}, + "left_hand_side": {"entity_type": "environment"}, + }, + { + "operator": "IN", + "right_hand_side": 
{"collection": "ALL"}, + "left_hand_side": {"entity_type": "app_icon"}, + }, + { + "operator": "IN", + "right_hand_side": {"collection": "ALL"}, + "left_hand_side": {"entity_type": "category"}, + }, + { + "operator": "IN", + "left_hand_side": {"entity_type": "app_task"}, + "right_hand_side": {"collection": "SELF_OWNED"}, + }, + { + "operator": "IN", + "left_hand_side": {"entity_type": "app_variable"}, + "right_hand_side": {"collection": "SELF_OWNED"}, + }, + { + "operator": "IN", + "left_hand_side": {"entity_type": "virtual_network"}, + "right_hand_side": {"collection": "ALL"}, + }, + ] + + CUSTOM_ROLE_PERMISSIONS_FILTERS = [ + { + "permission": "view_image", + "filter": { + "operator": "IN", + "left_hand_side": {"entity_type": "image"}, + "right_hand_side": {"collection": "ALL"}, + }, + }, + { + "permission": "view_app_icon", + "filter": { + "operator": "IN", + "right_hand_side": {"collection": "ALL"}, + "left_hand_side": {"entity_type": "app_icon"}, + }, + }, + { + "permission": "view_name_category", + "filter": { + "operator": "IN", + "right_hand_side": {"collection": "ALL"}, + "left_hand_side": {"entity_type": "category"}, + }, + }, + { + "permission": "create_or_update_name_category", + "filter": { + "operator": "IN", + "right_hand_side": {"collection": "ALL"}, + "left_hand_side": {"entity_type": "category"}, + }, + }, + { + "permission": "view_environment", + "filter": { + "operator": "IN", + "left_hand_side": {"entity_type": "environment"}, + "right_hand_side": {"collection": "SELF_OWNED"}, + }, + }, + { + "permission": "view_marketplace_item", + "filter": { + "operator": "IN", + "left_hand_side": {"entity_type": "marketplace_item"}, + "right_hand_side": {"collection": "SELF_OWNED"}, + }, + }, + { + "permission": "view_user", + "filter": { + "operator": "IN", + "right_hand_side": {"collection": "ALL"}, + "left_hand_side": {"entity_type": "user"}, + }, + }, + { + "permission": "view_user_group", + "filter": { + "operator": "IN", + "right_hand_side": {"collection": "ALL"}, + "left_hand_side": {"entity_type": "user_group"}, + }, + }, + { + "permission": "view_role", + "filter": { + "operator": "IN", + "right_hand_side": {"collection": "ALL"}, + "left_hand_side": {"entity_type": "role"}, + }, + }, + { + "permission": "view_directory_service", + "filter": { + "operator": "IN", + "right_hand_side": {"collection": "ALL"}, + "left_hand_side": {"entity_type": "directory_service"}, + }, + }, + { + "permission": "search_directory_service", + "filter": { + "operator": "IN", + "right_hand_side": {"collection": "ALL"}, + "left_hand_side": {"entity_type": "directory_service"}, + }, + }, + { + "permission": "view_identity_provider", + "filter": { + "operator": "IN", + "right_hand_side": {"collection": "ALL"}, + "left_hand_side": {"entity_type": "identity_provider"}, + }, + }, + { + "permission": "view_app_task", + "filter": { + "operator": "IN", + "left_hand_side": {"entity_type": "app_task"}, + "right_hand_side": {"collection": "SELF_OWNED"}, + }, + }, + { + "permission": "view_app_variable", + "filter": { + "operator": "IN", + "left_hand_side": {"entity_type": "app_variable"}, + "right_hand_side": {"collection": "SELF_OWNED"}, + }, + }, + ] + + DEFAULT_CONTEXT = { + "scope_filter_expression_list": [ + { + "operator": "IN", + "left_hand_side": "PROJECT", + "right_hand_side": {"uuid_list": []}, + } + ], + "entity_filter_expression_list": [ + { + "operator": "IN", + "left_hand_side": {"entity_type": "ALL"}, + "right_hand_side": {"collection": "ALL"}, + } + ], + } diff --git 
a/framework/calm/dsl/cli/directory_service_commands.py b/framework/calm/dsl/cli/directory_service_commands.py new file mode 100644 index 0000000..8e69975 --- /dev/null +++ b/framework/calm/dsl/cli/directory_service_commands.py @@ -0,0 +1,40 @@ +import click + +from .directory_services import get_directory_services +from .main import get + + +@get.command("directory_services") +@click.option( + "--name", "-n", default=None, help="Search for directory services by name" +) +@click.option( + "--filter", + "filter_by", + "-f", + default=None, + help="Filter directory services by this string", +) +@click.option("--limit", "-l", default=20, help="Number of results to return") +@click.option( + "--offset", "-s", default=0, help="Offset results by the specified amount" +) +@click.option( + "--quiet", + "-q", + is_flag=True, + default=False, + help="Show only directory service names", +) +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format", +) +def _get_directory_services(name, filter_by, limit, offset, quiet, out): + """Get directory services, optionally filtered by a string""" + + get_directory_services(name, filter_by, limit, offset, quiet, out) diff --git a/framework/calm/dsl/cli/directory_services.py b/framework/calm/dsl/cli/directory_services.py new file mode 100644 index 0000000..c60de2e --- /dev/null +++ b/framework/calm/dsl/cli/directory_services.py @@ -0,0 +1,91 @@ +import click +import json +from prettytable import PrettyTable + +from calm.dsl.api import get_api_client +from calm.dsl.config import get_context +from calm.dsl.log import get_logging_handle + +from .utils import get_name_query, highlight_text + + +LOG = get_logging_handle(__name__) + + +def get_directory_services(name, filter_by, limit, offset, quiet, out): + """Get the directory services, optionally filtered by a string""" + + client = get_api_client() + + params = {"length": limit, "offset": offset} + filter_query = "" + if name: + filter_query = get_name_query([name]) + if filter_by: + filter_query = filter_query + ";(" + filter_by + ")" + if filter_query.startswith(";"): + filter_query = filter_query[1:] + + if filter_query: + params["filter"] = filter_query + + res, err = client.directory_service.list(params=params) + + if err: + context = get_context() + server_config = context.get_server_config() + pc_ip = server_config["pc_ip"] + + LOG.warning("Cannot fetch directory_services from {}".format(pc_ip)) + return + + res = res.json() + total_matches = res["metadata"]["total_matches"] + if total_matches > limit: + LOG.warning( + "Displaying {} out of {} entities. 
Please use --limit and --offset option for more results.".format( + limit, total_matches + ) + ) + + if out == "json": + click.echo(json.dumps(res, indent=4, separators=(",", ": "))) + return + + json_rows = res["entities"] + if not json_rows: + click.echo(highlight_text("No directory service found !!!\n")) + return + + if quiet: + for _row in json_rows: + row = _row["status"] + click.echo(highlight_text(row["name"])) + return + + table = PrettyTable() + table.field_names = [ + "NAME", + "DIRECTORY TYPE", + "DOMAIN NAME", + "URL", + "STATE", + "UUID", + ] + + for _row in json_rows: + row = _row["status"] + metadata = _row["metadata"] + + table.add_row( + [ + highlight_text(row["name"]), + highlight_text(row["resources"]["directory_type"]), + highlight_text(row["resources"]["domain_name"]), + highlight_text(row["resources"]["url"]), + highlight_text(row["state"]), + highlight_text(metadata["uuid"]), + ] + ) + + click.echo(table) diff --git a/framework/calm/dsl/cli/endpoint_commands.py b/framework/calm/dsl/cli/endpoint_commands.py new file mode 100644 index 0000000..25a18e1 --- /dev/null +++ b/framework/calm/dsl/cli/endpoint_commands.py @@ -0,0 +1,121 @@ +import click + +from calm.dsl.log import get_logging_handle + +from .main import compile, get, describe, delete, create, format +from .endpoints import ( + get_endpoint_list, + create_endpoint_command, + delete_endpoint, + describe_endpoint, + format_endpoint_command, + compile_endpoint_command, +) + +LOG = get_logging_handle(__name__) + + +@get.command("endpoints", feature_min_version="3.0.0", experimental=True) +@click.option("--name", "-n", default=None, help="Search for endpoints by name") +@click.option( + "--filter", "filter_by", "-f", default=None, help="Filter endpoints by this string" +) +@click.option("--limit", "-l", default=20, help="Number of results to return") +@click.option( + "--offset", "-o", default=0, help="Offset results by the specified amount" +) +@click.option( + "--quiet", "-q", is_flag=True, default=False, help="Show only endpoint names" +) +@click.option( + "--all-items", "-a", is_flag=True, help="Get all items, including deleted ones" +) +def _get_endpoint_list(name, filter_by, limit, offset, quiet, all_items): + """Get the endpoints, optionally filtered by a string""" + + get_endpoint_list(name, filter_by, limit, offset, quiet, all_items) + + +@create.command("endpoint", feature_min_version="3.0.0", experimental=True) +@click.option( + "--file", + "-f", + "endpoint_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + required=True, + help="Path of Endpoint file to upload", +) +@click.option("--name", "-n", default=None, help="Endpoint name (Optional)") +@click.option("--description", default=None, help="Endpoint description (Optional)") +@click.option( + "--force", + "-fc", + is_flag=True, + default=False, + help="Deletes an existing endpoint with the same name before creating.", +) +def _create_endpoint_command(endpoint_file, name, description, force): + """Creates an endpoint""" + + create_endpoint_command(endpoint_file, name, description, force) + + +@delete.command("endpoint", feature_min_version="3.0.0", experimental=True) +@click.argument("endpoint_names", nargs=-1) +def _delete_endpoint(endpoint_names): + """Deletes endpoints""" + + delete_endpoint(endpoint_names) + + +@describe.command("endpoint", feature_min_version="3.0.0", experimental=True) +@click.argument("endpoint_name") +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + 
default="text", + help="output format [text|json].", +) +def _describe_endpoint(endpoint_name, out): + """Describe a endpoint""" + + describe_endpoint(endpoint_name, out) + + +@format.command("endpoint", feature_min_version="3.0.0", experimental=True) +@click.option( + "--file", + "-f", + "endpoint_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + required=True, + help="Path of Endpoint file to format", +) +def _format_endpoint_command(endpoint_file): + """black formats the endpoint file""" + + format_endpoint_command(endpoint_file) + + +@compile.command("endpoint", feature_min_version="3.0.0", experimental=True) +@click.option( + "--file", + "-f", + "endpoint_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + required=True, + help="Path of Endpoint file to upload", +) +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["json", "yaml"]), + default="json", + help="output format [json|yaml].", +) +def _compile_endpoint_command(endpoint_file, out): + """Compiles a DSL (Python) endpoint into JSON or YAML""" + compile_endpoint_command(endpoint_file, out) diff --git a/framework/calm/dsl/cli/endpoints.py b/framework/calm/dsl/cli/endpoints.py new file mode 100644 index 0000000..8cb2bf4 --- /dev/null +++ b/framework/calm/dsl/cli/endpoints.py @@ -0,0 +1,454 @@ +import json +import time +import sys +import pathlib + +from ruamel import yaml +import arrow +import click +from prettytable import PrettyTable +from black import format_file_in_place, WriteBack, FileMode + +from calm.dsl.builtins.models.metadata_payload import get_metadata_payload +from calm.dsl.runbooks import Endpoint, create_endpoint_payload +from calm.dsl.config import get_context +from calm.dsl.api import get_api_client + +from calm.dsl.log import get_logging_handle +from calm.dsl.tools import get_module_from_file + +from .utils import get_name_query, highlight_text, get_states_filter +from .constants import ENDPOINT +from calm.dsl.constants import CACHE +from calm.dsl.store import Cache + +LOG = get_logging_handle(__name__) + + +def get_endpoint_list(name, filter_by, limit, offset, quiet, all_items): + """Get the endpoints, optionally filtered by a string""" + + client = get_api_client() + + params = {"length": limit, "offset": offset} + filter_query = "" + if name: + filter_query = get_name_query([name]) + if filter_by: + filter_query = filter_query + ";(" + filter_by + ")" + + if all_items: + filter_query += get_states_filter(ENDPOINT.STATES, state_key="_state") + if filter_query.startswith(";"): + filter_query = filter_query[1:] + + if filter_query: + params["filter"] = filter_query + + res, err = client.endpoint.list(params=params) + + if err: + ContextObj = get_context() + server_config = ContextObj.get_server_config() + pc_ip = server_config["pc_ip"] + LOG.warning("Cannot fetch endpoints from {}".format(pc_ip)) + return + + json_rows = res.json()["entities"] + if not json_rows: + click.echo(highlight_text("No endpoint found !!!\n")) + return + + if quiet: + for _row in json_rows: + row = _row["status"] + click.echo(highlight_text(row["name"])) + return + + table = PrettyTable() + table.field_names = [ + "NAME", + "TYPE", + "DESCRIPTION", + "PROJECT", + "STATE", + "CREATED BY", + "LAST UPDATED", + "UUID", + ] + for _row in json_rows: + row = _row["status"] + metadata = _row["metadata"] + + created_by = metadata.get("owner_reference", {}).get("name", "") + last_update_time = int(metadata["last_update_time"]) // 1000000 + project = ( + 
metadata["project_reference"]["name"] + if "project_reference" in metadata + else None + ) + + table.add_row( + [ + highlight_text(row["name"]), + highlight_text(row["type"]), + highlight_text(row["description"]), + highlight_text(project), + highlight_text(row["state"]), + highlight_text(created_by), + "{}".format(arrow.get(last_update_time).humanize()), + highlight_text(row["uuid"]), + ] + ) + click.echo(table) + + +def get_endpoint_module_from_file(endpoint_file): + """Return Endpoint module given a user endpoint dsl file (.py)""" + return get_module_from_file("calm.dsl.user_endpoint", endpoint_file) + + +def get_endpoint_class_from_module(user_endpoint_module): + """Returns endpoint class given a module""" + + UserEndpoint = None + for item in dir(user_endpoint_module): + obj = getattr(user_endpoint_module, item) + if isinstance(obj, type(Endpoint)): + if obj.__bases__[0] is Endpoint: + UserEndpoint = obj + + return UserEndpoint + + +def compile_endpoint(endpoint_file): + + user_endpoint_module = get_endpoint_module_from_file(endpoint_file) + UserEndpoint = get_endpoint_class_from_module(user_endpoint_module) + if UserEndpoint is None: + return None + + endpoint_payload = None + UserEndpointPayload, _ = create_endpoint_payload(UserEndpoint) + endpoint_payload = UserEndpointPayload.get_dict() + + return endpoint_payload + + +def compile_endpoint_command(endpoint_file, out): + + endpoint_payload = compile_endpoint(endpoint_file) + if endpoint_payload is None: + LOG.error("User endpoint not found in {}".format(endpoint_file)) + return + + metadata_payload = get_metadata_payload(endpoint_file) + project_cache_data = {} + project_name = "" + if "project_reference" in metadata_payload: + project_cache_data = metadata_payload["project_reference"] + project_name = metadata_payload["project_reference"]["name"] + else: + ContextObj = get_context() + project_config = ContextObj.get_project_config() + project_name = project_config["name"] + project_cache_data = Cache.get_entity_data( + CACHE.ENTITY.PROJECT, name=project_name + ) + + if not project_cache_data: + LOG.error( + "Project {} not found. 
Please run: calm update cache".format( + project_name + ) + ) + sys.exit(-1) + + project_uuid = project_cache_data.get("uuid", "") + endpoint_payload["metadata"]["project_reference"] = { + "kind": "project", + "uuid": project_uuid, + "name": project_name, + } + + if out == "json": + click.echo(json.dumps(endpoint_payload, indent=4, separators=(",", ": "))) + elif out == "yaml": + click.echo(yaml.dump(endpoint_payload, default_flow_style=False)) + else: + LOG.error("Unknown output format {} given".format(out)) + + +def get_endpoint(client, name, all=False): + + # find endpoint + params = {"filter": "name=={}".format(name)} + if not all: + params["filter"] += ";deleted==FALSE" + + res, err = client.endpoint.list(params=params) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + response = res.json() + entities = response.get("entities", None) + endpoint = None + if entities: + if len(entities) != 1: + raise Exception("More than one endpoint found - {}".format(entities)) + + LOG.info("{} found ".format(name)) + endpoint = entities[0] + else: + raise Exception("No endpoint found with name {} found".format(name)) + return endpoint + + +def create_endpoint( + client, + endpoint_payload, + name=None, + description=None, + force_create=False, +): + + endpoint_payload.pop("status", None) + + if name: + endpoint_payload["spec"]["name"] = name + endpoint_payload["metadata"]["name"] = name + + if description: + endpoint_payload["spec"]["description"] = description + + endpoint_resources = endpoint_payload["spec"]["resources"] + endpoint_name = endpoint_payload["spec"]["name"] + endpoint_desc = endpoint_payload["spec"]["description"] + user_project = endpoint_payload["metadata"].get("project_reference", {}) + + return client.endpoint.upload_with_secrets( + endpoint_name, + endpoint_desc, + endpoint_resources, + force_create=force_create, + project_reference=user_project, + ) + + +def create_endpoint_from_json( + client, + path_to_json, + name=None, + description=None, + force_create=False, + endpoint_metadata=None, +): + + endpoint_payload = json.loads(open(path_to_json, "r").read()) + + if endpoint_metadata and "project_reference" in endpoint_metadata: + endpoint_payload["metadata"]["project_reference"] = endpoint_metadata[ + "project_reference" + ] + + return create_endpoint( + client, + endpoint_payload, + name=name, + description=description, + force_create=force_create, + ) + + +def create_endpoint_from_dsl( + client, + endpoint_file, + name=None, + description=None, + force_create=False, + endpoint_metadata=None, +): + + endpoint_payload = compile_endpoint(endpoint_file) + if endpoint_payload is None: + err_msg = "User endpoint not found in {}".format(endpoint_file) + err = {"error": err_msg, "code": -1} + return None, err + + if endpoint_metadata and "project_reference" in endpoint_metadata: + endpoint_payload["metadata"]["project_reference"] = endpoint_metadata[ + "project_reference" + ] + + return create_endpoint( + client, + endpoint_payload, + name=name, + description=description, + force_create=force_create, + ) + + +def create_endpoint_command(endpoint_file, name, description, force): + """Creates a endpoint""" + + client = get_api_client() + + endpoint_metadata = get_metadata_payload(endpoint_file) + + if endpoint_file.endswith(".json"): + res, err = create_endpoint_from_json( + client, + endpoint_file, + name=name, + description=description, + force_create=force, + endpoint_metadata=endpoint_metadata, + ) + elif endpoint_file.endswith(".py"): + res, err = 
create_endpoint_from_dsl( + client, + endpoint_file, + name=name, + description=description, + force_create=force, + endpoint_metadata=endpoint_metadata, + ) + else: + LOG.error("Unknown file format {}".format(endpoint_file)) + return + + if err: + LOG.error(err["error"]) + return + + endpoint = res.json() + endpoint_uuid = endpoint["metadata"]["uuid"] + endpoint_name = endpoint["metadata"]["name"] + endpoint_status = endpoint.get("status", {}) + endpoint_state = endpoint_status.get("state", "DRAFT") + LOG.debug("Endpoint {} has state: {}".format(endpoint_name, endpoint_state)) + + if endpoint_state != "ACTIVE": + msg_list = endpoint_status.get("message_list", []) + if not msg_list: + LOG.debug(json.dumps(endpoint_status)) + LOG.error("Endpoint {} created with errors.".format(endpoint_name)) + sys.exit(-1) + + msgs = [] + for msg_dict in msg_list: + msgs.append(msg_dict.get("message", "")) + + LOG.error( + "Endpoint {} created with {} error(s): {}.".format( + endpoint_name, len(msg_list), msgs + ) + ) + sys.exit(-1) + + LOG.info("Endpoint {} created successfully.".format(endpoint_name)) + + ContextObj = get_context() + server_config = ContextObj.get_server_config() + pc_ip = server_config["pc_ip"] + pc_port = server_config["pc_port"] + link = "https://{}:{}/console/#page/explore/calm/endpoints/{}".format( + pc_ip, pc_port, endpoint_uuid + ) + + stdout_dict = {"name": endpoint_name, "link": link, "state": endpoint_state} + click.echo(json.dumps(stdout_dict, indent=4, separators=(",", ": "))) + + +def describe_endpoint(endpoint_name, out): + """Displays endpoint data""" + + client = get_api_client() + endpoint = get_endpoint(client, endpoint_name, all=True) + + res, err = client.endpoint.read(endpoint["metadata"]["uuid"]) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + endpoint = res.json() + + if out == "json": + endpoint.pop("status", None) + click.echo(json.dumps(endpoint, indent=4, separators=(",", ": "))) + return + + click.echo("\n----Endpoint Summary----\n") + click.echo( + "Name: " + + highlight_text(endpoint_name) + + " (uuid: " + + highlight_text(endpoint["metadata"]["uuid"]) + + ")" + ) + click.echo("Description: " + highlight_text(endpoint["status"]["description"])) + click.echo("Status: " + highlight_text(endpoint["status"]["state"])) + click.echo( + "Owner: " + highlight_text(endpoint["metadata"]["owner_reference"]["name"]), + nl=False, + ) + project = endpoint["metadata"].get("project_reference", {}) + click.echo(" Project: " + highlight_text(project.get("name", ""))) + + created_on = int(endpoint["metadata"]["creation_time"]) // 1000000 + past = arrow.get(created_on).humanize() + click.echo( + "Created: {} ({})".format( + highlight_text(time.ctime(created_on)), highlight_text(past) + ) + ) + last_updated = int(endpoint["metadata"]["last_update_time"]) // 1000000 + past = arrow.get(last_updated).humanize() + click.echo( + "Last Updated: {} ({})\n".format( + highlight_text(time.ctime(last_updated)), highlight_text(past) + ) + ) + endpoint_resources = endpoint.get("status").get("resources", {}) + + endpoint_type = endpoint_resources.get("type", "") + endpoint_attrs = endpoint_resources.get("attrs", {}) + click.echo("Type: {}".format(highlight_text(endpoint_type))) + if endpoint_type == ENDPOINT.TYPES.HTTP: + url = endpoint_attrs.get("url", "") + click.echo("URL: {}\n".format(highlight_text(url))) + else: + value_type = endpoint_attrs.get("value_type", "IP") + value_type += "s" + values = endpoint_attrs.get("values", []) + element_count = 
endpoint_resources.get("element_count") + click.echo("VM Count: {}".format(highlight_text(element_count))) + click.echo("{}: {}\n".format(value_type, highlight_text(values))) + + +def delete_endpoint(endpoint_names): + + client = get_api_client() + + for endpoint_name in endpoint_names: + endpoint = get_endpoint(client, endpoint_name) + endpoint_id = endpoint["metadata"]["uuid"] + res, err = client.endpoint.delete(endpoint_id) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + LOG.info("Endpoint {} deleted".format(endpoint_name)) + + +def format_endpoint_command(endpoint_file): + path = pathlib.Path(endpoint_file) + LOG.debug("Formatting endpoint {} using black".format(path)) + if format_file_in_place( + path, fast=False, mode=FileMode(), write_back=WriteBack.DIFF + ): + LOG.info("Patching above diff to endpoint - {}".format(path)) + format_file_in_place( + path, fast=False, mode=FileMode(), write_back=WriteBack.YES + ) + LOG.info("All done!") + else: + LOG.info("Endpoint {} left unchanged.".format(path)) diff --git a/framework/calm/dsl/cli/environment_commands.py b/framework/calm/dsl/cli/environment_commands.py new file mode 100644 index 0000000..d3955d5 --- /dev/null +++ b/framework/calm/dsl/cli/environment_commands.py @@ -0,0 +1,151 @@ +import click + +from .main import get, delete, create, update, compile +from .environments import ( + create_environment_from_dsl_file, + get_environment_list, + delete_environment, + update_environment_from_dsl_file, + compile_environment_command, +) + +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +@get.command("environments", feature_min_version="3.2.0") +@click.option("--name", "-n", default=None, help="Search for environments by name") +@click.option( + "--filter", + "filter_by", + "-f", + default=None, + help="Filter environments by this string", +) +@click.option("--limit", "-l", default=20, help="Number of results to return") +@click.option( + "--offset", "-s", default=0, help="Offset results by the specified amount" +) +@click.option( + "--quiet", "-q", is_flag=True, default=False, help="Show only environments names." 
+) +@click.option("--project", "-p", "project_name", help="Project name") +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format", +) +def _get_environment_list(name, filter_by, limit, offset, quiet, out, project_name): + """Get the environments, optionally filtered by a string""" + + get_environment_list(name, filter_by, limit, offset, quiet, out, project_name) + + +@delete.command("environment", feature_min_version="3.2.0") +@click.argument("environment_name") +@click.option("--project", "-p", "project_name", help="Project name", required=True) +@click.option( + "--no-cache-update", + "no_cache_update", + is_flag=True, + default=False, + help="if true, cache is not updated for project", +) +def _delete_environment(environment_name, project_name, no_cache_update): + """Deletes an environment""" + + delete_environment(environment_name, project_name, no_cache_update) + + +@create.command("environment", feature_min_version="3.2.0") +@click.option( + "--file", + "-f", + "env_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + required=True, + help="Path of environment file to create", +) +@click.option("--project", "-p", "project_name", help="Project name", required=True) +@click.option( + "--name", "-n", "env_name", default=None, help="Environment name (Optional)" +) +@click.option( + "--no-cache-update", + "no_cache_update", + is_flag=True, + default=False, + help="if true, cache is not updated for project", +) +def _create_environment(env_file, env_name, project_name, no_cache_update): + """ + Creates an environment in an existing project. + """ + + if env_file.endswith(".py"): + create_environment_from_dsl_file( + env_file, env_name, project_name, no_cache_update + ) + else: + LOG.error("Unknown file format {}".format(env_file)) + return + + +@update.command("environment", feature_min_version="3.2.0") +@click.argument("env_name") +@click.option("--project", "-p", "project_name", help="Project name", required=True) +@click.option( + "--file", + "-f", + "env_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + required=True, + help="Path of environment file to update", +) +@click.option( + "--no-cache-update", + "no_cache_update", + is_flag=True, + default=False, + help="if true, cache is not updated for project", +) +def _update_environment(env_name, project_name, env_file, no_cache_update): + """ + Updates the environment of an existing project. 
+ """ + + if env_file.endswith(".py"): + update_environment_from_dsl_file( + env_name, env_file, project_name, no_cache_update + ) + else: + LOG.error("Unknown file format {}".format(env_file)) + return + + +@compile.command("environment") +@click.option( + "--file", + "-f", + "env_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + required=True, + help="Path of Environment file", +) +@click.option("--project", "-p", "project_name", help="Project name", required=True) +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["json", "yaml"]), + default="json", + help="output format", +) +def _compile_environment_command(env_file, project_name, out): + """Compiles a DSL (Python) environment into JSON or YAML""" + + compile_environment_command(env_file, project_name, out) diff --git a/framework/calm/dsl/cli/environments.py b/framework/calm/dsl/cli/environments.py new file mode 100644 index 0000000..66a7466 --- /dev/null +++ b/framework/calm/dsl/cli/environments.py @@ -0,0 +1,511 @@ +import sys +import uuid +import click +import json +import time +import arrow +from prettytable import PrettyTable +from ruamel import yaml + +from calm.dsl.config import get_context +from calm.dsl.api import get_api_client +from calm.dsl.builtins import create_environment_payload, Environment +from calm.dsl.builtins.models.helper.common import get_project +from calm.dsl.tools import get_module_from_file +from calm.dsl.store import Cache +from calm.dsl.constants import CACHE +from calm.dsl.log import get_logging_handle + +from .utils import ( + get_name_query, + highlight_text, +) + +LOG = get_logging_handle(__name__) + + +def create_environment(env_payload): + + client = get_api_client() + + env_payload.pop("status", None) + + env_name = env_payload["spec"]["name"] + LOG.info("Creating environment '{}'".format(env_name)) + res, err = client.environment.create(env_payload) + if err: + LOG.error(err) + sys.exit(-1) + + res = res.json() + env_uuid = res["metadata"]["uuid"] + env_state = res["status"]["state"] + LOG.info( + "Environment '{}' created successfully. 
Environment state: '{}'".format( + env_name, env_state + ) + ) + + stdout_dict = {"name": env_name, "uuid": env_uuid} + + return stdout_dict + + +def compile_environment_dsl_class(env_cls, metadata=dict()): + """ + Helper to compile the environment class + Args: + env_cls (Environment object): class for environment + metadata (dict): Metadata object + Returns: + response (object): Response object containing environment object details + """ + + infra = getattr(env_cls, "providers", []) + if not infra: + LOG.warning( + "From Calm v3.2, providers(infra) will be required to use environment for blueprints/marketplace usage" + ) + + UserEnvPayload, _ = create_environment_payload(env_cls, metadata=metadata) + env_payload = UserEnvPayload.get_dict() + + # Pop default attribute from credentials + for cred in env_payload["spec"]["resources"].get("credential_definition_list", []): + cred.pop("default", None) + + # Adding uuid to creds and substrates + for cred in env_payload["spec"]["resources"].get("credential_definition_list", []): + cred["uuid"] = str(uuid.uuid4()) + + for sub in env_payload["spec"]["resources"].get("substrate_definition_list", []): + sub["uuid"] = str(uuid.uuid4()) + + # Adding uuid readiness-probe + cred_name_uuid_map = {} + for cred in env_payload["spec"]["resources"].get("credential_definition_list", []): + cred_name_uuid_map[cred["name"]] = cred["uuid"] + + for sub in env_payload["spec"]["resources"].get("substrate_definition_list", []): + try: + cred_ref_obj = sub["readiness_probe"]["login_credential_local_reference"] + cred_ref_obj["uuid"] = cred_name_uuid_map[cred_ref_obj["name"]] + except Exception: + pass + + # TODO check if credential ref is working in attributes consuming credentials + + return env_payload + + +def compile_environment_command(env_file, project_name, out): + """ + Compiles a DSL (Python) environment into JSON or YAML + Args: + env_file (str): Location for environment python file + project_name (str): Project name + out (str): Output format + Returns: + stdout (object): environment payload + """ + + # Update project on context + ContextObj = get_context() + ContextObj.update_project_context(project_name=project_name) + + user_env_module = get_environment_module_from_file(env_file) + UserEnvironment = get_env_class_from_module(user_env_module) + if UserEnvironment is None: + LOG.error("User environment not found in {}".format(env_file)) + return + + env_payload = compile_environment_dsl_class(UserEnvironment) + + # Reset context + ContextObj.reset_configuration() + + if out == "json": + click.echo(json.dumps(env_payload, indent=4, separators=(",", ": "))) + elif out == "yaml": + click.echo(yaml.dump(env_payload, default_flow_style=False)) + else: + LOG.error("Unknown output format {} given".format(out)) + + +def create_environment_from_dsl_class(env_cls, env_name="", metadata=dict()): + """ + Helper creates an environment from dsl environment class + Args: + env_cls (Environment object): class for environment + env_name (str): Environment name (Optional) + metadata (dict): Metadata object + Returns: + response (object): Response object containing environment object details + """ + + env_payload = compile_environment_dsl_class(env_cls, metadata) + + if env_name: + env_payload["spec"]["name"] = env_name + env_payload["metadata"]["name"] = env_name + + return create_environment(env_payload) + + +def update_project_envs(project_name, remove_env_uuids=[], add_env_uuids=[]): + """ + Update project with the environment reference list if not present + Args: + 
project_name(str): Name of project + remove_env_uuids(list): list of env uuids to be removed from project + add_env_uuids(list): list of env uuid to be added in project + Returns: None + """ + if not (remove_env_uuids or add_env_uuids): + return + + project_payload = get_project(project_name) + project_payload.pop("status", None) + + env_list = project_payload["spec"]["resources"].get( + "environment_reference_list", [] + ) + for _eu in add_env_uuids: + env_list.append({"kind": "environment", "uuid": _eu}) + + final_env_list = [] + for _edata in env_list: + if _edata["uuid"] not in remove_env_uuids: + final_env_list.append(_edata) + + project_payload["spec"]["resources"]["environment_reference_list"] = final_env_list + project_uuid = project_payload["metadata"]["uuid"] + + # TODO remove this infunction imports + from .projects import update_project + + return update_project(project_uuid, project_payload) + + +def get_environment_module_from_file(env_file): + """Returns Environment module given a user environment dsl file (.py)""" + return get_module_from_file("calm.dsl.user_environment", env_file) + + +def get_env_class_from_module(user_env_module): + """Returns environment class given a module""" + + UserEnvironment = None + for item in dir(user_env_module): + obj = getattr(user_env_module, item) + if isinstance(obj, type(Environment)): + if obj.__bases__[0] == Environment: + UserEnvironment = obj + + return UserEnvironment + + +def create_environment_from_dsl_file( + env_file, env_name, project_name, no_cache_update=False +): + """ + Helper creates an environment from dsl file (for calm_version >= 3.2) + Args: + env_file (str): Location for environment python file + env_name (str): Environment name + project_name (str): Project name + Returns: + response (object): Response object containing environment object details + """ + + # Update project on context + ContextObj = get_context() + ContextObj.update_project_context(project_name=project_name) + + user_env_module = get_environment_module_from_file(env_file) + UserEnvironment = get_env_class_from_module(user_env_module) + if UserEnvironment is None: + LOG.error("User environment not found in {}".format(env_file)) + return + + env_std_out = create_environment_from_dsl_class( + env_cls=UserEnvironment, env_name=env_name + ) + + # Reset context + ContextObj.reset_configuration() + + LOG.info("Updating project for environment configuration") + update_project_envs(project_name, add_env_uuids=[env_std_out.get("uuid")]) + LOG.info("Project updated successfully") + + click.echo(json.dumps(env_std_out, indent=4, separators=(",", ": "))) + + if no_cache_update: + LOG.info("skipping environments cache update") + else: + LOG.info("Updating environments cache ...") + Cache.add_one( + entity_type=CACHE.ENTITY.ENVIRONMENT, uuid=env_std_out.get("uuid") + ) + LOG.info("[Done]") + + +def update_environment_from_dsl_file( + env_name, env_file, project_name, no_cache_update=False +): + """ + Helper updates an environment from dsl file (for calm_version >= 3.2) + Args: + env_name (str): Environment name + env_file (str): Location for environment python file + project_name (str): Project name + Returns: + response (object): Response object containing environment object details + """ + + # Update project on context + ContextObj = get_context() + ContextObj.update_project_context(project_name=project_name) + + environment = get_environment(env_name, project_name) + environment_id = environment["metadata"]["uuid"] + + env_data_to_upload = 
get_environment_by_uuid(environment_id) + env_data_to_upload.pop("status", None) + + # TODO Merge these module-file logic to single helper + user_env_module = get_environment_module_from_file(env_file) + UserEnvironment = get_env_class_from_module(user_env_module) + if UserEnvironment is None: + LOG.error("User environment not found in {}".format(env_file)) + sys.exit("User environment not found in {}".format(env_file)) + + env_new_payload = compile_environment_dsl_class(UserEnvironment) + + # Overriding exsiting substrates and credentials (new-ones) + env_data_to_upload["spec"]["resources"][ + "substrate_definition_list" + ] = env_new_payload["spec"]["resources"]["substrate_definition_list"] + env_data_to_upload["spec"]["resources"][ + "credential_definition_list" + ] = env_new_payload["spec"]["resources"]["credential_definition_list"] + env_data_to_upload["spec"]["resources"]["infra_inclusion_list"] = env_new_payload[ + "spec" + ]["resources"]["infra_inclusion_list"] + + # Reset context + ContextObj.reset_configuration() + + # Update environment + LOG.info("Updating environment '{}'".format(env_name)) + client = get_api_client() + res, err = client.environment.update( + uuid=environment_id, payload=env_data_to_upload + ) + if err: + LOG.error(err) + sys.exit(err["error"]) + + res = res.json() + stdout_dict = { + "name": res["metadata"]["name"], + "uuid": res["metadata"]["uuid"], + } + click.echo(json.dumps(stdout_dict, indent=4, separators=(",", ": "))) + + if no_cache_update: + LOG.info("skipping environments cache update") + else: + LOG.info("Updating environments cache ...") + Cache.update_one(entity_type=CACHE.ENTITY.ENVIRONMENT, uuid=environment_id) + LOG.info("[Done]") + + +def get_project_environment(name=None, uuid=None, project_name=None, project_uuid=None): + """Get project and environment with the given project and environment name or uuid. 
Raises exception if + environment doesn't belong to the project""" + + client = get_api_client() + project_data = get_project(project_name, project_uuid) + project_uuid = project_data["metadata"]["uuid"] + project_name = project_data["status"]["name"] + environments = project_data["status"]["resources"]["environment_reference_list"] + project_environments = {row["uuid"]: True for row in environments} + + if not name and not uuid: + return None, project_data + + if uuid is None: + params = {"filter": "name=={};project_reference=={}".format(name, project_uuid)} + LOG.info( + "Searching for the environment {} under project {}".format( + name, project_name + ) + ) + res, err = client.environment.list(params=params) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + response = res.json() + entities = response.get("entities") + if not entities: + raise Exception( + "No environment with name {} found in project {}".format( + name, project_name + ) + ) + + environment = entities[0] + uuid = environment["metadata"]["uuid"] + + if not project_environments.get(uuid): + raise Exception( + "No environment with name {} found in project {}".format(name, project_name) + ) + + LOG.info("Environment {} found ".format(name)) + + # for getting additional fields + return get_environment_by_uuid(uuid), project_data + + +def get_environment_by_uuid(environment_uuid): + """Fetch the environment with the given name under the given project""" + LOG.info("Fetching details of environment (uuid='{}')".format(environment_uuid)) + client = get_api_client() + res, err = client.environment.read(environment_uuid) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + environment = res.json() + return environment + + +def get_environment_list(name, filter_by, limit, offset, quiet, out, project_name): + """Get the environment, optionally filtered by a string""" + + client = get_api_client() + + params = {"length": limit, "offset": offset} + filter_query = "" + if name: + filter_query = get_name_query([name]) + if filter_by: + filter_query = filter_query + ";(" + filter_by + ")" + if project_name: + project_data = get_project(project_name) + project_id = project_data["metadata"]["uuid"] + filter_query = filter_query + ";project_reference=={}".format(project_id) + if filter_query.startswith(";"): + filter_query = filter_query[1:] + + if filter_query: + params["filter"] = filter_query + + res, err = client.environment.list(params=params) + + if err: + context = get_context() + server_config = context.get_server_config() + pc_ip = server_config["pc_ip"] + + LOG.warning("Cannot fetch environments from {}".format(pc_ip)) + return + + if out == "json": + click.echo(json.dumps(res.json(), indent=4, separators=(",", ": "))) + return + + json_rows = res.json()["entities"] + if not json_rows: + click.echo(highlight_text("No environment found !!!\n")) + return + + if quiet: + for _row in json_rows: + row = _row["status"] + click.echo(highlight_text(row["name"])) + return + + table = PrettyTable() + table.field_names = [ + "NAME", + "PROJECT", + "STATE", + "CREATED ON", + "LAST UPDATED", + "UUID", + ] + for _row in json_rows: + row = _row["status"] + metadata = _row["metadata"] + + project = ( + metadata["project_reference"]["name"] + if "project_reference" in metadata + else None + ) + + creation_time = int(metadata["creation_time"]) // 1000000 + last_update_time = int(metadata["last_update_time"]) // 1000000 + + table.add_row( + [ + highlight_text(row["name"]), + 
highlight_text(project), + highlight_text(row["state"]), + highlight_text(time.ctime(creation_time)), + "{}".format(arrow.get(last_update_time).humanize()), + highlight_text(row.get("uuid", "")), + ] + ) + click.echo(table) + + +def get_environment(environment_name, project_name): + """returns the environment payload""" + + client = get_api_client() + payload = { + "length": 250, + "offset": 0, + "filter": "name=={}".format(environment_name), + } + + if project_name: + project = get_project(project_name) + project_id = project["metadata"]["uuid"] + payload["filter"] += ";project_reference=={}".format(project_id) + + res, err = client.environment.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + if res["metadata"]["total_matches"] == 0: + LOG.error("Environment '{}' not found".format(environment_name)) + sys.exit(-1) + + return res["entities"][0] + + +def delete_environment(environment_name, project_name, no_cache_update=False): + + client = get_api_client() + environment = get_environment(environment_name, project_name) + environment_id = environment["metadata"]["uuid"] + _, err = client.environment.delete(environment_id) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + LOG.info("Environment {} deleted".format(environment_name)) + + LOG.info("Updating project for environment configuration") + update_project_envs(project_name, remove_env_uuids=[environment_id]) + + if no_cache_update: + LOG.info("skipping environments cache update") + else: + LOG.info("Updating environments cache ...") + Cache.delete_one(entity_type=CACHE.ENTITY.ENVIRONMENT, uuid=environment_id) + LOG.info("[Done]") diff --git a/framework/calm/dsl/cli/group_commands.py b/framework/calm/dsl/cli/group_commands.py new file mode 100644 index 0000000..395f75f --- /dev/null +++ b/framework/calm/dsl/cli/group_commands.py @@ -0,0 +1,46 @@ +import click + +from .groups import get_groups, create_group, delete_group +from .main import get, create, delete + + +@get.command("groups") +@click.option("--name", "-n", default=None, help="Search for groups by name") +@click.option( + "--filter", "filter_by", "-f", default=None, help="Filter groups by this string" +) +@click.option("--limit", "-l", default=20, help="Number of results to return") +@click.option( + "--offset", "-s", default=0, help="Offset results by the specified amount" +) +@click.option( + "--quiet", "-q", is_flag=True, default=False, help="Show only group names" +) +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format", +) +def _get_groups(name, filter_by, limit, offset, quiet, out): + """Get groups, optionally filtered by a string""" + + get_groups(name, filter_by, limit, offset, quiet, out) + + +@create.command("group") +@click.option("--name", "-n", required=True, help="Distinguished name of group") +def _create_group(name): + """Creates a user-group""" + + create_group(name) + + +@delete.command("group") +@click.argument("group_names", nargs=-1) +def _delete_group(group_names): + """Deletes a group""" + + delete_group(group_names) diff --git a/framework/calm/dsl/cli/groups.py b/framework/calm/dsl/cli/groups.py new file mode 100644 index 0000000..dea88c6 --- /dev/null +++ b/framework/calm/dsl/cli/groups.py @@ -0,0 +1,181 @@ +import click +import json +import sys +from prettytable import PrettyTable + +from calm.dsl.api import get_api_client +from calm.dsl.builtins import Ref +from .task_commands import watch_task 
+from .constants import ERGON_TASK +from calm.dsl.config import get_context +from calm.dsl.store import Cache +from calm.dsl.constants import CACHE +from calm.dsl.log import get_logging_handle + +from .utils import get_name_query, highlight_text + + +LOG = get_logging_handle(__name__) + + +def get_groups(name, filter_by, limit, offset, quiet, out): + """Get the groups, optionally filtered by a string""" + + client = get_api_client() + + params = {"length": limit, "offset": offset} + filter_query = "" + if name: + filter_query = get_name_query([name]) + if filter_by: + filter_query = filter_query + ";(" + filter_by + ")" + if filter_query.startswith(";"): + filter_query = filter_query[1:] + + if filter_query: + params["filter"] = filter_query + + res, err = client.group.list(params=params) + + if err: + context = get_context() + server_config = context.get_server_config() + pc_ip = server_config["pc_ip"] + + LOG.warning("Cannot fetch groups from {}".format(pc_ip)) + return + + res = res.json() + total_matches = res["metadata"]["total_matches"] + if total_matches > limit: + LOG.warning( + "Displaying {} out of {} entities. Please use --limit and --offset option for more results.".format( + limit, total_matches + ) + ) + + if out == "json": + click.echo(json.dumps(res, indent=4, separators=(",", ": "))) + return + + json_rows = res["entities"] + if not json_rows: + click.echo(highlight_text("No group found !!!\n")) + return + + if quiet: + for _row in json_rows: + row = _row["status"] + name = row["resources"]["directory_service_user_group"][ + "distinguished_name" + ] + + # For user-groups having caps in the name + try: + name = _row["spec"]["resources"]["directory_service_user_group"][ + "distinguished_name" + ] + except Exception: + pass + click.echo(highlight_text(name)) + return + + table = PrettyTable() + table.field_names = ["NAME", "DISPLAY NAME", "TYPE", "STATE", "UUID"] + + for _row in json_rows: + row = _row["status"] + metadata = _row["metadata"] + + # For user-groups having caps in the name + name = row["resources"]["directory_service_user_group"]["distinguished_name"] + try: + name = _row["spec"]["resources"]["directory_service_user_group"][ + "distinguished_name" + ] + except Exception: + pass + + table.add_row( + [ + highlight_text(name), + highlight_text(row["resources"].get("display_name", "")), + highlight_text(row["resources"]["user_group_type"]), + highlight_text(row["state"]), + highlight_text(metadata["uuid"]), + ] + ) + + click.echo(table) + + +def create_group(name): + """creates user-group on pc""" + + client = get_api_client() + group_payload = { + "spec": { + "resources": {"directory_service_user_group": {"distinguished_name": name}} + }, + "metadata": {"kind": "user_group", "spec_version": 0}, + } + + res, err = client.group.create(group_payload) + if err: + LOG.error(err) + sys.exit(-1) + + res = res.json() + stdout_dict = { + "name": name, + "uuid": res["metadata"]["uuid"], + "execution_context": res["status"]["execution_context"], + } + click.echo(json.dumps(stdout_dict, indent=4, separators=(",", ": "))) + + user_group_uuid = res["metadata"]["uuid"] + LOG.info("Polling on user-group creation task") + task_state = watch_task( + res["status"]["execution_context"]["task_uuid"], poll_interval=5 + ) + if task_state in ERGON_TASK.FAILURE_STATES: + LOG.exception("User-Group creation task went to {} state".format(task_state)) + sys.exit(-1) + + # Update user-groups in cache + LOG.info("Updating user-groups cache ...") + 
Cache.add_one(entity_type=CACHE.ENTITY.USER_GROUP, uuid=user_group_uuid) + LOG.info("[Done]") + + +def delete_group(group_names): + """deletes user-group on pc""" + + client = get_api_client() + + deleted_group_uuids = [] + for name in group_names: + group_ref = Ref.Group(name) + res, err = client.group.delete(group_ref["uuid"]) + if err: + LOG.exception("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + deleted_group_uuids.append(group_ref["uuid"]) + LOG.info("Polling on user-group deletion task") + res = res.json() + task_state = watch_task( + res["status"]["execution_context"]["task_uuid"], poll_interval=5 + ) + if task_state in ERGON_TASK.FAILURE_STATES: + LOG.exception( + "User-Group deletion task went to {} state".format(task_state) + ) + sys.exit(-1) + + # Update user-groups in cache + if deleted_group_uuids: + LOG.info("Updating user-groups cache ...") + for _group_uuid in deleted_group_uuids: + Cache.delete_one(entity_type=CACHE.ENTITY.USER_GROUP, uuid=_group_uuid) + LOG.info("[Done]") diff --git a/framework/calm/dsl/cli/init_command.py b/framework/calm/dsl/cli/init_command.py new file mode 100644 index 0000000..fb21759 --- /dev/null +++ b/framework/calm/dsl/cli/init_command.py @@ -0,0 +1,454 @@ +import click +import os +import json +import sys + +from calm.dsl.config import ( + get_context, + set_dsl_config, + get_default_config_file, + get_default_db_file, + get_default_local_dir, + get_default_connection_config, + init_context, +) +from calm.dsl.db import init_db_handle +from calm.dsl.api import get_resource_api, get_client_handle_obj +from calm.dsl.store import Cache +from calm.dsl.init import init_bp, init_runbook +from calm.dsl.providers import get_provider_types + +from .main import init, set +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +@init.command("dsl") +@click.option( + "--ip", + "-i", + envvar="CALM_DSL_PC_IP", + default=None, + help="Prism Central server IP or hostname", +) +@click.option( + "--port", + "-P", + envvar="CALM_DSL_PC_PORT", + default=None, + help="Prism Central server port number", +) +@click.option( + "--username", + "-u", + envvar="CALM_DSL_PC_USERNAME", + default=None, + help="Prism Central username", +) +@click.option( + "--password", + "-p", + envvar="CALM_DSL_PC_PASSWORD", + default=None, + help="Prism Central password", +) +@click.option( + "--db_file", + "-d", + "db_file", + envvar="CALM_DSL_DB_LOCATION", + default=None, + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + help="Path to local database file", +) +@click.option( + "--local_dir", + "-ld", + envvar="CALM_DSL_LOCAL_DIR_LOCATION", + default=None, + type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True), + help="Path to local directory for storing secrets", +) +@click.option( + "--config", + "-cf", + "config_file", + envvar="CALM_DSL_CONFIG_FILE_LOCATION", + default=None, + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + help="Path to config file to store dsl configuration", +) +@click.option( + "--project", + "-pj", + "project_name", + envvar="CALM_DSL_DEFAULT_PROJECT", + help="Default project name used for entities", +) +def initialize_engine( + ip, + port, + username, + password, + project_name, + db_file, + local_dir, + config_file, +): + """ + \b + Initializes the calm dsl engine. + + NOTE: Env variables(if available) will be used as defaults for configuration + i.) CALM_DSL_PC_IP: Prism Central IP + ii.) 
CALM_DSL_PC_PORT: Prism Central Port + iii.) CALM_DSL_PC_USERNAME: Prism Central username + iv.) CALM_DSL_PC_PASSWORD: Prism Central password + v.) CALM_DSL_DEFAULT_PROJECT: Default project name + vi.) CALM_DSL_CONFIG_FILE_LOCATION: Default config file location where dsl config will be stored + vii.) CALM_DSL_LOCAL_DIR_LOCATION: Default local directory location to store secrets + viii.) CALM_DSL_DB_LOCATION: Default internal dsl db location + + """ + + set_server_details( + ip=ip, + port=port, + username=username, + password=password, + project_name=project_name, + db_file=db_file, + local_dir=local_dir, + config_file=config_file, + ) + init_db() + sync_cache() + + click.echo("\nHINT: To get started, follow the 3 steps below:") + click.echo("1. Initialize an example blueprint DSL: calm init bp") + click.echo( + "2. Create and validate the blueprint: calm create bp --file HelloBlueprint/blueprint.py" + ) + click.echo( + "3. Start an application using the blueprint: calm launch bp Hello --app_name HelloApp01 -i" + ) + + click.echo("\nKeep Calm and DSL On!\n") + + +def set_server_details( + ip, + port, + username, + password, + project_name, + db_file, + local_dir, + config_file, +): + + if not (ip and port and username and password and project_name): + click.echo("Please provide Calm DSL settings:\n") + + host = ip or click.prompt("Prism Central IP", default="") + port = port or click.prompt("Port", default="9440") + username = username or click.prompt("Username", default="admin") + password = password or click.prompt("Password", default="", hide_input=True) + project_name = project_name or click.prompt("Project", default="default") + + # Default log-level + log_level = "INFO" + + # Default connection params + default_connection_config = get_default_connection_config() + retries_enabled = default_connection_config["retries_enabled"] + connection_timeout = default_connection_config["connection_timeout"] + read_timeout = default_connection_config["read_timeout"] + + # Do not prompt for init config variables, Take default values for init.ini file + config_file = config_file or get_default_config_file() + local_dir = local_dir or get_default_local_dir() + db_file = db_file or get_default_db_file() + + LOG.info("Checking if Calm is enabled on Server") + + # Get temporary client handle + client = get_client_handle_obj(host, port, auth=(username, password)) + Obj = get_resource_api("services/nucalm/status", client.connection) + res, err = Obj.read() + + if err: + click.echo("[Fail]") + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + result = json.loads(res.content) + service_enablement_status = result["service_enablement_status"] + LOG.info(service_enablement_status) + + LOG.info("Verifying the project details") + project_name_uuid_map = client.project.get_name_uuid_map( + params={"filter": "name=={}".format(project_name)} + ) + if not project_name_uuid_map: + LOG.error("Project '{}' not found !!!".format(project_name)) + sys.exit(-1) + LOG.info("Project '{}' verified successfully".format(project_name)) + + # Writing configuration to file + set_dsl_config( + host=host, + port=port, + username=username, + password=password, + project_name=project_name, + log_level=log_level, + config_file=config_file, + db_location=db_file, + local_dir=local_dir, + retries_enabled=retries_enabled, + connection_timeout=connection_timeout, + read_timeout=read_timeout, + ) + + # Updating context for using latest config data + LOG.info("Updating context for using latest config file data") + 
init_context() + + +def init_db(): + LOG.info("Creating local database") + init_db_handle() + + +def sync_cache(): + Cache.sync() + + +@init.command("bp") +@click.option("--name", "-n", "bp_name", default="Hello", help="Name of blueprint") +@click.option( + "--dir_name", "-d", default=os.getcwd(), help="Directory path for the blueprint" +) +@click.option( + "--type", + "-t", + "provider_type", + type=click.Choice(get_provider_types()), + default="AHV_VM", + help="Provider type", +) +@click.option( + "--bp_type", + "-b", + "blueprint_type", + type=click.Choice(["SINGLE_VM", "MULTI_VM"]), + default="MULTI_VM", + help="Blueprint type", +) +def init_dsl_bp(bp_name, dir_name, provider_type, blueprint_type): + """Creates a starting directory for blueprint""" + + if not bp_name.isidentifier(): + LOG.error("Blueprint name '{}' is not a valid identifier".format(bp_name)) + sys.exit(-1) + + init_bp(bp_name, dir_name, provider_type, blueprint_type) + + +@init.command("runbook", feature_min_version="3.0.0", experimental=True) +@click.option("--name", "-n", "runbook_name", default="Hello", help="Name of runbook") +@click.option( + "--dir_name", "-d", default=os.getcwd(), help="Directory path for the runbook" +) +def init_dsl_runbook(runbook_name, dir_name): + """Creates a starting directory for runbook""" + + if not runbook_name.isidentifier(): + LOG.error("Runbook name '{}' is not a valid identifier".format(runbook_name)) + sys.exit(-1) + + init_runbook(runbook_name, dir_name) + + +# @init.command("scheduler", feature_min_version="3.3.0", experimental=True) +# @click.option("--name", "-n", "job_name", default="Hello", help="Name of job") +# @click.option( +# "--dir_name", "-d", default=os.getcwd(), help="Directory path for the scheduler" +# ) +# def init_dsl_scheduler(job_name, dir_name): +# """Creates a starting directory for runbook""" +# +# if not job_name.isidentifier(): +# LOG.error("Job name '{}' is not a valid identifier".format(job_name)) +# sys.exit(-1) +# +# init_scheduler(job_name, dir_name) + + +@set.command("config") +@click.option( + "--ip", + "-i", + "host", + envvar="PRISM_SERVER_IP", + default=None, + help="Prism Central server IP or hostname", +) +@click.option( + "--port", + "-P", + envvar="PRISM_SERVER_PORT", + default=None, + help="Prism Central server port number", +) +@click.option( + "--username", + "-u", + envvar="PRISM_USERNAME", + default=None, + help="Prism Central username", +) +@click.option( + "--password", + "-p", + envvar="PRISM_PASSWORD", + default=None, + help="Prism Central password", +) +@click.option("--project", "-pj", "project_name", help="Project name for entity") +@click.option( + "--db_file", + "-d", + "db_location", + envvar="DATABASE_LOCATION", + default=None, + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + help="Path to local database file", +) +@click.option( + "--local_dir", + "-ld", + envvar="LOCAL_DIR", + default=None, + type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True), + help="Path to local directory for storing secrets", +) +@click.option("--log_level", "-l", default=None, help="Default log level") +@click.option( + "--retries-enabled/--retries-disabled", + "-re/-rd", + default=None, + help="Retries enabled/disabled", +) +@click.option( + "--connection-timeout", + "-ct", + type=int, + help="connection timeout", +) +@click.option( + "--read-timeout", + "-rt", + type=int, + help="read timeout", +) +@click.argument("config_file", required=False) +def _set_config( + host, + port, + username, + 
password, + project_name, + db_location, + log_level, + config_file, + local_dir, + retries_enabled, + connection_timeout, + read_timeout, +): + """writes the configuration to config files i.e. config.ini and init.ini + + \b + Note: Cache will be updated if supplied host is different from configured host. + """ + + # Fetching context object + ContextObj = get_context() + + server_config = ContextObj.get_server_config() + + # Update cache if there is change in host ip + update_cache = host != server_config["pc_ip"] if host else False + host = host or server_config["pc_ip"] + username = username or server_config["pc_username"] + port = port or server_config["pc_port"] + password = password or server_config["pc_password"] + + project_config = ContextObj.get_project_config() + project_name = project_name or project_config.get("name") or "default" + + LOG.info("Checking if Calm is enabled on Server") + + # Get temporary client handle + client = get_client_handle_obj(host, port, auth=(username, password)) + Obj = get_resource_api("services/nucalm/status", client.connection) + res, err = Obj.read() + + if err: + click.echo("[Fail]") + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + result = json.loads(res.content) + service_enablement_status = result["service_enablement_status"] + LOG.info(service_enablement_status) + + LOG.info("Verifying the project details") + project_name_uuid_map = client.project.get_name_uuid_map( + params={"filter": "name=={}".format(project_name)} + ) + if not project_name_uuid_map: + LOG.error("Project '{}' not found !!!".format(project_name)) + sys.exit(-1) + LOG.info("Project '{}' verified successfully".format(project_name)) + + log_config = ContextObj.get_log_config() + log_level = log_level or log_config.get("level") or "INFO" + + # Take init_configuration from user params or init file + init_config = ContextObj.get_init_config() + config_file = ( + config_file or ContextObj._CONFIG_FILE or init_config["CONFIG"]["location"] + ) + db_location = db_location or init_config["DB"]["location"] + local_dir = local_dir or init_config["LOCAL_DIR"]["location"] + + # Get connection config + connection_config = ContextObj.get_connection_config() + if retries_enabled is None: # Not supplied in command + retries_enabled = connection_config["retries_enabled"] + connection_timeout = connection_timeout or connection_config["connection_timeout"] + read_timeout = read_timeout or connection_config["read_timeout"] + + # Set the dsl configuration + set_dsl_config( + host=host, + port=port, + username=username, + password=password, + project_name=project_name, + db_location=db_location, + log_level=log_level, + local_dir=local_dir, + config_file=config_file, + retries_enabled=retries_enabled, + connection_timeout=connection_timeout, + read_timeout=read_timeout, + ) + LOG.info("Configuration changed successfully") + + # Updating context for using latest config data + init_context() + if update_cache: + sync_cache() diff --git a/framework/calm/dsl/cli/library_tasks.py b/framework/calm/dsl/cli/library_tasks.py new file mode 100644 index 0000000..5619d07 --- /dev/null +++ b/framework/calm/dsl/cli/library_tasks.py @@ -0,0 +1,539 @@ +import os +import json +import time +import sys +import ntpath + +import arrow +import click +from prettytable import PrettyTable + +from calm.dsl.config import get_context +from calm.dsl.api import get_api_client +from calm.dsl.log import get_logging_handle +from calm.dsl.tools import get_module_from_file +from calm.dsl.builtins import TaskType 
+from .utils import ( + get_name_query, + highlight_text, + get_states_filter, +) +from .constants import TASKS + +# from anytree import NodeMixin, RenderTree + +LOG = get_logging_handle(__name__) + + +def get_tasks_list(name, filter_by, limit, offset, quiet, all_items): + """Get the tasks, optionally filtered by a string""" + + client = get_api_client() + context = get_context() + server_config = context.get_server_config() + + params = {"length": limit, "offset": offset} + filter_query = "" + if name: + filter_query = get_name_query([name]) + if filter_by: + filter_query = filter_query + ";(" + filter_by + ")" + + if all_items: + filter_query += get_states_filter(TASKS.STATES) + if filter_query.startswith(";"): + filter_query = filter_query[1:] + + if filter_query: + params["filter"] = filter_query + + res, err = client.task.list(params=params) + + if err: + pc_ip = server_config["pc_ip"] + LOG.warning("Cannot fetch tasks from {}".format(pc_ip)) + return + + json_rows = res.json()["entities"] + if not json_rows: + click.echo(highlight_text("No tasks found !!!\n")) + return + + if quiet: + for _row in json_rows: + row = _row["status"] + click.echo(highlight_text(row["name"])) + return + + table = PrettyTable() + table.field_names = [ + "NAME", + "DESCRIPTION", + "PROJECT", + "STATE", + "TASK TYPE", + "SCRIPT TYPE", + "CREATED BY", + "LAST UPDATED", + "UUID", + ] + for _row in json_rows: + row = _row["status"] + metadata = _row["metadata"] + + created_by = metadata.get("owner_reference", {}).get("name", "-") + last_update_time = int(metadata["last_update_time"]) // 1000000 + projects = [] + for project in row["resources"]["project_reference_list"]: + projects.append(project["name"]) + + table.add_row( + [ + highlight_text(row["name"]), + highlight_text(row["description"]), + highlight_text(",".join(projects)), + highlight_text(row["state"]), + highlight_text(row["resources"]["type"]), + highlight_text( + row.get("resources", {}).get("attrs", {}).get("script_type", "") + ), + highlight_text(created_by), + "{}".format(arrow.get(last_update_time).humanize()), + highlight_text(metadata["uuid"]), + ] + ) + click.echo(table) + + +def describe_task(task_name, out): + """Displays task data""" + + client = get_api_client() + task = get_task(client, task_name, all=True) + + res, err = client.task.read(task["metadata"]["uuid"]) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + task = res.json() + + if out == "json": + click.echo(json.dumps(task, indent=4, separators=(",", ": "))) + return + + projects = [] + for project in task["status"]["resources"]["project_reference_list"]: + projects.append(project["name"]) + + click.echo("\n----Task Summary----\n") + click.echo( + "Name: " + + highlight_text(task_name) + + " (uuid: " + + highlight_text(task["metadata"]["uuid"]) + + ")" + ) + click.echo("Description: " + highlight_text(task["status"]["description"])) + click.echo("Status: " + highlight_text(task["status"]["state"])) + click.echo("Task Type: " + highlight_text(task["status"]["resources"]["type"])) + if task["status"]["resources"]["type"] != TASKS.TASK_TYPES.HTTP: + click.echo( + "Script Type: " + + highlight_text( + task["status"] + .get("resources", {}) + .get("attrs", {}) + .get("script_type", "") + ) + ) + if task["status"]["resources"]["type"] == TASKS.TASK_TYPES.SET_VARIABLE: + click.echo( + "Output Variables: " + + highlight_text( + task["status"] + .get("resources", {}) + .get("attrs", {}) + .get("eval_variables", []) + ) + ) + click.echo( + "Owner: " + 
highlight_text(task["metadata"]["owner_reference"]["name"]), + nl=False, + ) + click.echo(" Projects: " + highlight_text(",".join(projects))) + + created_on = int(task["metadata"]["creation_time"]) // 1000000 + past = arrow.get(created_on).humanize() + click.echo( + "Created: {} ({})".format( + highlight_text(time.ctime(created_on)), highlight_text(past) + ) + ) + if task["status"]["resources"]["type"] == TASKS.TASK_TYPES.HTTP: + click.echo( + "Request URL: " + + highlight_text(task["status"]["resources"]["attrs"]["url"]) + ) + click.echo( + "Request Method: " + + highlight_text(task["status"]["resources"]["attrs"]["method"]) + ) + click.echo( + "Content Type: " + + highlight_text(task["status"]["resources"]["attrs"]["content_type"]) + ) + click.echo( + "Headers: " + + highlight_text( + json.dumps(task["status"]["resources"]["attrs"]["headers"]) + ) + ) + click.echo( + "Expected Response Options: " + + highlight_text( + json.dumps( + task["status"]["resources"]["attrs"]["expected_response_params"] + ) + ) + ) + else: + click.echo( + "Script Data: \n\n" + + highlight_text(task["status"]["resources"]["attrs"]["script"]) + ) + + +def get_task(client, name, all=False): + + # find task + params = {"filter": "name=={}".format(name)} + if not all: + params["filter"] += ";state!=DELETED" + + res, err = client.task.list(params=params) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + response = res.json() + entities = response.get("entities", None) + task = None + if entities: + if len(entities) != 1: + raise Exception("More than one task found - {}".format(entities)) + + LOG.info("{} found ".format(name)) + task = entities[0] + else: + raise Exception("No task found with name {}".format(name)) + return task + + +def delete_task(task_names): + + client = get_api_client() + + for task_name in task_names: + task = get_task(client, task_name) + task_id = task["metadata"]["uuid"] + res, err = client.task.delete(task_id) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + LOG.info("Task Library item {} deleted".format(task_name)) + + +def create_update_library_task(client, task_payload, name=None, force_create=None): + """Create/Update Task library item""" + + task_payload.pop("status", None) + task_payload.get("metadata").pop("uuid", None) + task_payload.get("metadata").pop("last_update_time", None) + task_payload.get("metadata").pop("owner_reference", None) + task_payload.get("metadata").pop("creation_time", None) + + # check if task with the given name already exists + params = {"filter": "name=={};state!=DELETED".format(name)} + res, err = client.task.list(params=params) + + if err: + return None, err + + response = res.json() + entities = response.get("entities", None) + if entities: + if len(entities) > 0: + if not force_create: + err_msg = "Task Library item {} already exists. Use --force to delete existing task library item before create.".format( + name + ) + # ToDo: Add command to edit Tasks Library + err = {"error": err_msg, "code": -1} + return None, err + + # --force option used in create. Delete existing task library item with same name. 
+ task_uuid = entities[0]["metadata"]["uuid"] + _, err = client.task.delete(task_uuid) + if err: + return None, err + + context = get_context() + project_config = context.get_project_config() + project_name = project_config["name"] + + # Fetch project details + params = {"filter": "name=={}".format(project_name)} + res, err = client.project.list(params=params) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + response = res.json() + entities = response.get("entities", None) + if not entities: + raise Exception("No project with name {} exists".format(project_name)) + + project_id = entities[0]["metadata"]["uuid"] + + # Setting project reference + task_payload["metadata"]["project_reference"] = { + "kind": "project", + "uuid": project_id, + "name": project_name, + } + + res, err = client.task.create(task_payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + LOG.error("Failed to create Task Library item {}".format(name)) + sys.exit(-1) + + LOG.info("Task Library item '{}' created successfully.".format(name)) + + return res, err + + +def get_library_task_classes(library_task_dsl=None): + """Get Task Library deployment classes""" + + task_library_item = None + if not library_task_dsl: + return [] + + tl_module = get_module_from_file("calm.dsl.library_task", library_task_dsl) + for item in dir(tl_module): + obj = getattr(tl_module, item) + if isinstance(obj, type(TaskType)): + if type(obj) == (TaskType): + task_library_item = obj + + return task_library_item + + +def create_library_task_payload(name, task_type, attrs, description, out_vars=None): + """Create Task Library payload""" + + task_resources = { + "type": task_type, + "variable_list": [], + } + + if task_type == "HTTP": + task_resources["attrs"] = attrs + else: + script_type = attrs.get("script_type") + script = attrs.get("script") + if out_vars: + out_vars = out_vars.split(",") + else: + out_vars = attrs.get("eval_variables", None) + + task_resources["attrs"] = {"script": script, "script_type": script_type} + + if out_vars: + task_resources["attrs"]["eval_variables"] = out_vars + + task_payload = { + "spec": { + "name": name, + "description": description or "", + "resources": task_resources, + }, + "metadata": {"spec_version": 1, "name": name, "kind": "app_task"}, + "api_version": "3.0", + } + + return task_payload + + +def compile_library_task(path_to_dsl): + """Compile Task Library item""" + + TaskLibraryItem = get_library_task_classes(path_to_dsl) + task_dict = TaskLibraryItem.get_dict() + + name = task_dict.get("name") + task_type = task_dict.get("type") + description = task_dict.get("description") + attrs = task_dict.get("attrs") + + task_payload = create_library_task_payload( + name, task_type, attrs, description, out_vars=None + ) + + return task_payload + + +def create_library_task_from_json( + client, path_to_json, name=None, description=None, force_create=False +): + """Create Task Library from json""" + + with open(path_to_json, "r") as f: + task_payload = json.loads(f.read()) + + if name: + task_payload["spec"]["name"] = name + else: + name = task_payload.get("spec").get("name") + + if description: + task_payload["spec"]["description"] = description + + return create_update_library_task( + client, + task_payload, + name=name, + force_create=force_create, + ) + + +def create_library_task_from_dsl( + client, path_to_dsl, name=None, description=None, force_create=False +): + """Create Task Library from DSL""" + + task_payload = compile_library_task(path_to_dsl) + + 
if name: + task_payload["spec"]["name"] = name + else: + name = task_payload.get("spec").get("name") + + if description: + task_payload["spec"]["description"] = description + + return create_update_library_task( + client, + task_payload, + name=name, + force_create=force_create, + ) + + +def create_library_task_using_script_file( + client, + task_file, + script_type, + task_type, + out_vars=None, + name=None, + description=None, + force_create=False, +): + """Create Task Library from Script""" + + with open(task_file, "r") as f: + task_file_content = f.read() + + if not name: + task_file_name = ntpath.basename(task_file) + name = os.path.splitext(task_file_name.replace(" ", "_"))[0] + + if task_file_content is None: + err_msg = "User task not found in {}".format(task_file) + err = {"error": err_msg, "code": -1} + return None, err + + attrs = { + "script_type": script_type, + "script": task_file_content, + } + + task_payload = create_library_task_payload( + name, task_type, attrs, description, out_vars=out_vars + ) + + return create_update_library_task( + client, + task_payload, + name=name, + force_create=force_create, + ) + + +def create_task(task_file, name, description, force): + """Creates a task library item""" + + client = get_api_client() + + if task_file.endswith(".json"): + + res, err = create_library_task_from_json( + client, task_file, name=name, description=description, force_create=force + ) + elif task_file.endswith(".py"): + + res, err = create_library_task_from_dsl( + client, task_file, name=name, description=description, force_create=force + ) + else: + LOG.error("Unknown file format {}".format(task_file)) + return + + if err: + LOG.error(err["error"]) + return + + +def import_task(task_file, name, description, out_vars, force): + """Imports a task library item""" + + client = get_api_client() + + if ( + task_file.endswith(".sh") + or task_file.endswith(".escript") + or task_file.endswith(".ps1") + ): + if task_file.endswith(".sh"): + script_type = TASKS.SCRIPT_TYPES.SHELL + elif task_file.endswith(".escript"): + script_type = TASKS.SCRIPT_TYPES.ESCRIPT + elif task_file.endswith(".ps1"): + script_type = TASKS.SCRIPT_TYPES.POWERSHELL + + if out_vars is not None: + task_type = TASKS.TASK_TYPES.SET_VARIABLE + else: + task_type = TASKS.TASK_TYPES.EXEC + + res, err = create_library_task_using_script_file( + client, + task_file, + script_type, + task_type, + out_vars=out_vars, + name=name, + description=description, + force_create=force, + ) + elif task_file.endswith(".py") or task_file.endswith(".json"): + LOG.error( + "Unknown file format. Please use 'calm create library task' command for (.py & .json)." 
+ ) + return + else: + LOG.error("Unknown file format {}".format(task_file)) + return + + if err: + LOG.error(err["error"]) + return diff --git a/framework/calm/dsl/cli/library_tasks_commands.py b/framework/calm/dsl/cli/library_tasks_commands.py new file mode 100644 index 0000000..4e5b8c4 --- /dev/null +++ b/framework/calm/dsl/cli/library_tasks_commands.py @@ -0,0 +1,158 @@ +import click + +from calm.dsl.log import get_logging_handle + +from .main import ( + library_get, + library_import, + library_create, + library_describe, + library_delete, +) +from .library_tasks import ( + get_tasks_list, + describe_task, + delete_task, + create_task, + import_task, +) + +LOG = get_logging_handle(__name__) + + +@library_get.command("tasks") +@click.option( + "--name", "-n", default=None, help="Search for task from task library by name" +) +@click.option( + "--filter", + "filter_by", + "-f", + default=None, + help="Filter tasks from task library by this string", +) +@click.option("--limit", "-l", default=20, help="Number of results to return") +@click.option( + "--offset", "-o", default=0, help="Offset results by the specified amount" +) +@click.option( + "--quiet", + "-q", + is_flag=True, + default=False, + help="Show only task from task library names.", +) +@click.option( + "--all-items", "-a", is_flag=True, help="Get all items, including deleted ones" +) +def _get_tasks_list(name, filter_by, limit, offset, quiet, all_items): + """Get the task from task library, optionally filtered by a string""" + + get_tasks_list(name, filter_by, limit, offset, quiet, all_items) + + +@library_describe.command("task") +@click.argument("task_name") +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format", +) +def _describe_task(task_name, out): + """Describe a task from task library""" + + describe_task(task_name, out) + + +@library_delete.command("task") +@click.argument("task_names", nargs=-1) +def _delete_task(task_names): + """Deletes a task from task library""" + + delete_task(task_names) + + +@library_import.command("task") +@click.option( + "--file", + "-f", + "task_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + required=True, + help="Path of task file (.json, .sh, .escript, .ps1)", +) +@click.option("--name", "-n", default=None, help="Task Library item name (Optional)") +@click.option( + "--description", "-d", default=None, help="Task Library description (Optional)" +) +@click.option( + "--out-vars", + "-v", + "out_vars", + default=None, + help="Set-variable output variables coma seperated (,) (Optional)", +) +@click.option( + "--force", + "-fc", + is_flag=True, + default=False, + help="Updates existing task library item with the same name.", +) +def _import_task(task_file, name, description, out_vars, force): + + """Import task library item. + + (-f | --file) supports:\n + + \t.sh - Shell script file\n + \t.escript - Escript file\n + \t.ps1 - Powershell Script File\n + + Note:\n + To import Set-Variable task, use --out-vars="OUT1,OUT2". 
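+ Scripts imported without --out-vars are created as EXEC tasks.\n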
+ + Examples:\n + calm import library task --name="Install IIS" -f Install_IIS.ps1\n + calm import library task -f Install_Docker.sh\n + calm import library task -f Install_Docker.sh --out-vars="IP_ADDRESS,PORT" """ + + import_task(task_file, name, description, out_vars, force) + + +@library_create.command("task") +@click.option( + "--file", + "-f", + "task_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + required=True, + help="Path of task file (.py/.json)", +) +@click.option("--name", "-n", default=None, help="Task Library item name (Optional)") +@click.option( + "--description", "-d", default=None, help="Task Library description (Optional)" +) +@click.option( + "--force", + "-fc", + is_flag=True, + default=False, + help="Updates existing task library item with the same name.", +) +def _create_task(task_file, name, description, force): + + """Create task library item. + + (-f | --file) supports:\n + \t.py - Python DSL\n + \t.json - Full json payload download from Calm API (v3 #GET) or using `calm describe library task -o json `\n + + Examples:\n + calm create library task --name=HTTPGetVM -f HTTPGetVM.py\n + calm create library task -f HTTPGetVM.json\n""" + + create_task(task_file, name, description, force) diff --git a/framework/calm/dsl/cli/main.py b/framework/calm/dsl/cli/main.py new file mode 100644 index 0000000..c18f462 --- /dev/null +++ b/framework/calm/dsl/cli/main.py @@ -0,0 +1,499 @@ +from ruamel import yaml +import click +import json +import copy + +import click_completion +import click_completion.core +from click_repl import repl +from prettytable import PrettyTable + +# TODO - move providers to separate file +from calm.dsl.providers import get_provider, get_provider_types +from calm.dsl.api import get_api_client, get_resource_api +from calm.dsl.log import get_logging_handle +from calm.dsl.config import get_context +from calm.dsl.store import Cache + +from .version_validator import validate_version +from .click_options import simple_verbosity_option, show_trace_option +from .utils import FeatureFlagGroup, highlight_text + +CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"]) + +click_completion.init() +LOG = get_logging_handle(__name__) + + +@click.group(cls=FeatureFlagGroup, context_settings=CONTEXT_SETTINGS) +@simple_verbosity_option(LOG) +@show_trace_option(LOG) +@click.option( + "--config", + "-c", + "config_file", + default=None, + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + help="Path to config file, defaults to ~/.calm/config.ini", +) +@click.option( + "--sync", + "-s", + "sync", + is_flag=True, + default=False, + help="Update cache before running command", +) +@click.version_option("3.6.1") +@click.pass_context +def main(ctx, config_file, sync): + """Calm CLI + + \b + Commonly used commands: + calm get apps -> Get list of apps + calm get bps -> Get list of blueprints + calm launch bp --app_name Fancy-App-1 MyFancyBlueprint -> Launch a new app from an existing blueprint + calm create bp -f sample_bp.py --name Sample-App-3 -> Upload a new blueprint from a python DSL file + calm describe app Fancy-App-1 -> Describe an existing app + calm app Fancy-App-1 -w my_action -> Run an action on an app + calm get runbooks -> Get list of runbooks + calm describe runbook MyFancyRunbook -> Describe an existing runbook + calm create runbook -f sample_rb.py --name Sample-RB -> Upload a new runbook from a python DSL file + calm run runbook MyFancyRunbook -> Runs the existing runbook MyFancyRunbook + calm 
run runbook -f sample_rb.py -> Runs the runbook from a python DSL file + calm get execution_history -> Get list of runbook executions + calm get endpoints -> Get list of endpoints + calm create endpoint -f sample_ep.py --name Sample-Endpoint -> Upload a new endpoint from a python DSL file""" + ctx.ensure_object(dict) + ctx.obj["verbose"] = True + try: + validate_version() + except Exception: + LOG.debug("Could not validate version") + pass + if config_file: + ContextObj = get_context() + ContextObj.update_config_file_context(config_file=config_file) + if sync: + Cache.sync() + + +@main.group(cls=FeatureFlagGroup) +def validate(): + """Validate provider specs""" + pass + + +@validate.command("provider_spec") +@click.option( + "--file", + "-f", + "spec_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + required=True, + help="Path of provider spec file", +) +@click.option( + "--type", + "-t", + "provider_type", + type=click.Choice(get_provider_types()), + default="AHV_VM", + help="Provider type", +) +def validate_provider_spec(spec_file, provider_type): + """validates provider spec for given provider""" + + with open(spec_file) as f: + spec = yaml.safe_load(f.read()) + + try: + Provider = get_provider(provider_type) + Provider.validate_spec(spec) + + LOG.info("File {} is a valid {} spec.".format(spec_file, provider_type)) + except Exception as ee: + LOG.info("File {} is invalid {} spec".format(spec_file, provider_type)) + raise Exception(ee.message) + + +@main.group(cls=FeatureFlagGroup) +def get(): + """Get various things like blueprints, apps: `get apps`, `get bps`, `get endpoints` and `get runbooks` are the primary ones.""" + pass + + +@main.group(cls=FeatureFlagGroup) +@click.pass_context +def show(ctx): + """Shows the cached data(Dynamic data) etc.""" + pass + + +def make_default_short_help(help, max_length=45): + """Return a condensed version of help string.""" + if not help: + return "" + + words = help.split() + total_length = 0 + result = [] + done = False + + for word in words: + if word[-1:] == ".": + done = True + new_length = 1 + len(word) if result else len(word) + if total_length + new_length > max_length: + result.append("...") + done = True + else: + if result: + result.append(" ") + result.append(word) + if done: + break + total_length += new_length + + return "".join(result) + + +@show.command("commands") +@click.pass_context +def show_all_commands(ctx): + """show all commands of dsl cli""" + + ctx_root = ctx.find_root() + root_cmd = ctx_root.command + + commands_queue = [] + commands_res_list = [] + + for subcommand in root_cmd.list_commands(ctx): + cmd = root_cmd.get_command(ctx, subcommand) + + if isinstance(cmd, FeatureFlagGroup): + commands_queue.append([subcommand, cmd]) + else: + if root_cmd.experimental_cmd_map.get(subcommand, False): + is_experimental = True + else: + is_experimental = "-" + commands_res_list.append( + ( + subcommand, + getattr(cmd, "__doc__", ""), + root_cmd.feature_version_map.get(subcommand, "-"), + is_experimental, + ) + ) + + while commands_queue: + ele = commands_queue.pop(0) + grp = ele.pop(len(ele) - 1) + + for subcommand in grp.list_commands(ctx): + cmd = grp.get_command(ctx, subcommand) + + if isinstance(cmd, FeatureFlagGroup): + ele_temp = copy.deepcopy(ele) + ele_temp.extend([subcommand, cmd]) + commands_queue.append(ele_temp) + else: + ele_temp = copy.deepcopy(ele) + ele_temp.append(subcommand) + if grp.experimental_cmd_map.get(subcommand, False): + is_experimental = True + else: + is_experimental = 
"-" + commands_res_list.append( + ( + " ".join(ele_temp), + getattr(cmd, "__doc__", ""), + grp.feature_version_map.get(subcommand, "-"), + is_experimental, + ) + ) + + table = PrettyTable() + table.field_names = ["COMMAND", "HELP", "MIN COMMAND VERSION", "EXPERIMENTAL"] + + for cmd_tuple in commands_res_list: + cmd_str = "{} {}".format(ctx_root.command_path, cmd_tuple[0]) + cmd_help = make_default_short_help(cmd_tuple[1]) + table.add_row( + [ + highlight_text(cmd_str), + highlight_text(cmd_help), + highlight_text(cmd_tuple[2]), + highlight_text(cmd_tuple[3]), + ] + ) + + # left align the command column + table.align["COMMAND"] = "l" + click.echo(table) + + +@main.group(cls=FeatureFlagGroup) +def clear(): + """Clear the data stored in local db: cache, secrets etc.""" + pass + + +@main.group(cls=FeatureFlagGroup) +def init(): + """Initializes the dsl for basic configs and bp directory etc.""" + pass + + +@get.group(cls=FeatureFlagGroup) +def server(): + """Get calm server details""" + pass + + +@server.command("status") +def get_server_status(): + """Get calm server connection status""" + + LOG.info("Checking if Calm is enabled on Server") + client = get_api_client() + Obj = get_resource_api("services/nucalm/status", client.connection) + res, err = Obj.read() + + if err: + click.echo("[Fail]") + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + result = json.loads(res.content) + service_enablement_status = result["service_enablement_status"] + + res, err = client.version.get_calm_version() + calm_version = res.content.decode("utf-8") + + LOG.info(service_enablement_status) + LOG.info("Server URL: {}".format(client.connection.base_url)) + LOG.info("Calm Version: {}".format(calm_version)) + + res, err = client.version.get_pc_version() + if not err: + res = res.json() + pc_version = res["version"] + LOG.info("PC Version: {}".format(pc_version)) + + +@main.group(cls=FeatureFlagGroup) +def format(): + """Format blueprint using black""" + pass + + +@main.group(cls=FeatureFlagGroup) +def compile(): + """Compile blueprint to json / yaml""" + pass + + +@main.group(cls=FeatureFlagGroup) +def decompile(): + """ """ + pass + + +@main.group(cls=FeatureFlagGroup) +def create(): + """Create entities in Calm (blueprint, project, endpoint, runbook)""" + pass + + +@main.group(cls=FeatureFlagGroup) +def delete(): + """Delete entities""" + pass + + +@main.group(cls=FeatureFlagGroup) +def launch(): + """Launch blueprints to create Apps""" + pass + + +@main.group(cls=FeatureFlagGroup) +def publish(): + """Publish blueprints to marketplace""" + pass + + +@main.group(cls=FeatureFlagGroup) +def approve(): + """Approve blueprints in marketplace manager""" + pass + + +@main.group(cls=FeatureFlagGroup) +def unpublish(): + """Unpublish blueprints from marketplace""" + pass + + +@main.group(cls=FeatureFlagGroup) +def reject(): + """Reject blueprints from marketplace manager""" + pass + + +@main.group(cls=FeatureFlagGroup) +def describe(): + """Describe apps, blueprints, projects, accounts, endpoints, runbooks""" + pass + + +@main.group(cls=FeatureFlagGroup) +def run(): + """Run actions in an app or runbooks""" + pass + + +@main.group(cls=FeatureFlagGroup) +def watch(): + """Track actions running on apps or runbook executions""" + pass + + +@main.group(cls=FeatureFlagGroup) +def pause(): + """Pause running runbook executions""" + pass + + +@main.group(cls=FeatureFlagGroup) +def resume(): + """resume paused runbook executions""" + pass + + +@main.group(cls=FeatureFlagGroup) +def abort(): + """Abort runbook 
executions""" + pass + + +@main.group(cls=FeatureFlagGroup) +def reset(): + """Reset entity""" + + +@create.command("provider_spec") +@click.option( + "--type", + "provider_type", + "-t", + type=click.Choice(get_provider_types()), + default="AHV_VM", + help="Provider type", +) +def create_provider_spec(provider_type): + """Creates a provider_spec""" + + Provider = get_provider(provider_type) + Provider.create_spec() + + +@main.group(cls=FeatureFlagGroup) +def update(): + """Update entities""" + pass + + +@main.group(cls=FeatureFlagGroup) +def download(): + """Download entities""" + pass + + +completion_cmd_help = """Shell completion for click-completion-command +Available shell types: +\b + %s +Default type: auto +""" % "\n ".join( + "{:<12} {}".format(k, click_completion.core.shells[k]) + for k in sorted(click_completion.core.shells.keys()) +) + + +@main.group(cls=FeatureFlagGroup, help=completion_cmd_help) +def completion(): + pass + + +@main.command("prompt") +def calmrepl(): + """Enable an interactive prompt shell + + > :help + + REPL help: + + External Commands: + + prefix external commands with "!" + + Internal Commands: + + prefix internal commands with ":" + + :exit, :q, :quit exits the repl + + :?, :h, :help displays general help information""" + repl(click.get_current_context()) + + +@main.group(cls=FeatureFlagGroup) +def set(): + """Sets the entities""" + pass + + +@main.group("import", cls=FeatureFlagGroup) +def calm_import(): + """Import entities in Calm (task library)""" + pass + + +@get.group("library") +def library_get(): + """Get Library entities""" + pass + + +@create.group("library") +def library_create(): + """Create Library entities""" + pass + + +@calm_import.group("library") +def library_import(): + """Import Library entities""" + pass + + +@describe.group("library") +def library_describe(): + """Describe Library entities""" + pass + + +@delete.group("library") +def library_delete(): + """Delete Library entities""" + pass + + +@main.group(cls=FeatureFlagGroup) +def sync(): + """Sync platform account""" + pass diff --git a/framework/calm/dsl/cli/marketplace.py b/framework/calm/dsl/cli/marketplace.py new file mode 100644 index 0000000..35f9793 --- /dev/null +++ b/framework/calm/dsl/cli/marketplace.py @@ -0,0 +1,1997 @@ +import uuid +import click +import sys +import json +import os + +from prettytable import PrettyTable +from distutils.version import LooseVersion as LV + +from calm.dsl.builtins import BlueprintType, get_valid_identifier +from calm.dsl.decompile.decompile_render import create_bp_dir +from calm.dsl.decompile.file_handler import get_bp_dir +from calm.dsl.api import get_api_client, get_resource_api +from calm.dsl.config import get_context + +from .utils import highlight_text, get_states_filter, Display +from .bps import launch_blueprint_simple, get_blueprint +from .runbooks import get_runbook, poll_action, watch_runbook +from .apps import watch_app +from .runlog import get_runlog_status +from .endpoints import get_endpoint +from calm.dsl.builtins.models.helper.common import get_project +from .environments import get_project_environment +from calm.dsl.log import get_logging_handle +from calm.dsl.store import Version +from .constants import MARKETPLACE_ITEM + +LOG = get_logging_handle(__name__) +APP_STATES = [ + MARKETPLACE_ITEM.STATES.PENDING, + MARKETPLACE_ITEM.STATES.ACCEPTED, + MARKETPLACE_ITEM.STATES.REJECTED, + MARKETPLACE_ITEM.STATES.PUBLISHED, +] +APP_SOURCES = [ + MARKETPLACE_ITEM.SOURCES.GLOBAL, + MARKETPLACE_ITEM.SOURCES.LOCAL, +] + + +def 
get_app_family_list(): + """returns the app family list categories""" + + client = get_api_client() + Obj = get_resource_api("categories/AppFamily", client.connection) + + res, err = Obj.list(params={}) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + res = res.json() + categories = [] + + for entity in res["entities"]: + categories.append(entity["value"]) + + return categories + + +def get_group_data_value(data_list, field, value_list=False): + """to find the field value in group api call + return whole list of values if value_list is True + """ + + for entity in data_list: + if entity["name"] == field: + entity_value = entity["values"] + if not entity_value: + return None + + return ( + entity_value[0]["values"] + if value_list + else entity_value[0]["values"][0] + ) + + return None + + +def trunc_string(data=None, max_length=50): + + if not data: + return "-" + + if len(data) > max_length: + return data[: max_length - 1] + "..." + + return data + + +def get_mpis_group_call( + name=None, + app_family="All", + app_states=[], + group_member_count=0, + app_source=None, + app_group_uuid=None, + type=None, + filter_by="", +): + """ + To call groups() api for marketplace items + if group_member_count is 0, it will not apply the group_count filter + """ + + client = get_api_client() + filter = "marketplace_item_type_list==APP" + + if app_states: + filter += get_states_filter(state_key="app_state", states=app_states) + + if app_family != "All": + filter += ";category_name==AppFamily;category_value=={}".format(app_family) + + if filter_by: + filter = filter + ";(" + filter_by + ")" + + if name: + filter += ";name=={}".format(name) + + if app_source: + filter += ";app_source=={}".format(app_source) + + if app_group_uuid: + filter += ";app_group_uuid=={}".format(app_group_uuid) + + CALM_VERSION = Version.get_version("Calm") + if type and LV(CALM_VERSION) >= LV("3.2.0"): + filter += ";type=={}".format(type) + + payload = { + "group_member_sort_attribute": "version", + "group_member_sort_order": "DESCENDING", + "grouping_attribute": "app_group_uuid", + "group_count": 64, + "group_offset": 0, + "filter_criteria": filter, + "entity_type": "marketplace_item", + "group_member_attributes": [ + {"attribute": "name"}, + {"attribute": "type"}, + {"attribute": "author"}, + {"attribute": "version"}, + {"attribute": "categories"}, + {"attribute": "owner_reference"}, + {"attribute": "owner_username"}, + {"attribute": "project_names"}, + {"attribute": "project_uuids"}, + {"attribute": "app_state"}, + {"attribute": "description"}, + {"attribute": "spec_version"}, + {"attribute": "app_attribute_list"}, + {"attribute": "app_group_uuid"}, + {"attribute": "icon_list"}, + {"attribute": "change_log"}, + {"attribute": "app_source"}, + ], + } + + if group_member_count: + payload["group_member_count"] = group_member_count + + # TODO Create GroupAPI separately for it. 
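+ # Illustrative shape of the groups API response consumed below and by
+ # get_group_data_value() (an informal sketch, keys trimmed for brevity):
+ #   {"filtered_group_count": 1,
+ #    "group_results": [{"entity_results": [{"entity_id": "<uuid>",
+ #        "data": [{"name": "name", "values": [{"values": ["<item name>"]}]}, ...]}]}]}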
+ Obj = get_resource_api("groups", client.connection) + res, err = Obj.create(payload=payload) + + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + res = res.json() + return res + + +def get_marketplace_store_items( + name, quiet, app_family, display_all, filter_by="", type=None +): + """Lists marketplace store items""" + + group_member_count = 0 + if not display_all: + group_member_count = 1 + + res = get_mpis_group_call( + name=name, + app_family=app_family, + app_states=[MARKETPLACE_ITEM.STATES.PUBLISHED], + group_member_count=group_member_count, + filter_by=filter_by, + type=type, + ) + group_results = res["group_results"] + + if quiet: + for group in group_results: + entity_results = group["entity_results"] + entity_data = entity_results[0]["data"] + click.echo(highlight_text(get_group_data_value(entity_data, "name"))) + return + + table = PrettyTable() + field_names = ["NAME", "TYPE", "DESCRIPTION", "AUTHOR", "APP_SOURCE"] + if display_all: + field_names.insert(1, "VERSION") + field_names.insert(2, "AVAILABLE TO") + field_names.append("UUID") + + table.field_names = field_names + + for group in group_results: + entity_results = group["entity_results"] + + for entity in entity_results: + entity_data = entity["data"] + project_names = get_group_data_value( + entity_data, "project_names", value_list=True + ) + available_to = "-" + if project_names: + project_count = len(project_names) + if project_count == 1: + available_to = "{} Project".format(project_count) + else: + available_to = "{} Projects".format(project_count) + + data_row = [ + highlight_text(get_group_data_value(entity_data, "name")), + highlight_text(get_group_data_value(entity_data, "type")), + highlight_text( + trunc_string(get_group_data_value(entity_data, "description")) + ), + highlight_text(get_group_data_value(entity_data, "author")), + highlight_text(get_group_data_value(entity_data, "app_source")), + ] + + if display_all: + data_row.insert( + 1, highlight_text(get_group_data_value(entity_data, "version")) + ) + data_row.insert(2, highlight_text(available_to)) + data_row.append(highlight_text(entity["entity_id"])) + + table.add_row(data_row) + + click.echo(table) + + +def get_marketplace_items( + name, quiet, app_family, app_states=[], filter_by="", type=None +): + """List all the marketlace items listed in the manager""" + + res = get_mpis_group_call( + name=name, + app_family=app_family, + app_states=app_states, + filter_by=filter_by, + type=type, + ) + group_results = res["group_results"] + + if quiet: + for group in group_results: + entity_results = group["entity_results"] + entity_data = entity_results[0]["data"] + click.echo(highlight_text(get_group_data_value(entity_data, "name"))) + return + + table = PrettyTable() + field_names = [ + "NAME", + "TYPE", + "APP_SOURCE", + "OWNER", + "AUTHOR", + "AVAILABLE TO", + "VERSION", + "CATEGORY", + "STATUS", + "UUID", + ] + + table.field_names = field_names + + for group in group_results: + entity_results = group["entity_results"] + + for entity in entity_results: + entity_data = entity["data"] + project_names = get_group_data_value( + entity_data, "project_names", value_list=True + ) + available_to = "-" + if project_names: + project_count = len(project_names) + if project_count == 1: + available_to = "{} Project".format(project_count) + else: + available_to = "{} Projects".format(project_count) + + categories = get_group_data_value(entity_data, "categories") + category = "-" + if categories: + category = categories.split(":")[1] + 
+ owner = get_group_data_value(entity_data, "owner_username") + if not owner: + owner = "-" + + data_row = [ + highlight_text(get_group_data_value(entity_data, "name")), + highlight_text(get_group_data_value(entity_data, "type")), + highlight_text(get_group_data_value(entity_data, "app_source")), + highlight_text(owner), + highlight_text(get_group_data_value(entity_data, "author")), + highlight_text(available_to), + highlight_text(get_group_data_value(entity_data, "version")), + highlight_text(category), + highlight_text(get_group_data_value(entity_data, "app_state")), + highlight_text(entity["entity_id"]), + ] + + table.add_row(data_row) + + click.echo(table) + + +def get_mpi_latest_version(name, app_source=None, app_states=[], type=None): + + res = get_mpis_group_call( + name=name, + app_states=app_states, + group_member_count=1, + app_source=app_source, + type=type, + ) + group_results = res["group_results"] + + if not group_results: + LOG.error("No Marketplace Item found with name {}".format(name)) + sys.exit(-1) + + entity_results = group_results[0]["entity_results"] + entity_version = get_group_data_value(entity_results[0]["data"], "version") + + return entity_version + + +def get_mpi_by_name_n_version(name, version, app_states=[], app_source=None, type=None): + """ + It will fetch marketplace item with particular version. + Special case: As blueprint with state REJECTED and other can coexist with same name and version + """ + + client = get_api_client() + filter = "name==" + name + ";version==" + version + + if app_states: + filter += get_states_filter(state_key="app_state", states=app_states) + + if app_source: + filter += ";app_source=={}".format(app_source) + + CALM_VERSION = Version.get_version("Calm") + if type and LV(CALM_VERSION) >= LV("3.2.0"): + filter += ";type=={}".format(type) + + payload = {"length": 250, "filter": filter} + + LOG.debug("Calling list api on marketplace_items") + res, err = client.market_place.list(params=payload) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + res = res.json() + if not res["entities"]: + LOG.error( + "No Marketplace Item found with name {} and version {}".format( + name, version + ) + ) + sys.exit(-1) + + app_uuid = res["entities"][0]["metadata"]["uuid"] + LOG.debug("Reading marketplace_item with uuid {}".format(app_uuid)) + res, err = client.market_place.read(app_uuid) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + res = res.json() + return res + + +def describe_marketplace_store_item( + name, out, version=None, app_source=None, type=None +): + """describes the marketplace blueprint related to marketplace item""" + + describe_marketplace_item( + name=name, + out=out, + version=version, + app_source=app_source, + app_state=MARKETPLACE_ITEM.STATES.PUBLISHED, + type=type, + ) + + +def describe_marketplace_item( + name, out, version=None, app_source=None, app_state=None, type=None +): + """describes the marketplace item""" + + CALM_VERSION = Version.get_version("Calm") + + app_states = [app_state] if app_state else [] + if not version: + LOG.info("Fetching latest version of Marketplace Item {} ".format(name)) + version = get_mpi_latest_version( + name=name, app_source=app_source, app_states=app_states, type=type + ) + LOG.info(version) + + LOG.info("Fetching details of Marketplace Item {}".format(name)) + mpi = get_mpi_by_name_n_version( + name=name, + version=version, + app_states=app_states, + app_source=app_source, + type=type, + ) + + if out == "json": + 
blueprint = mpi["status"]["resources"]["app_blueprint_template"] + blueprint.pop("status", None) + click.echo(json.dumps(blueprint, indent=4, separators=(",", ": "))) + return + + click.echo("\n----MarketPlace Item Summary----\n") + click.echo( + "Name: " + + highlight_text(name) + + " (uuid: " + + highlight_text(mpi["metadata"]["uuid"]) + + ")" + ) + + if LV(CALM_VERSION) >= LV("3.2.0"): + click.echo("Type: " + highlight_text(mpi["status"]["resources"]["type"])) + + click.echo("Description: " + highlight_text(mpi["status"]["description"])) + click.echo("App State: " + highlight_text(mpi["status"]["resources"]["app_state"])) + click.echo("Author: " + highlight_text(mpi["status"]["resources"]["author"])) + + project_name_list = mpi["status"]["resources"]["project_reference_list"] + click.echo( + "Projects shared with [{}]: ".format(highlight_text(len(project_name_list))) + ) + for project in project_name_list: + click.echo("\t{}".format(highlight_text(project["name"]))) + + categories = mpi["metadata"].get("categories", {}) + if categories: + click.echo("Categories [{}]: ".format(highlight_text(len(categories)))) + for key, value in categories.items(): + click.echo("\t {} : {}".format(highlight_text(key), highlight_text(value))) + + change_log = mpi["status"]["resources"]["change_log"] + if not change_log: + change_log = "No logs present" + + click.echo("Change Log: " + highlight_text(change_log)) + click.echo("Version: " + highlight_text(mpi["status"]["resources"]["version"])) + click.echo( + "App Source: " + highlight_text(mpi["status"]["resources"]["app_source"]) + ) + + mpi_type = MARKETPLACE_ITEM.TYPES.BLUEPRINT + if LV(CALM_VERSION) >= LV("3.2.0"): + mpi_type = mpi["status"]["resources"]["type"] + + if mpi_type == MARKETPLACE_ITEM.TYPES.BLUEPRINT: + blueprint_template = mpi["status"]["resources"]["app_blueprint_template"] + action_list = blueprint_template["status"]["resources"]["app_profile_list"][0][ + "action_list" + ] + click.echo("App actions [{}]: ".format(highlight_text(len(action_list)))) + for action in action_list: + click.echo("\t{} : ".format(highlight_text(action["name"])), nl=False) + click.echo( + highlight_text( + action["description"] + if action["description"] + else "No description avaiable" + ) + ) + else: + published_with_endpoint = mpi["status"]["resources"]["runbook_template_info"][ + "is_published_with_endpoints" + ] + published_with_secret = mpi["status"]["resources"]["runbook_template_info"][ + "is_published_with_secrets" + ] + click.echo( + "Published with Endpoints: " + highlight_text(published_with_endpoint) + ) + click.echo("Published with Secrets:: " + highlight_text(published_with_secret)) + + +def launch_marketplace_bp( + name, + version, + project, + environment, + app_name=None, + profile_name=None, + patch_editables=True, + app_source=None, + launch_params=None, + watch=False, + poll_interval=10, +): + """ + Launch marketplace blueprints + If version not there search in published, pendingm, accepted blueprints + """ + + if not version: + LOG.info("Fetching latest version of Marketplace Blueprint {} ".format(name)) + version = get_mpi_latest_version( + name=name, + app_source=app_source, + app_states=[ + MARKETPLACE_ITEM.STATES.ACCEPTED, + MARKETPLACE_ITEM.STATES.PUBLISHED, + MARKETPLACE_ITEM.STATES.PENDING, + ], + type=MARKETPLACE_ITEM.TYPES.BLUEPRINT, + ) + LOG.info(version) + + LOG.info("Converting MPI to blueprint") + bp_payload = convert_mpi_into_blueprint( + name=name, + version=version, + project_name=project, + environment_name=environment, + 
app_source=app_source, + ) + + app_name = app_name or "Mpi-App-{}-{}".format(name, str(uuid.uuid4())[-10:]) + launch_blueprint_simple( + patch_editables=patch_editables, + profile_name=profile_name, + app_name=app_name, + blueprint=bp_payload, + launch_params=launch_params, + ) + LOG.info("App {} creation is successful".format(app_name)) + + if watch: + + def display_action(screen): + watch_app(app_name, screen, poll_interval=poll_interval) + screen.wait_for_input(10.0) + + Display.wrapper(display_action, watch=True) + LOG.info("Action runs completed for app {}".format(app_name)) + + +def decompile_marketplace_bp( + name, version, app_source, bp_name, project, with_secrets, bp_dir +): + """decompiles marketplace blueprint""" + + if not version: + LOG.info("Fetching latest version of Marketplace Blueprint {} ".format(name)) + version = get_mpi_latest_version( + name=name, app_source=app_source, type=MARKETPLACE_ITEM.TYPES.BLUEPRINT + ) + LOG.info(version) + + LOG.info("Converting MPI into blueprint") + bp_payload = convert_mpi_into_blueprint( + name=name, version=version, project_name=project, app_source=app_source + ) + del bp_payload["status"] + + client = get_api_client() + blueprint_uuid = bp_payload["metadata"]["uuid"] + res, err = client.blueprint.export_file(blueprint_uuid) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + bp_payload = res.json() + blueprint = bp_payload["spec"]["resources"] + blueprint_name = get_valid_identifier(bp_name or name) + + if not bp_dir: + bp_dir_suffix = bp_name or "mpi_bp_{}_v{}".format(blueprint_name, version) + bp_dir = os.path.join(os.getcwd(), bp_dir_suffix) + + blueprint_description = bp_payload["spec"].get("description", "") + LOG.info("Decompiling marketplace blueprint {}".format(name)) + for sub_obj in blueprint.get("substrate_definition_list"): + sub_type = sub_obj.get("type", "") or "AHV_VM" + if sub_type == "K8S_POD": + raise NotImplementedError( + "Decompilation for k8s pod is not supported right now" + ) + elif sub_type != "AHV_VM": + LOG.warning( + "Decompilation support for providers other than AHV is experimental." + ) + break + + bp_cls = BlueprintType.decompile(blueprint) + bp_cls.__name__ = blueprint_name + bp_cls.__doc__ = blueprint_description + + create_bp_dir(bp_cls=bp_cls, bp_dir=bp_dir, with_secrets=with_secrets) + click.echo( + "\nSuccessfully decompiled. Directory location: {}. 
Blueprint location: {}".format( + get_bp_dir(), os.path.join(get_bp_dir(), "blueprint.py") + ) + ) + + +def launch_marketplace_item( + name, + version, + project, + environment, + app_name=None, + profile_name=None, + patch_editables=True, + app_source=None, + launch_params=None, + watch=False, + poll_interval=10, +): + """ + Launch marketplace items + If version not there search in published blueprints + """ + + client = get_api_client() + + CALM_VERSION = Version.get_version("Calm") + if LV(CALM_VERSION) >= LV("3.2.0"): + params = { + "filter": "name=={};type=={}".format(name, MARKETPLACE_ITEM.TYPES.BLUEPRINT) + } + mp_item_map = client.market_place.get_name_uuid_map(params=params) + if not mp_item_map: + LOG.error("No marketplace blueprint found with name {}".format(name)) + sys.exit(-1) + + if not version: + LOG.info("Fetching latest version of Marketplace Item {} ".format(name)) + version = get_mpi_latest_version( + name=name, + app_source=app_source, + app_states=[MARKETPLACE_ITEM.STATES.PUBLISHED], + type=MARKETPLACE_ITEM.TYPES.BLUEPRINT, + ) + LOG.info(version) + + LOG.info("Converting MPI to blueprint") + bp_payload = convert_mpi_into_blueprint( + name=name, + version=version, + project_name=project, + environment_name=environment, + app_source=app_source, + ) + + app_name = app_name or "Mpi-App-{}-{}".format(name, str(uuid.uuid4())[-10:]) + launch_blueprint_simple( + patch_editables=patch_editables, + profile_name=profile_name, + app_name=app_name, + blueprint=bp_payload, + launch_params=launch_params, + ) + LOG.info("App {} creation is successful".format(app_name)) + + if watch: + + def display_action(screen): + watch_app(app_name, screen, poll_interval=poll_interval) + screen.wait_for_input(10.0) + + Display.wrapper(display_action, watch=True) + LOG.info("Action runs completed for app {}".format(app_name)) + + +def convert_mpi_into_blueprint( + name, version, project_name=None, environment_name=None, app_source=None +): + + client = get_api_client() + context = get_context() + project_config = context.get_project_config() + + project_name = project_name or project_config["name"] + environment_data, project_data = get_project_environment( + name=environment_name, project_name=project_name + ) + project_uuid = project_data["metadata"]["uuid"] + environments = project_data["status"]["resources"]["environment_reference_list"] + if not environments: + LOG.error("No environment registered to project '{}'".format(project_name)) + sys.exit(-1) + + # Added in 3.2 + default_environment_uuid = ( + project_data["status"]["resources"] + .get("default_environment_reference", {}) + .get("uuid") + ) + + # If there is no default environment, select first one + default_environment_uuid = default_environment_uuid or environments[0]["uuid"] + + env_uuid = "" + if environment_data: # if user supplies environment + env_uuid = environment_data["metadata"]["uuid"] + else: + env_uuid = default_environment_uuid + + LOG.info("Fetching MPI details") + mpi_data = get_mpi_by_name_n_version( + name=name, + version=version, + app_source=app_source, + app_states=[ + MARKETPLACE_ITEM.STATES.ACCEPTED, + MARKETPLACE_ITEM.STATES.PUBLISHED, + MARKETPLACE_ITEM.STATES.PENDING, + ], + type=MARKETPLACE_ITEM.TYPES.BLUEPRINT, + ) + + # If BP is in published state, provided project should be associated with the bp + app_state = mpi_data["status"]["resources"]["app_state"] + if app_state == MARKETPLACE_ITEM.STATES.PUBLISHED: + project_ref_list = mpi_data["status"]["resources"].get( + "project_reference_list", [] + ) + 
ref_projects = [] + for project in project_ref_list: + ref_projects.append(project["name"]) + + if project_name not in ref_projects: + LOG.debug("Associated Projects: {}".format(ref_projects)) + LOG.error( + "Project {} is not shared with marketplace item {} with version {}".format( + project_name, name, version + ) + ) + sys.exit(-1) + + bp_spec = {} + bp_spec["spec"] = mpi_data["spec"]["resources"]["app_blueprint_template"]["spec"] + del bp_spec["spec"]["name"] + bp_spec["spec"]["environment_uuid"] = env_uuid + + bp_spec["spec"]["app_blueprint_name"] = "Mpi-Bp-{}-{}".format( + name, str(uuid.uuid4())[-10:] + ) + + bp_spec["metadata"] = { + "kind": "blueprint", + "project_reference": {"kind": "project", "uuid": project_uuid}, + "categories": mpi_data["metadata"].get("categories", {}), + } + bp_spec["api_version"] = "3.0" + + LOG.info("Creating MPI blueprint") + bp_res, err = client.blueprint.marketplace_launch(bp_spec) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + bp_res = bp_res.json() + del bp_res["spec"]["environment_uuid"] + bp_status = bp_res["status"]["state"] + if bp_status != "ACTIVE": + LOG.error("Blueprint went to {} state".format(bp_status)) + sys.exit(-1) + + return bp_res + + +def publish_bp_to_marketplace_manager( + bp_name, + marketplace_bp_name, + version, + description="", + with_secrets=False, + app_group_uuid=None, + icon_name=None, + icon_file=None, +): + + client = get_api_client() + context = get_context() + server_config = context.get_server_config() + + bp = get_blueprint(bp_name) + bp_uuid = bp.get("metadata", {}).get("uuid", "") + + LOG.info("Fetching blueprint details") + if with_secrets: + bp_data, err = client.blueprint.export_json_with_secrets(bp_uuid) + + else: + bp_data, err = client.blueprint.export_json(bp_uuid) + + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + bp_data = bp_data.json() + bp_status = bp_data["status"]["state"] + if bp_status != "ACTIVE": + LOG.error( + "Blueprint is in {} state. 
Unable to publish it to marketplace manager".format( + bp_status + ) + ) + sys.exit(-1) + + bp_template = { + "spec": { + "name": marketplace_bp_name, + "description": description, + "resources": { + "app_attribute_list": ["FEATURED"], + "icon_reference_list": [], + "author": server_config["pc_username"], + "version": version, + "app_group_uuid": app_group_uuid or str(uuid.uuid4()), + "app_blueprint_template": { + "status": bp_data["status"], + "spec": bp_data["spec"], + }, + }, + }, + "api_version": "3.0", + "metadata": {"kind": "marketplace_item"}, + } + + CALM_VERSION = Version.get_version("Calm") + if LV(CALM_VERSION) >= LV("3.2.0"): + bp_template["spec"]["resources"]["type"] = MARKETPLACE_ITEM.TYPES.BLUEPRINT + + if icon_name: + if icon_file: + # If file is there, upload first and then use it for marketplace item + client.app_icon.upload(icon_name, icon_file) + + app_icon_name_uuid_map = client.app_icon.get_name_uuid_map() + app_icon_uuid = app_icon_name_uuid_map.get(icon_name, None) + if not app_icon_uuid: + LOG.error("App icon: {} not found".format(icon_name)) + sys.exit(-1) + + bp_template["spec"]["resources"]["icon_reference_list"] = [ + { + "icon_type": "ICON", + "icon_reference": {"kind": "file_item", "uuid": app_icon_uuid}, + } + ] + + res, err = client.market_place.create(bp_template) + LOG.debug("Api response: {}".format(res.json())) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + LOG.info("Marketplace Blueprint is published to marketplace manager successfully") + + +def publish_bp_as_new_marketplace_bp( + bp_name, + marketplace_bp_name, + version, + description="", + with_secrets=False, + publish_to_marketplace=False, + auto_approve=False, + projects=[], + category=None, + icon_name=None, + icon_file=None, + all_projects=False, +): + + # Search whether this marketplace item exists or not + LOG.info( + "Fetching existing marketplace blueprints with name {}".format( + marketplace_bp_name + ) + ) + res = get_mpis_group_call( + name=marketplace_bp_name, + group_member_count=1, + app_source=MARKETPLACE_ITEM.SOURCES.LOCAL, + type=MARKETPLACE_ITEM.TYPES.BLUEPRINT, + ) + group_count = res["filtered_group_count"] + + if group_count: + LOG.error( + "A local marketplace item exists with same name ({}) in another app family".format( + marketplace_bp_name + ) + ) + sys.exit(-1) + + publish_bp_to_marketplace_manager( + bp_name=bp_name, + marketplace_bp_name=marketplace_bp_name, + version=version, + description=description, + with_secrets=with_secrets, + icon_name=icon_name, + icon_file=icon_file, + ) + + if publish_to_marketplace or auto_approve: + if not projects: + context = get_context() + project_config = context.get_project_config() + projects = [project_config["name"]] + + approve_marketplace_item( + name=marketplace_bp_name, + version=version, + projects=projects, + category=category, + all_projects=all_projects, + ) + + if publish_to_marketplace: + publish_marketplace_item( + name=marketplace_bp_name, + version=version, + app_source=MARKETPLACE_ITEM.SOURCES.LOCAL, + ) + + +def publish_bp_as_existing_marketplace_bp( + bp_name, + marketplace_bp_name, + version, + description="", + with_secrets=False, + publish_to_marketplace=False, + auto_approve=False, + projects=[], + category=None, + icon_name=None, + icon_file=None, + all_projects=False, +): + + LOG.info( + "Fetching existing marketplace blueprints with name {}".format( + marketplace_bp_name + ) + ) + res = get_mpis_group_call( + name=marketplace_bp_name, + group_member_count=1, + 
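A note on the lookup that follows: publishing as a new version of an existing marketplace blueprint relies on the groups call, which buckets marketplace items by their app_group_uuid. The code below reads that uuid from the first group result and then verifies that no ACCEPTED, PUBLISHED or PENDING entry already carries the requested version. A rough sketch of the response shape this module assumes (field values are placeholders, not real data):

    # Only the fields actually read by this module are shown.
    sample_group_response = {
        "filtered_group_count": 1,
        "group_results": [
            {
                "group_by_column_value": "app-group-uuid-here",
                "entity_results": [
                    # one entry per version; "version" and "app_state" are read
                    # via get_group_data_value(entity["data"], ...)
                    {"data": []},
                ],
            }
        ],
    }
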
app_source=MARKETPLACE_ITEM.SOURCES.LOCAL, + type=MARKETPLACE_ITEM.TYPES.BLUEPRINT, + ) + group_results = res["group_results"] + if not group_results: + LOG.error( + "No local marketplace blueprint exists with name {}".format( + marketplace_bp_name + ) + ) + sys.exit(-1) + + entity_group = group_results[0] + app_group_uuid = entity_group["group_by_column_value"] + + # Search whether given version of marketplace items already exists or not + # Rejected MPIs with same name and version can exist + LOG.info( + "Fetching existing versions of Marketplace Item {}".format(marketplace_bp_name) + ) + res = get_mpis_group_call( + app_group_uuid=app_group_uuid, + app_states=[ + MARKETPLACE_ITEM.STATES.ACCEPTED, + MARKETPLACE_ITEM.STATES.PUBLISHED, + MARKETPLACE_ITEM.STATES.PENDING, + ], + ) + + group_results = res["group_results"] + entity_results = group_results[0]["entity_results"] + + for entity in entity_results: + entity_version = get_group_data_value(entity["data"], "version") + entity_app_state = get_group_data_value(entity["data"], "app_state") + + if entity_version == version: + LOG.error( + "An item exists with same version ({}) and app_state ({}) in the chosen app family.".format( + entity_version, entity_app_state + ) + ) + sys.exit(-1) + + publish_bp_to_marketplace_manager( + bp_name=bp_name, + marketplace_bp_name=marketplace_bp_name, + version=version, + description=description, + with_secrets=with_secrets, + app_group_uuid=app_group_uuid, + icon_name=icon_name, + icon_file=icon_file, + ) + + if publish_to_marketplace or auto_approve: + if not projects: + context = get_context() + project_config = context.get_project_config() + projects = [project_config["name"]] + + approve_marketplace_item( + name=marketplace_bp_name, + version=version, + projects=projects, + category=category, + all_projects=all_projects, + ) + + if publish_to_marketplace: + publish_marketplace_item( + name=marketplace_bp_name, + version=version, + app_source=MARKETPLACE_ITEM.SOURCES.LOCAL, + ) + + +def approve_marketplace_item( + name, + version=None, + projects=[], + category=None, + all_projects=False, + type=None, +): + + client = get_api_client() + if not version: + # Search for pending items, Only those items can be approved + LOG.info("Fetching latest version of Marketplace Item {} ".format(name)) + version = get_mpi_latest_version( + name=name, + app_states=[MARKETPLACE_ITEM.STATES.PENDING], + type=type, + ) + LOG.info(version) + + LOG.info( + "Fetching details of pending marketplace item {} with version {}".format( + name, version + ) + ) + item = get_mpi_by_name_n_version( + name=name, + version=version, + app_source=MARKETPLACE_ITEM.SOURCES.LOCAL, + app_states=[MARKETPLACE_ITEM.STATES.PENDING], + type=type, + ) + item_uuid = item["metadata"]["uuid"] + item_type = MARKETPLACE_ITEM.TYPES.BLUEPRINT + CALM_VERSION = Version.get_version("Calm") + if LV(CALM_VERSION) >= LV("3.2.0"): + item_type = item["status"]["resources"]["type"] + + if item_type == MARKETPLACE_ITEM.TYPES.BLUEPRINT: + item_status = item["status"]["resources"]["app_blueprint_template"]["status"][ + "state" + ] + if item_status != "ACTIVE": + LOG.error("Item is in {} state. 
Unable to approve it".format(item_status)) + sys.exit(-1) + + res, err = client.market_place.read(item_uuid) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + item_data = res.json() + item_data.pop("status", None) + item_data["api_version"] = "3.0" + item_data["spec"]["resources"]["app_state"] = MARKETPLACE_ITEM.STATES.ACCEPTED + + if category: + app_families = get_app_family_list() + if category not in app_families: + LOG.error("{} is not a valid App Family category".format(category)) + sys.exit(-1) + + item_data["metadata"]["categories"] = {"AppFamily": category} + + if not item_data["spec"]["resources"].get("project_reference_list", {}): + item_data["spec"]["resources"]["project_reference_list"] = [] + + project_name_uuid_map = client.project.get_name_uuid_map(params={"length": 250}) + if all_projects: + for k, v in project_name_uuid_map.items(): + item_data["spec"]["resources"]["project_reference_list"].append( + { + "kind": "project", + "name": k, + "uuid": v, + } + ) + + else: + for _project in projects: + item_data["spec"]["resources"]["project_reference_list"].append( + { + "kind": "project", + "name": _project, + "uuid": project_name_uuid_map[_project], + } + ) + + res, err = client.market_place.update(uuid=item_uuid, payload=item_data) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + LOG.info( + "Marketplace Item {} with version {} is approved successfully".format( + name, version + ) + ) + + +def publish_marketplace_item( + name, + version=None, + projects=[], + category=None, + app_source=None, + all_projects=False, + type=None, +): + + client = get_api_client() + if not version: + # Search for accepted items, only those items can be published + LOG.info( + "Fetching latest version of accepted Marketplace Item {} ".format(name) + ) + version = get_mpi_latest_version( + name=name, + app_states=[MARKETPLACE_ITEM.STATES.ACCEPTED], + app_source=app_source, + type=type, + ) + LOG.info(version) + + LOG.info( + "Fetching details of accepted marketplace item {} with version {}".format( + name, version + ) + ) + item = get_mpi_by_name_n_version( + name=name, + version=version, + app_source=app_source, + app_states=[MARKETPLACE_ITEM.STATES.ACCEPTED], + type=type, + ) + item_uuid = item["metadata"]["uuid"] + item_type = MARKETPLACE_ITEM.TYPES.BLUEPRINT + CALM_VERSION = Version.get_version("Calm") + if LV(CALM_VERSION) >= LV("3.2.0"): + item_type = item["status"]["resources"]["type"] + + if item_type == MARKETPLACE_ITEM.TYPES.BLUEPRINT: + item_status = item["status"]["resources"]["app_blueprint_template"]["status"][ + "state" + ] + if item_status != "ACTIVE": + LOG.error( + "Item is in {} state. 
Unable to publish it to marketplace".format( + item_status + ) + ) + sys.exit(-1) + + res, err = client.market_place.read(item_uuid) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + item_data = res.json() + item_data.pop("status", None) + item_data["api_version"] = "3.0" + item_data["spec"]["resources"]["app_state"] = MARKETPLACE_ITEM.STATES.PUBLISHED + + if category: + app_families = get_app_family_list() + if category not in app_families: + LOG.error("{} is not a valid App Family category".format(category)) + sys.exit(-1) + + item_data["metadata"]["categories"] = {"AppFamily": category} + + if projects or all_projects: + # Clear the stored projects + item_data["spec"]["resources"]["project_reference_list"] = [] + project_name_uuid_map = client.project.get_name_uuid_map(params={"length": 250}) + + if all_projects: + for k, v in project_name_uuid_map.items(): + item_data["spec"]["resources"]["project_reference_list"].append( + { + "kind": "project", + "name": k, + "uuid": v, + } + ) + else: + for _project in projects: + item_data["spec"]["resources"]["project_reference_list"].append( + { + "kind": "project", + "name": _project, + "uuid": project_name_uuid_map[_project], + } + ) + + # Atleast 1 project required for publishing to marketplace + if not item_data["spec"]["resources"].get("project_reference_list", None): + LOG.error("To publish to the Marketplace, please provide a project first.") + sys.exit(-1) + + res, err = client.market_place.update(uuid=item_uuid, payload=item_data) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + LOG.info("Marketplace Item is published to marketplace successfully") + + +def update_marketplace_item( + name, + version, + category=None, + projects=[], + description=None, + app_source=None, + type=None, +): + """ + updates the marketplace item + version is required to prevent unwanted update of another mpi + """ + + client = get_api_client() + + LOG.info( + "Fetching details of marketplace item {} with version {}".format(name, version) + ) + mpi_data = get_mpi_by_name_n_version( + name=name, + version=version, + app_source=app_source, + app_states=[ + MARKETPLACE_ITEM.STATES.ACCEPTED, + MARKETPLACE_ITEM.STATES.PUBLISHED, + MARKETPLACE_ITEM.STATES.PENDING, + ], + type=type, + ) + item_uuid = mpi_data["metadata"]["uuid"] + + res, err = client.market_place.read(item_uuid) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + item_data = res.json() + item_data.pop("status", None) + item_data["api_version"] = "3.0" + + if category: + app_families = get_app_family_list() + if category not in app_families: + LOG.error("{} is not a valid App Family category".format(category)) + sys.exit(-1) + + item_data["metadata"]["categories"] = {"AppFamily": category} + + if projects: + # Clear all stored projects + item_data["spec"]["resources"]["project_reference_list"] = [] + for project in projects: + project_data = get_project(project) + + item_data["spec"]["resources"]["project_reference_list"].append( + { + "kind": "project", + "name": project, + "uuid": project_data["metadata"]["uuid"], + } + ) + + if description: + item_data["spec"]["description"] = description + + res, err = client.market_place.update(uuid=item_uuid, payload=item_data) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + LOG.info( + "Marketplace Item {} with version {} is updated successfully".format( + name, version + ) + ) + + +def delete_marketplace_item( + 
name, + version, + app_source=None, + app_state=None, + type=None, +): + + client = get_api_client() + + if app_state == MARKETPLACE_ITEM.STATES.PUBLISHED: + LOG.error("Unpublish MPI {} first to delete it".format(name)) + sys.exit(-1) + + app_states = ( + [app_state] + if app_state + else [ + MARKETPLACE_ITEM.STATES.ACCEPTED, + MARKETPLACE_ITEM.STATES.REJECTED, + MARKETPLACE_ITEM.STATES.PENDING, + ] + ) + + LOG.info( + "Fetching details of unpublished marketplace item {} with version {}".format( + name, version + ) + ) + mpi_data = get_mpi_by_name_n_version( + name=name, + version=version, + app_source=app_source, + app_states=app_states, + type=type, + ) + item_uuid = mpi_data["metadata"]["uuid"] + + res, err = client.market_place.delete(item_uuid) + LOG.debug("Api response: {}".format(res.json())) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + LOG.info( + "Marketplace Item {} with version {} is deleted successfully".format( + name, version + ) + ) + + +def reject_marketplace_item(name, version, type=None): + + client = get_api_client() + if not version: + # Search for pending items, Only those items can be rejected + LOG.info("Fetching latest version of pending Marketplace Item {} ".format(name)) + version = get_mpi_latest_version( + name=name, + app_states=[MARKETPLACE_ITEM.STATES.PENDING], + type=type, + ) + LOG.info(version) + + # Pending BP will always of type LOCAL, so no need to apply that filter + LOG.info( + "Fetching details of pending marketplace item {} with version {}".format( + name, version + ) + ) + item = get_mpi_by_name_n_version( + name=name, + version=version, + app_states=[MARKETPLACE_ITEM.STATES.PENDING], + type=type, + ) + item_uuid = item["metadata"]["uuid"] + + res, err = client.market_place.read(item_uuid) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + item_data = res.json() + item_data.pop("status", None) + item_data["api_version"] = "3.0" + item_data["spec"]["resources"]["app_state"] = MARKETPLACE_ITEM.STATES.REJECTED + + res, err = client.market_place.update(uuid=item_uuid, payload=item_data) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + LOG.info( + "Marketplace Item {} with version {} is rejected successfully".format( + name, version + ) + ) + + +def unpublish_marketplace_item(name, version, app_source=None, type=None): + + client = get_api_client() + if not version: + # Search for published items, only those can be unpublished + LOG.info( + "Fetching latest version of published Marketplace Item {} ".format(name) + ) + version = get_mpi_latest_version( + name=name, + app_states=[MARKETPLACE_ITEM.STATES.PUBLISHED], + app_source=app_source, + type=type, + ) + LOG.info(version) + + LOG.info( + "Fetching details of published marketplace item {} with version {}".format( + name, version + ) + ) + item = get_mpi_by_name_n_version( + name=name, + version=version, + app_states=[MARKETPLACE_ITEM.STATES.PUBLISHED], + app_source=app_source, + type=type, + ) + item_uuid = item["metadata"]["uuid"] + + res, err = client.market_place.read(item_uuid) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + item_data = res.json() + item_data.pop("status", None) + item_data["api_version"] = "3.0" + item_data["spec"]["resources"]["app_state"] = MARKETPLACE_ITEM.STATES.ACCEPTED + + res, err = client.market_place.update(uuid=item_uuid, payload=item_data) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + 
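Taken together, the helpers above form a small state machine for marketplace items: an item published to the marketplace manager surfaces as PENDING (which is why approve searches for pending items), approve moves it to ACCEPTED, publishing to the store moves it to PUBLISHED, reject moves a pending item to REJECTED, and unpublish drops a published item back to ACCEPTED. A published item must be unpublished before it can be deleted. A compact summary of the app_state written by each update payload above:

    APP_STATE_AFTER = {
        "approve": MARKETPLACE_ITEM.STATES.ACCEPTED,
        "publish (store)": MARKETPLACE_ITEM.STATES.PUBLISHED,
        "reject": MARKETPLACE_ITEM.STATES.REJECTED,
        "unpublish": MARKETPLACE_ITEM.STATES.ACCEPTED,
    }
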
sys.exit(-1) + + LOG.info( + "Marketplace Item {} with version {} is unpublished successfully".format( + name, version + ) + ) + + +def unpublish_marketplace_bp(name, version, app_source=None): + """unpublishes marketplace blueprint""" + + if not version: + # Search for published blueprints, only those can be unpublished + LOG.info( + "Fetching latest version of published Marketplace Item {} ".format(name) + ) + version = get_mpi_latest_version( + name=name, + app_states=[MARKETPLACE_ITEM.STATES.PUBLISHED], + app_source=app_source, + type=MARKETPLACE_ITEM.TYPES.BLUEPRINT, + ) + LOG.info(version) + + LOG.info( + "Fetching details of published marketplace blueprint {} with version {}".format( + name, version + ) + ) + mpi_item = get_mpi_by_name_n_version( + name=name, + version=version, + app_states=[MARKETPLACE_ITEM.STATES.PUBLISHED], + app_source=app_source, + type=MARKETPLACE_ITEM.TYPES.BLUEPRINT, + ) + + item_type = MARKETPLACE_ITEM.TYPES.BLUEPRINT + CALM_VERSION = Version.get_version("Calm") + if LV(CALM_VERSION) >= LV("3.2.0"): + item_type = mpi_item["status"]["resources"]["type"] + + if item_type != "blueprint": + LOG.error( + "Marketplace blueprint {} with version {} not found".format(name, version) + ) + sys.exit(-1) + + unpublish_marketplace_item( + name=name, + version=version, + app_source=app_source, + type=MARKETPLACE_ITEM.TYPES.BLUEPRINT, + ) + + +def publish_runbook_to_marketplace_manager( + runbook_name, + marketplace_item_name, + version, + description="", + with_secrets=False, + with_endpoints=False, + app_group_uuid=None, + icon_name=None, + icon_file=None, +): + + client = get_api_client() + context = get_context() + server_config = context.get_server_config() + + runbook = get_runbook(client, runbook_name) + runbook_uuid = runbook.get("metadata", {}).get("uuid", "") + + mpi_spec = { + "spec": { + "name": marketplace_item_name, + "description": description, + "resources": { + "app_attribute_list": ["FEATURED"], + "icon_reference_list": [], + "author": server_config["pc_username"], + "version": version, + "type": MARKETPLACE_ITEM.TYPES.RUNBOOK, + "app_group_uuid": app_group_uuid or str(uuid.uuid4()), + "runbook_template_info": { + "is_published_with_secrets": with_secrets, + "is_published_with_endpoints": with_endpoints, + "source_runbook_reference": { + "uuid": runbook_uuid, + "kind": "runbook", + "name": runbook_name, + }, + }, + }, + }, + "api_version": "3.0", + "metadata": {"kind": "marketplace_item"}, + } + + if icon_name: + if icon_file: + # If file is there, upload first and then use it for marketplace item + client.app_icon.upload(icon_name, icon_file) + + app_icon_name_uuid_map = client.app_icon.get_name_uuid_map() + app_icon_uuid = app_icon_name_uuid_map.get(icon_name, None) + if not app_icon_uuid: + LOG.error("App icon: {} not found".format(icon_name)) + sys.exit(-1) + + mpi_spec["spec"]["resources"]["icon_reference_list"] = [ + { + "icon_type": "ICON", + "icon_reference": {"kind": "file_item", "uuid": app_icon_uuid}, + } + ] + + res, err = client.market_place.create(mpi_spec) + LOG.debug("Api response: {}".format(res.json())) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + LOG.info("Marketplace Runbook is published to marketplace manager successfully") + + +def publish_runbook_as_new_marketplace_item( + runbook_name, + marketplace_item_name, + version, + description="", + with_secrets=False, + with_endpoints=False, + publish_to_marketplace=False, + auto_approve=False, + projects=[], + category=None, + icon_name=None, + 
icon_file=None, +): + + # Search whether this marketplace item exists or not + LOG.info( + "Fetching existing marketplace runbooks with name {}".format( + marketplace_item_name + ) + ) + res = get_mpis_group_call( + name=marketplace_item_name, + group_member_count=1, + app_source=MARKETPLACE_ITEM.SOURCES.LOCAL, + ) + group_count = res["filtered_group_count"] + + if group_count: + LOG.error( + "A local marketplace item exists with same name ({}) in another app family".format( + marketplace_item_name + ) + ) + sys.exit(-1) + + publish_runbook_to_marketplace_manager( + runbook_name=runbook_name, + marketplace_item_name=marketplace_item_name, + version=version, + description=description, + with_secrets=with_secrets, + with_endpoints=with_endpoints, + icon_name=icon_name, + icon_file=icon_file, + ) + + if publish_to_marketplace or auto_approve: + if not projects: + context = get_context() + project_config = context.get_project_config() + projects = [project_config["name"]] + + approve_marketplace_item( + name=marketplace_item_name, + version=version, + projects=projects, + category=category, + ) + + if publish_to_marketplace: + publish_marketplace_item( + name=marketplace_item_name, + version=version, + app_source=MARKETPLACE_ITEM.SOURCES.LOCAL, + ) + + +def publish_runbook_as_existing_marketplace_item( + runbook_name, + marketplace_item_name, + version, + description="", + with_secrets=False, + with_endpoints=False, + publish_to_marketplace=False, + auto_approve=False, + projects=[], + category=None, + icon_name=None, + icon_file=None, +): + + LOG.info( + "Fetching existing marketplace runbooks with name {}".format( + marketplace_item_name + ) + ) + res = get_mpis_group_call( + name=marketplace_item_name, + group_member_count=1, + app_source=MARKETPLACE_ITEM.SOURCES.LOCAL, + ) + group_results = res["group_results"] + if not group_results: + LOG.error( + "No local marketplace runbook exists with name {}".format( + marketplace_item_name + ) + ) + sys.exit(-1) + + entity_group = group_results[0] + app_group_uuid = entity_group["group_by_column_value"] + + # Search whether given version of marketplace items already exists or not + # Rejected MPIs with same name and version can exist + LOG.info( + "Fetching existing versions of Marketplace Item {}".format( + marketplace_item_name + ) + ) + res = get_mpis_group_call( + app_group_uuid=app_group_uuid, + app_states=[ + MARKETPLACE_ITEM.STATES.ACCEPTED, + MARKETPLACE_ITEM.STATES.PUBLISHED, + MARKETPLACE_ITEM.STATES.PENDING, + ], + ) + + group_results = res["group_results"] + entity_results = group_results[0]["entity_results"] + + for entity in entity_results: + entity_version = get_group_data_value(entity["data"], "version") + entity_app_state = get_group_data_value(entity["data"], "app_state") + + if entity_version == version: + LOG.error( + "An item exists with same version ({}) and app_state ({}) in the chosen app family.".format( + entity_version, entity_app_state + ) + ) + sys.exit(-1) + + publish_runbook_to_marketplace_manager( + runbook_name=runbook_name, + marketplace_item_name=marketplace_item_name, + version=version, + description=description, + with_secrets=with_secrets, + app_group_uuid=app_group_uuid, + icon_name=icon_name, + icon_file=icon_file, + ) + + if publish_to_marketplace or auto_approve: + if not projects: + context = get_context() + project_config = context.get_project_config() + projects = [project_config["name"]] + + approve_marketplace_item( + name=marketplace_item_name, + version=version, + projects=projects, + 
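For orientation, the runbook publishing helpers mirror the blueprint ones: publish the runbook to the marketplace manager, optionally auto-approve it into the given projects, and optionally push it straight to the store. A hedged sketch of driving the flow directly from Python (runbook, item and project names are made up):

    publish_runbook_as_new_marketplace_item(
        runbook_name="my_runbook",             # illustrative
        marketplace_item_name="My MP Runbook",
        version="1.0.0",
        description="Example publish",
        publish_to_marketplace=True,           # implies approve + publish to the store
        projects=["default"],
    )
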
category=category, + ) + + if publish_to_marketplace: + publish_marketplace_item( + name=marketplace_item_name, + version=version, + app_source=MARKETPLACE_ITEM.SOURCES.LOCAL, + ) + + +def execute_marketplace_runbook( + name, + version, + project_name, + app_source=None, + ignore_runtime_variables=False, + watch=False, + app_states=[], +): + """ + Execute marketplace runbooks + If version not there search in published, pending, accepted runbooks + """ + + client = get_api_client() + params = { + "filter": "name=={};type=={}".format(name, MARKETPLACE_ITEM.TYPES.RUNBOOK) + } + mp_item_map = client.market_place.get_name_uuid_map(params=params) + if not mp_item_map: + LOG.error("No marketplace runbook found with name {}".format(name)) + sys.exit(-1) + + if not app_states: + app_states = [ + MARKETPLACE_ITEM.STATES.ACCEPTED, + MARKETPLACE_ITEM.STATES.PUBLISHED, + MARKETPLACE_ITEM.STATES.PENDING, + ] + + if not version: + LOG.info("Fetching latest version of Marketplace Runbook {} ".format(name)) + version = get_mpi_latest_version( + name=name, + app_source=app_source, + app_states=app_states, + type=MARKETPLACE_ITEM.TYPES.RUNBOOK, + ) + LOG.info(version) + + client = get_api_client() + context = get_context() + + project_config = context.get_project_config() + + project_name = project_name or project_config["name"] + project_data = get_project(project_name) + + project_uuid = project_data["metadata"]["uuid"] + + LOG.info("Fetching MPI details") + mpi_data = get_mpi_by_name_n_version( + name=name, + version=version, + app_source=app_source, + app_states=app_states, + type=MARKETPLACE_ITEM.TYPES.RUNBOOK, + ) + + mpi_type = mpi_data["status"]["resources"]["type"] + if mpi_type != MARKETPLACE_ITEM.TYPES.RUNBOOK: + LOG.error("Selected marketplace item is not of type runbook") + return + + mpi_uuid = mpi_data["metadata"]["uuid"] + payload = { + "api_version": "3.0", + "metadata": { + "kind": "runbook", + "project_reference": {"uuid": project_uuid, "kind": "project"}, + }, + "spec": { + "resources": { + "args": [], + "marketplace_reference": {"kind": "marketplace_item", "uuid": mpi_uuid}, + } + }, + } + + patch_runbook_endpoints(client, mpi_data, payload) + if not ignore_runtime_variables: + patch_runbook_runtime_editables(client, mpi_data, payload) + + def render_runbook(screen): + screen.clear() + screen.refresh() + execute_marketplace_runbook_renderer(screen, client, watch, payload=payload) + screen.wait_for_input(10.0) + + Display.wrapper(render_runbook, watch) + + +def execute_marketplace_runbook_renderer(screen, client, watch, payload={}): + + res, err = client.runbook.marketplace_execute(payload) + if not err: + screen.clear() + screen.print_at("Runbook queued for run", 0, 0) + else: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + response = res.json() + runlog_uuid = response["status"]["runlog_uuid"] + + def poll_runlog_status(): + return client.runbook.poll_action_run(runlog_uuid) + + screen.refresh() + should_continue = poll_action(poll_runlog_status, get_runlog_status(screen)) + if not should_continue: + return + res, err = client.runbook.poll_action_run(runlog_uuid) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + response = res.json() + runbook = response["status"]["runbook_json"]["resources"]["runbook"] + + if watch: + screen.refresh() + watch_runbook(runlog_uuid, runbook, screen=screen) + + context = get_context() + server_config = context.get_server_config() + + pc_ip = server_config["pc_ip"] + pc_port = server_config["pc_port"] + run_url 
= "https://{}:{}/console/#page/explore/calm/runbooks/runlogs/{}".format( + pc_ip, pc_port, runlog_uuid + ) + if not watch: + screen.print_at( + "Runbook execution url: {}".format(highlight_text(run_url)), 0, 0 + ) + screen.refresh() + + +def patch_runbook_endpoints(client, mpi_data, payload): + template_info = mpi_data["status"]["resources"].get("runbook_template_info", {}) + runbook = template_info.get("runbook_template", {}) + + if template_info.get("is_published_with_endpoints", False): + # No patching of endpoints required as runbook is published with endpoints + return payload + + default_target = input("Default target for execution of Marketplace Runbook:") + + if default_target: + endpoint = get_endpoint(client, default_target) + endpoint_id = endpoint.get("metadata", {}).get("uuid", "") + payload["spec"]["resources"]["default_target_reference"] = { + "kind": "app_endpoint", + "uuid": endpoint_id, + "name": default_target, + } + + tasks = runbook["spec"]["resources"]["runbook"]["task_definition_list"] + used_endpoints = [] + for task in tasks: + target_name = task.get("target_any_local_reference", {}).get("name", "") + if target_name: + used_endpoints.append(target_name) + + endpoints_description_map = {} + for ep_info in runbook["spec"]["resources"].get("endpoints_information", []): + ep_name = ep_info.get("endpoint_reference", {}).get("name", "") + ep_description = ep_info.get("description", "") + if ep_name and ep_description: + endpoints_description_map[ep_name] = ep_description + + if used_endpoints: + LOG.info( + "Please select an endpoint belonging to the selected project for every endpoint used in the marketplace\ + /item." + ) + endpoints_mapping = {} + for used_endpoint in used_endpoints: + des = endpoints_description_map.get(used_endpoint, used_endpoint) + selected_endpoint = input("{}:".format(des)) + if selected_endpoint: + endpoint = get_endpoint(client, selected_endpoint) + endpoint_id = endpoint.get("metadata", {}).get("uuid", "") + endpoints_mapping[used_endpoint] = endpoint_id + + payload["spec"]["resources"]["endpoints_mapping"] = endpoints_mapping + + +def patch_runbook_runtime_editables(client, mpi_data, payload): + + runbook = ( + mpi_data["status"]["resources"] + .get("runbook_template_info", {}) + .get("runbook_template", {}) + ) + variable_list = runbook["spec"]["resources"]["runbook"].get("variable_list", []) + args = payload.get("spec", {}).get("resources", {}).get("args", []) + + for variable in variable_list: + if variable.get("editables", {}).get("value", False): + new_val = input( + "Value for Variable {} in Runbook (default value={}): ".format( + variable.get("name"), variable.get("value", "") + ) + ) + if new_val: + args.append( + { + "name": variable.get("name"), + "value": type(variable.get("value", ""))(new_val), + } + ) + + payload["spec"]["resources"]["args"] = args + return payload diff --git a/framework/calm/dsl/cli/marketplace_bp_commands.py b/framework/calm/dsl/cli/marketplace_bp_commands.py new file mode 100644 index 0000000..4ea5a1b --- /dev/null +++ b/framework/calm/dsl/cli/marketplace_bp_commands.py @@ -0,0 +1,580 @@ +import click + +from .marketplace_commands_main import ( + marketplace_get, + marketplace_describe, + marketplace_launch, + marketplace_decompile, + marketplace_approve, + marketplace_publish, + marketplace_update, + marketplace_delete, + marketplace_reject, + publish, + marketplace_unpublish, +) +from .marketplace import ( + get_marketplace_items, + describe_marketplace_item, + launch_marketplace_bp, + 
publish_bp_as_new_marketplace_bp, + publish_bp_as_existing_marketplace_bp, + approve_marketplace_item, + publish_marketplace_item, + update_marketplace_item, + delete_marketplace_item, + reject_marketplace_item, + decompile_marketplace_bp, + unpublish_marketplace_bp, +) +from .constants import MARKETPLACE_ITEM + +APP_STATES = [ + MARKETPLACE_ITEM.STATES.PENDING, + MARKETPLACE_ITEM.STATES.ACCEPTED, + MARKETPLACE_ITEM.STATES.REJECTED, + MARKETPLACE_ITEM.STATES.PUBLISHED, +] +APP_SOURCES = [ + MARKETPLACE_ITEM.SOURCES.GLOBAL, + MARKETPLACE_ITEM.SOURCES.LOCAL, +] + + +# TODO Add limit and offset +@marketplace_get.command("bps") +@click.option( + "--name", "-n", default=None, help="Filter by name of marketplace blueprints" +) +@click.option( + "--quiet", + "-q", + is_flag=True, + default=False, + help="Show only marketplace blueprint names", +) +@click.option( + "--app_family", + "-f", + default="All", + help="Filter by app family category of marketplace blueprints", +) +@click.option( + "--app_state", + "-a", + "app_states", + type=click.Choice(APP_STATES), + multiple=True, + help="filter by state of marketplace blueprints", +) +@click.option( + "--filter", + "filter_by", + "-fb", + default=None, + help="Filter marketplace blueprints by this string", +) +def _get_marketplace_bps(name, quiet, app_family, app_states, filter_by): + """Get marketplace manager blueprints""" + + get_marketplace_items( + name=name, + quiet=quiet, + app_family=app_family, + app_states=app_states, + filter_by=filter_by, + type=MARKETPLACE_ITEM.TYPES.BLUEPRINT, + ) + + +@marketplace_describe.command("bp") +@click.argument("name") +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format.", +) +@click.option("--version", "-v", default=None, help="Version of marketplace blueprint") +@click.option( + "--source", + "-s", + default=None, + type=click.Choice(APP_SOURCES), + help="App Source of marketplace blueprint", +) +@click.option( + "--app_state", + "-a", + default=None, + type=click.Choice(APP_STATES), + help="State of marketplace blueprint", +) +def _describe_marketplace_bp(name, out, version, source, app_state): + """Describe a marketplace manager blueprint""" + + describe_marketplace_item( + name=name, out=out, version=version, app_source=source, app_state=app_state + ) + + +@marketplace_launch.command("bp") +@click.argument("name") +@click.option("--version", "-v", default=None, help="Version of marketplace blueprint") +@click.option("--project", "-pj", default=None, help="Project for the application") +@click.option( + "--environment", "-e", default=None, help="Environment for the application" +) +@click.option("--app_name", "-a", default=None, help="Name of your app") +@click.option( + "--profile_name", + "-p", + default=None, + help="Name of app profile to be used for blueprint launch", +) +@click.option( + "--ignore_runtime_variables", + "-i", + is_flag=True, + default=False, + help="Ignore runtime variables and use defaults for blueprint launch", +) +@click.option( + "--source", + "-s", + default=None, + type=click.Choice(APP_SOURCES), + help="App Source of marketplace blueprint", +) +@click.option("--watch/--no-watch", "-w", default=False, help="Watch scrolling output") +@click.option( + "--poll-interval", + "poll_interval", + "-pi", + type=int, + default=10, + show_default=True, + help="Give polling interval", +) +@click.option( + "--launch_params", + "-l", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + 
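The --launch_params option declared just above points at a plain Python file holding the runtime editables; its expected structure is documented in the command docstring that follows. Since the placeholder values in that docstring were stripped in this copy, here is a hedged example of what such a file might look like (variable, context and substrate names are made up):

    # example_launch_params.py -- all names and values are illustrative
    variable_list = [
        {
            "value": {"value": "my_value"},
            "context": "MyAppProfile",   # assumed profile (or profile.service) context
            "name": "my_variable",
        }
    ]

    substrate_list = [
        {
            "value": {},                 # editable substrate fields go here
            "name": "MySubstrate",
        }
    ]
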
help="Path to python file for runtime editables", +) +def _launch_marketplace_bp( + name, + version, + project, + environment, + app_name, + profile_name, + ignore_runtime_variables, + source, + launch_params, + watch, + poll_interval, +): + """Launch a marketplace manager blueprint + All runtime variables will be prompted by default. When passing the 'ignore_runtime_variables' flag, no variables will be prompted and all default values will be used. + The marketplace-blueprint default values can be overridden by passing a Python file via 'launch_params'. Any variable not defined in the Python file will keep the default + value defined in the blueprint. When passing a Python file, no variables will be prompted. + + \b + Note: Dynamic variables will not have a default value. User have to select an option during launch. + + \b + >: launch_params: Python file consisting of variables 'variable_list' and 'substrate_list' + Ex: variable_list = [ + { + "value": {"value": }, + "context": + "name": "" + } + ] + substrate_list = [ + { + "value": { + + }, + "name": , + } + ] + deployment_list = [ + { + "value": { + + }, + "name": , + } + ] + credential_list = [ + { + "value": { + + }, + "name": , + } + ] + Sample context for variables: + 1. context = "" # For variable under profile + 2. context = "" # For variable under service + """ + + launch_marketplace_bp( + name=name, + version=version, + project=project, + environment=environment, + app_name=app_name, + profile_name=profile_name, + patch_editables=not ignore_runtime_variables, + app_source=source, + launch_params=launch_params, + watch=watch, + poll_interval=poll_interval, + ) + + +@marketplace_decompile.command("bp", experimental=True) +@click.argument("mpi_name") +@click.option("--name", "-n", default=None, help="Name of blueprint") +@click.option("--version", "-v", default=None, help="Version of marketplace blueprint") +@click.option("--project", "-p", default=None, help="Project for the blueprint") +@click.option( + "--source", + "-s", + default=None, + type=click.Choice(APP_SOURCES), + help="App Source of marketplace blueprint", +) +@click.option( + "--with_secrets", + "-w", + is_flag=True, + default=False, + help="Interactive Mode to provide the value for secrets", +) +@click.option( + "--dir", + "-d", + "bp_dir", + default=None, + help="Blueprint directory location used for placing decompiled entities", +) +def _decompile_marketplace_bp( + mpi_name, version, project, name, source, with_secrets, bp_dir +): + """Decompiles marketplace manager blueprint + + + \b + Sample command examples: + i.) calm decompile marketplace bp "Jenkins" : Command will decompile marketplace blueprint "Jenkins" having latest version + ii.) calm decompile marketplace bp "Jenkins" --version "1.0.0": Command will decompile marketplace blueprint "Jenkins" having "1.0.0" version + iii.) 
calm decompile marketplace bp "Jenkins" --name "DSL_JENKINS_BLUEPRINT": Command will decompile marketplace bp "Jenkins" to DSL blueprint having name "DSL_JENKINS_BLUEPRINT" (see the name of blueprint class in decompiled blueprint.py file)""" + + decompile_marketplace_bp( + name=mpi_name, + version=version, + project=project, + bp_name=name, + app_source=None, + with_secrets=with_secrets, + bp_dir=bp_dir, + ) + + +@publish.command("bp") +@click.argument("bp_name") +@click.option("--version", "-v", required=True, help="Version of marketplace blueprint") +@click.option("--name", "-n", default=None, help="Name of marketplace Blueprint") +@click.option( + "--description", "-d", default="", help="Description for marketplace blueprint" +) +@click.option( + "--with_secrets", + "-w", + is_flag=True, + default=False, + help="Preserve secrets while publishing blueprints to marketpalce", +) +@click.option( + "--existing_markeplace_bp", + "-e", + is_flag=True, + default=False, + help="Publish as new version of existing marketplace blueprint", +) +@click.option( + "--publish_to_marketplace", + "-pm", + is_flag=True, + default=False, + help="Publish the blueprint directly to marketplace skipping the steps to approve, etc.", +) +@click.option( + "--auto_approve", + "-aa", + is_flag=True, + default=False, + help="Auto approves the blueprint", +) +@click.option( + "--project", + "-p", + "projects", + multiple=True, + help="Projects for marketplace blueprint (used for approving blueprint)", +) +@click.option( + "--category", + "-c", + default=None, + help="Category for marketplace blueprint (used for approving blueprint)", +) +@click.option( + "--file", + "-f", + "icon_file", + default=None, + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + help="Path of app icon image to be uploaded", +) +@click.option( + "--icon_name", "-i", default=None, help="App icon name for marketpalce blueprint" +) +@click.option( + "--all_projects", + "-ap", + is_flag=True, + default=False, + help="Publishes bp to all projects", +) +def publish_bp( + bp_name, + name, + version, + description, + with_secrets, + existing_markeplace_bp, + publish_to_marketplace, + projects=[], + category=None, + auto_approve=False, + icon_name=False, + icon_file=None, + all_projects=False, +): + """Publish a blueprint to marketplace manager""" + + if not name: + # Using blueprint name as the marketplace bp name if no name provided + name = bp_name + + if not existing_markeplace_bp: + publish_bp_as_new_marketplace_bp( + bp_name=bp_name, + marketplace_bp_name=name, + version=version, + description=description, + with_secrets=with_secrets, + publish_to_marketplace=publish_to_marketplace, + projects=projects, + category=category, + auto_approve=auto_approve, + icon_name=icon_name, + icon_file=icon_file, + all_projects=all_projects, + ) + + else: + publish_bp_as_existing_marketplace_bp( + bp_name=bp_name, + marketplace_bp_name=name, + version=version, + description=description, + with_secrets=with_secrets, + publish_to_marketplace=publish_to_marketplace, + projects=projects, + category=category, + auto_approve=auto_approve, + icon_name=icon_name, + icon_file=icon_file, + all_projects=all_projects, + ) + + +@marketplace_approve.command("bp") +@click.argument("name", nargs=1) +@click.option("--version", "-v", default=None, help="Version of marketplace blueprint") +@click.option( + "--category", "-c", default=None, help="Category for marketplace blueprint" +) +@click.option( + "--project", + "-p", + "projects", + multiple=True, + 
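Following the style of the decompile examples above, the launch command defined earlier is typically invoked along these lines (blueprint, project and app names are illustrative, and example_launch_params.py refers to the hypothetical file sketched earlier):

    calm launch marketplace bp "Jenkins" --version "1.0.0" --project default --app_name jenkins-app-1
    calm launch marketplace bp "Jenkins" --version "1.0.0" -l ./example_launch_params.py
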
help="Projects for marketplace blueprint", +) +@click.option( + "--all_projects", + "-ap", + is_flag=True, + default=False, + help="Approve bp to all projects", +) +def approve_bp(name, version, category, all_projects, projects=[]): + """Approves a marketplace manager blueprint""" + + approve_marketplace_item( + name=name, + version=version, + projects=projects, + category=category, + all_projects=all_projects, + type=MARKETPLACE_ITEM.TYPES.BLUEPRINT, + ) + + +@marketplace_publish.command("bp") +@click.argument("name", nargs=1) +@click.option("--version", "-v", default=None, help="Version of marketplace blueprint") +@click.option( + "--category", "-c", default=None, help="Category for marketplace blueprint" +) +@click.option( + "--source", + "-s", + default=None, + type=click.Choice(APP_SOURCES), + help="App Source for marketplace blueprint", +) +@click.option( + "--project", + "-p", + "projects", + multiple=True, + help="Projects for marketplace blueprint", +) +@click.option( + "--all_projects", + "-ap", + is_flag=True, + default=False, + help="Approve bp to all projects", +) +def _publish_marketplace_bp(name, version, category, source, all_projects, projects=[]): + """Publish a marketplace blueprint to marketplace store""" + + publish_marketplace_item( + name=name, + version=version, + projects=projects, + category=category, + app_source=source, + all_projects=all_projects, + type=MARKETPLACE_ITEM.TYPES.BLUEPRINT, + ) + + +@marketplace_update.command("bp") +@click.argument("name", nargs=1) +@click.option( + "--version", "-v", required=True, help="Version of marketplace blueprint" +) # Required to prevent unwanted update of published mpi +@click.option( + "--category", "-c", default=None, help="Category for marketplace blueprint" +) +@click.option( + "--project", + "-p", + "projects", + multiple=True, + help="Projects for marketplace blueprint", +) +@click.option("--description", "-d", help="Description for marketplace blueprint") +@click.option( + "--source", + "-s", + default=None, + type=click.Choice(APP_SOURCES), + help="App Source for marketplace blueprint", +) +def _update_marketplace_bp(name, version, category, projects, description, source): + """Update a marketplace manager blueprint""" + + update_marketplace_item( + name=name, + version=version, + category=category, + projects=projects, + description=description, + app_source=source, + type=MARKETPLACE_ITEM.TYPES.BLUEPRINT, + ) + + +@marketplace_delete.command("bp") +@click.argument("name") +@click.option( + "--version", "-v", required=True, help="Version of marketplace blueprint" +) # Required to prevent unwanted delete of unknown mpi +@click.option( + "--source", + "-s", + default=None, + type=click.Choice(APP_SOURCES), + help="App Source of marketplace blueprint", +) +@click.option( + "--app_state", + "-a", + default=None, + type=click.Choice(APP_STATES), + help="State of marketplace blueprint", +) +def _delete_marketplace_bp(name, version, source, app_state): + """Deletes marketplace manager blueprint""" + + delete_marketplace_item( + name=name, + version=version, + app_source=source, + app_state=app_state, + type=MARKETPLACE_ITEM.TYPES.BLUEPRINT, + ) + + +@marketplace_reject.command("bp") +@click.argument("name") +@click.option( + "--version", "-v", required=True, help="Version of marketplace blueprint" +) # Required to prevent unwanted rejection of unknown mpi +def _reject_marketplace_bp(name, version): + """Reject marketplace manager blueprint""" + + reject_marketplace_item( + name=name, version=version, 
type=MARKETPLACE_ITEM.TYPES.BLUEPRINT + ) + + +@marketplace_unpublish.command("bp") +@click.argument("name") +@click.option( + "--version", "-v", required=True, help="Version of marketplace blueprint" +) # Required to prevent unwanted unpublish of unknown mpi +@click.option( + "--source", + "-s", + default=None, + type=click.Choice(APP_SOURCES), + help="App Source of marketplace blueprint", +) +def _unpublish_marketplace_bp(name, version, source): + """Unpublish marketplace store blueprint""" + + unpublish_marketplace_bp(name=name, version=version, app_source=source) diff --git a/framework/calm/dsl/cli/marketplace_commands_main.py b/framework/calm/dsl/cli/marketplace_commands_main.py new file mode 100644 index 0000000..1559180 --- /dev/null +++ b/framework/calm/dsl/cli/marketplace_commands_main.py @@ -0,0 +1,83 @@ +from .main import ( + get, + describe, + launch, + publish, + approve, + update, + delete, + reject, + unpublish, + decompile, + run, +) +from .utils import FeatureFlagGroup + + +# Marketplace Commands + + +@get.group("marketplace", cls=FeatureFlagGroup) +def marketplace_get(): + """Get marketplace entities""" + pass + + +@describe.group("marketplace", cls=FeatureFlagGroup) +def marketplace_describe(): + """Describe marketplace entities""" + pass + + +@launch.group("marketplace", cls=FeatureFlagGroup) +def marketplace_launch(): + """Launch marketplace entities""" + pass + + +@decompile.group("marketplace", cls=FeatureFlagGroup) +def marketplace_decompile(): + """Decompile marketplace entities""" + pass + + +@approve.group("marketplace", cls=FeatureFlagGroup) +def marketplace_approve(): + """Approve marketplace entities""" + pass + + +@publish.group("marketplace", cls=FeatureFlagGroup) +def marketplace_publish(): + """Publish marketplace entities""" + pass + + +@update.group("marketplace", cls=FeatureFlagGroup) +def marketplace_update(): + """Update marketplace entities""" + pass + + +@delete.group("marketplace", cls=FeatureFlagGroup) +def marketplace_delete(): + """Delete marketplace entities""" + pass + + +@reject.group("marketplace", cls=FeatureFlagGroup) +def marketplace_reject(): + """Reject marketplace entities""" + pass + + +@unpublish.group("marketplace", cls=FeatureFlagGroup) +def marketplace_unpublish(): + """Unpublish marketplace entities""" + pass + + +@run.group("marketplace", cls=FeatureFlagGroup) +def marketplace_run(): + """Run marketplace entities""" + pass diff --git a/framework/calm/dsl/cli/marketplace_item_commands.py b/framework/calm/dsl/cli/marketplace_item_commands.py new file mode 100644 index 0000000..9ea1b86 --- /dev/null +++ b/framework/calm/dsl/cli/marketplace_item_commands.py @@ -0,0 +1,264 @@ +import click + +from .marketplace_commands_main import ( + marketplace_get, + marketplace_describe, + marketplace_launch, + marketplace_run, + marketplace_unpublish, +) +from .marketplace import ( + get_marketplace_store_items, + unpublish_marketplace_item, + describe_marketplace_store_item, + execute_marketplace_runbook, + launch_marketplace_item, +) +from .constants import MARKETPLACE_ITEM + +APP_SOURCES = [ + MARKETPLACE_ITEM.SOURCES.GLOBAL, + MARKETPLACE_ITEM.SOURCES.LOCAL, +] + + +@marketplace_describe.command("item") +@click.argument("name") +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format", +) +@click.option("--version", "-v", default=None, help="Version of marketplace item") +@click.option( + "--source", + "-s", + default=None, + type=click.Choice(APP_SOURCES), + help="App Source 
for marketplace item", +) +def _describe_marketplace_item(name, out, version, source): + """Describe a marketplace store item""" + + describe_marketplace_store_item( + name=name, out=out, version=version, app_source=source + ) + + +@marketplace_get.command("items") +@click.option("--name", "-n", default=None, help="Filter by name of marketplace items") +@click.option( + "--quiet", + "-q", + is_flag=True, + default=False, + help="Show only marketplace item names", +) +@click.option( + "--app_family", + "-f", + default="All", + help="Filter by app family category of marketplace item", +) +@click.option( + "--display_all", + "-d", + is_flag=True, + default=False, + help="Show all marketplace items which are published", +) +@click.option( + "--filter", + "filter_by", + "-fb", + default=None, + help="Filter marketplace items by this string", +) +def _get_marketplace_items(name, quiet, app_family, display_all, filter_by): + """Get marketplace store items""" + + get_marketplace_store_items( + name=name, + quiet=quiet, + app_family=app_family, + display_all=display_all, + filter_by=filter_by, + ) + + +@marketplace_launch.command("item") +@click.argument("name") +@click.option("--version", "-v", default=None, help="Version of marketplace blueprint") +@click.option("--project", "-pj", default=None, help="Project for the application") +@click.option( + "--environment", "-e", default=None, help="Environment for the application" +) +@click.option("--app_name", "-a", default=None, help="Name of app") +@click.option( + "--profile_name", + "-p", + default=None, + help="Name of app profile to be used for blueprint launch", +) +@click.option( + "--ignore_runtime_variables", + "-i", + is_flag=True, + default=False, + help="Ignore runtime variables and use defaults for blueprint launch", +) +@click.option( + "--source", + "-s", + default=None, + type=click.Choice(APP_SOURCES), + help="App Source of marketplace blueprint", +) +@click.option("--watch/--no-watch", "-w", default=False, help="Watch scrolling output") +@click.option( + "--poll-interval", + "poll_interval", + "-pi", + type=int, + default=10, + show_default=True, + help="Give polling interval", +) +@click.option( + "--launch_params", + "-l", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + help="Path to python file for runtime editables", +) +def _launch_marketplace_item( + name, + version, + project, + environment, + app_name, + profile_name, + ignore_runtime_variables, + source, + launch_params, + watch, + poll_interval, +): + """Launch a marketplace store item of type blueprint + All runtime variables will be prompted by default. When passing the 'ignore_runtime_variables' flag, no variables will be prompted and all default values will be used. + The marketplace-blueprint default values can be overridden by passing a Python file via 'launch_params'. Any variable not defined in the Python file will keep the default + value defined in the blueprint. When passing a Python file, no variables will be prompted. + + \b + Note: Dynamic variables will not have a default value. User have to select an option during launch. 
+ + \b + >: launch_params: Python file consisting of variables 'variable_list' and 'substrate_list' + Ex: variable_list = [ + { + "value": {"value": }, + "context": + "name": "" + } + ] + substrate_list = [ + { + "value": { + + }, + "name": , + } + ] + deployment_list = [ + { + "value": { + + }, + "name": , + } + ] + credential_list = [ + { + "value": { + + }, + "name": , + } + ] + Sample context for variables: + 1. context = "" # For variable under profile + 2. context = "" # For variable under service + """ + + launch_marketplace_item( + name=name, + version=version, + project=project, + environment=environment, + app_name=app_name, + profile_name=profile_name, + patch_editables=not ignore_runtime_variables, + app_source=source, + launch_params=launch_params, + watch=watch, + poll_interval=poll_interval, + ) + + +@marketplace_run.command("item", feature_min_version="3.2.0") +@click.argument("name") +@click.option("--version", "-v", default=None, help="Version of marketplace item") +@click.option("--project", "-pj", default=None, help="Project for the execution") +@click.option( + "--ignore_runtime_variables", + "-i", + is_flag=True, + default=False, + help="Ignore runtime variables and use defaults for runbook execution", +) +@click.option( + "--source", + "-s", + default=None, + type=click.Choice(APP_SOURCES), + help="App Source of marketplace item", +) +@click.option("--watch/--no-watch", "-w", default=False, help="Watch scrolling output") +def _run_marketplace_item( + name, version, project, source, ignore_runtime_variables, watch +): + """Execute a marketplace item of type runbook""" + + execute_marketplace_runbook( + name=name, + version=version, + project_name=project, + app_source=source, + watch=watch, + app_states=[MARKETPLACE_ITEM.STATES.PUBLISHED], + ignore_runtime_variables=ignore_runtime_variables, + ) + + +@marketplace_unpublish.command("item") +@click.argument("name") +@click.option( + "--version", "-v", required=True, help="Version of marketplace item" +) # Required to prevent unwanted unpublish of unknown mpi +@click.option( + "--source", + "-s", + default=None, + type=click.Choice(APP_SOURCES), + help="App Source of marketplace item", +) +def _unpublish_marketplace_item(name, version, source): + """Unpublish marketplace store item""" + + unpublish_marketplace_item( + name=name, + version=version, + app_source=source, + ) diff --git a/framework/calm/dsl/cli/marketplace_runbook_commands.py b/framework/calm/dsl/cli/marketplace_runbook_commands.py new file mode 100644 index 0000000..cb84c1a --- /dev/null +++ b/framework/calm/dsl/cli/marketplace_runbook_commands.py @@ -0,0 +1,419 @@ +import click + +from .marketplace_commands_main import ( + marketplace_get, + marketplace_describe, + marketplace_approve, + marketplace_publish, + marketplace_update, + marketplace_delete, + marketplace_reject, + marketplace_run, + publish, +) +from .marketplace import ( + get_marketplace_items, + describe_marketplace_item, + publish_runbook_as_new_marketplace_item, + publish_runbook_as_existing_marketplace_item, + approve_marketplace_item, + publish_marketplace_item, + update_marketplace_item, + delete_marketplace_item, + reject_marketplace_item, + execute_marketplace_runbook, +) +from .constants import MARKETPLACE_ITEM + +APP_STATES = [ + MARKETPLACE_ITEM.STATES.PENDING, + MARKETPLACE_ITEM.STATES.ACCEPTED, + MARKETPLACE_ITEM.STATES.REJECTED, + MARKETPLACE_ITEM.STATES.PUBLISHED, +] +APP_SOURCES = [ + MARKETPLACE_ITEM.SOURCES.GLOBAL, + MARKETPLACE_ITEM.SOURCES.LOCAL, +] + + +# TODO Add limit 
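Once a runbook item is PUBLISHED, the run command defined above executes it against a project, prompting for endpoints and runtime variables unless -i is passed. An illustrative invocation (item and project names are made up):

    calm run marketplace item "my_runbook_item" --project default --watch
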
and offset +@marketplace_get.command("runbooks", feature_min_version="3.2.0") +@click.option( + "--name", "-n", default=None, help="Filter by name of marketplace runbooks" +) +@click.option( + "--quiet", + "-q", + is_flag=True, + default=False, + help="Show only marketplace runbooks names", +) +@click.option( + "--app_family", + "-f", + default="All", + help="Filter by app family category of marketplace runbooks", +) +@click.option( + "--app_state", + "-a", + "app_states", + type=click.Choice(APP_STATES), + multiple=True, + help="filter by state of marketplace runbooks", +) +@click.option( + "--filter", + "filter_by", + "-fb", + default=None, + help="Filter marketplace runbooks by this string", +) +def _get_marketplace_runbooks(name, quiet, app_family, app_states, filter_by): + """Get marketplace manager runbooks""" + + get_marketplace_items( + name=name, + quiet=quiet, + app_family=app_family, + app_states=app_states, + filter_by=filter_by, + type=MARKETPLACE_ITEM.TYPES.RUNBOOK, + ) + + +@marketplace_describe.command("runbook", feature_min_version="3.2.0") +@click.argument("name") +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format.", +) +@click.option("--version", "-v", default=None, help="Version of marketplace runbooks") +@click.option( + "--source", + "-s", + default=None, + type=click.Choice(APP_SOURCES), + help="App Source of marketplace runbook", +) +@click.option( + "--app_state", + "-a", + default=None, + type=click.Choice(APP_STATES), + help="State of marketplace runbook", +) +def _describe_marketplace_runbook(name, out, version, source, app_state): + """Describe a marketplace manager runbook""" + + describe_marketplace_item( + name=name, out=out, version=version, app_source=source, app_state=app_state + ) + + +@marketplace_approve.command("runbook", feature_min_version="3.2.0") +@click.argument("name", nargs=1) +@click.option("--version", "-v", default=None, help="Version of marketplace runbook") +@click.option("--category", "-c", default=None, help="Category for marketplace runbook") +@click.option( + "--project", + "-p", + "projects", + multiple=True, + help="Projects for marketplace runbook", +) +@click.option( + "--all_projects", + "-ap", + is_flag=True, + default=False, + help="Approve runbook to all runbook", +) +def approve_runbook(name, version, category, all_projects, projects=[]): + """Approves a marketplace manager runbook""" + + approve_marketplace_item( + name=name, + version=version, + projects=projects, + category=category, + all_projects=all_projects, + type=MARKETPLACE_ITEM.TYPES.RUNBOOK, + ) + + +@marketplace_publish.command("runbook", feature_min_version="3.2.0") +@click.argument("name", nargs=1) +@click.option("--version", "-v", default=None, help="Version of marketplace runbook") +@click.option("--category", "-c", default=None, help="Category for marketplace runbook") +@click.option( + "--source", + "-s", + default=None, + type=click.Choice(APP_SOURCES), + help="App Source for marketplace runbook", +) +@click.option( + "--project", + "-p", + "projects", + multiple=True, + help="Projects for marketplace runbook", +) +@click.option( + "--all_projects", + "-ap", + is_flag=True, + default=False, + help="Approve runbook to all projects", +) +def _publish_marketplace_runbook( + name, version, category, source, all_projects, projects=[] +): + """Publish a marketplace runbook to marketplace store""" + + publish_marketplace_item( + name=name, + version=version, + projects=projects, + 
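As with blueprints, a runbook item goes through approve and publish before it appears in the store; the corresponding commands defined above might be used along these lines (item name, project and version are illustrative):

    calm approve marketplace runbook "my_runbook_item" --version "1.0.0" --project default
    calm publish marketplace runbook "my_runbook_item" --version "1.0.0"
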
category=category, + app_source=source, + all_projects=all_projects, + type=MARKETPLACE_ITEM.TYPES.RUNBOOK, + ) + + +@marketplace_update.command("runbook", feature_min_version="3.2.0") +@click.argument("name", nargs=1) +@click.option( + "--version", "-v", required=True, help="Version of marketplace runbook" +) # Required to prevent unwanted update of published mpi +@click.option("--category", "-c", default=None, help="Category for marketplace runbook") +@click.option( + "--project", + "-p", + "projects", + multiple=True, + help="Projects for marketplace runbook", +) +@click.option("--description", "-d", help="Description for marketplace runbook") +@click.option( + "--source", + "-s", + default=None, + type=click.Choice(APP_SOURCES), + help="App Source for marketplace runbook", +) +def _update_marketplace_runbook(name, version, category, projects, description, source): + """Update a marketplace manager runbook""" + + update_marketplace_item( + name=name, + version=version, + category=category, + projects=projects, + description=description, + app_source=source, + type=MARKETPLACE_ITEM.TYPES.RUNBOOK, + ) + + +@marketplace_delete.command("runbook", feature_min_version="3.2.0") +@click.argument("name") +@click.option( + "--version", "-v", required=True, help="Version of marketplace runbook" +) # Required to prevent unwanted delete of unknown mpi +@click.option( + "--source", + "-s", + default=None, + type=click.Choice(APP_SOURCES), + help="App Source of marketplace runbook", +) +@click.option( + "--app_state", + "-a", + default=None, + type=click.Choice(APP_STATES), + help="State of marketplace runbook", +) +def _delete_marketplace_runbook(name, version, source, app_state): + """Deletes marketplace manager runbook""" + + delete_marketplace_item( + name=name, + version=version, + app_source=source, + app_state=app_state, + type=MARKETPLACE_ITEM.TYPES.RUNBOOK, + ) + + +@marketplace_reject.command("runbook", feature_min_version="3.2.0") +@click.argument("name") +@click.option( + "--version", "-v", required=True, help="Version of marketplace runbook" +) # Required to prevent unwanted rejection of unknown mpi +def _reject_marketplace_runbook(name, version): + """Reject marketplace manager runbook""" + + reject_marketplace_item( + name=name, version=version, type=MARKETPLACE_ITEM.TYPES.RUNBOOK + ) + + +@publish.command("runbook", feature_min_version="3.2.0") +@click.argument("runbook_name") +@click.option("--version", "-v", required=True, help="Version of marketplace runbook") +@click.option("--name", "-n", default=None, help="Name of marketplace runbook") +@click.option( + "--description", "-d", default="", help="Description for marketplace runbook" +) +@click.option( + "--with_secrets", + "-w", + is_flag=True, + default=False, + help="Preserve secrets while publishing runbooks to marketplace", +) +@click.option( + "--with_endpoints", + "-w", + is_flag=True, + default=False, + help="Preserve endpoints publishing runbooks to marketplace", +) +@click.option( + "--existing_markeplace_runbook", + "-e", + is_flag=True, + default=False, + help="Publish as new version of existing marketplace runbook", +) +@click.option( + "--publish_to_marketplace", + "-pm", + is_flag=True, + default=False, + help="Publish the runbook directly to marketplace skipping the steps to approve, etc.", +) +@click.option( + "--auto_approve", + "-aa", + is_flag=True, + default=False, + help="Auto approves the runbook", +) +@click.option( + "--project", + "-p", + "projects", + multiple=True, + help="Projects for marketplace runbook 
(used for approving runbook)", +) +@click.option( + "--category", + "-c", + default=None, + help="Category for marketplace runbook (used for approving runbook)", +) +@click.option( + "--file", + "-f", + "icon_file", + default=None, + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + help="Path of app icon image to be uploaded", +) +@click.option( + "--icon_name", "-i", default=None, help="App icon name for marketplace runbook" +) +def publish_runbook( + runbook_name, + name, + version, + description, + with_secrets, + with_endpoints, + existing_markeplace_runbook, + publish_to_marketplace, + projects=[], + category=None, + auto_approve=False, + icon_name=False, + icon_file=None, +): + """Publish a runbook to marketplace manager""" + + if not name: + # Using runbook name as the marketplace runbook name if no name provided + name = runbook_name + + if not existing_markeplace_runbook: + publish_runbook_as_new_marketplace_item( + runbook_name=runbook_name, + marketplace_item_name=name, + version=version, + description=description, + with_secrets=with_secrets, + publish_to_marketplace=publish_to_marketplace, + projects=projects, + category=category, + auto_approve=auto_approve, + icon_name=icon_name, + icon_file=icon_file, + ) + + else: + publish_runbook_as_existing_marketplace_item( + runbook_name=runbook_name, + marketplace_item_name=name, + version=version, + description=description, + with_secrets=with_secrets, + publish_to_marketplace=publish_to_marketplace, + projects=projects, + category=category, + auto_approve=auto_approve, + icon_name=icon_name, + icon_file=icon_file, + ) + + +@marketplace_run.command("runbook", feature_min_version="3.2.0") +@click.argument("name") +@click.option("--version", "-v", default=None, help="Version of marketplace item") +@click.option("--project", "-pj", default=None, help="Project for the execution") +@click.option( + "--ignore_runtime_variables", + "-i", + is_flag=True, + default=False, + help="Ignore runtime variables and use defaults for runbook execution", +) +@click.option( + "--source", + "-s", + default=None, + type=click.Choice(APP_SOURCES), + help="App Source of marketplace item", +) +@click.option("--watch/--no-watch", "-w", default=False, help="Watch scrolling output") +def _run_marketplace_runbook( + name, version, project, source, ignore_runtime_variables, watch +): + """Execute a marketplace item of type runbook""" + + execute_marketplace_runbook( + name=name, + version=version, + project_name=project, + app_source=source, + watch=watch, + ignore_runtime_variables=ignore_runtime_variables, + ) diff --git a/framework/calm/dsl/cli/network_group.py b/framework/calm/dsl/cli/network_group.py new file mode 100644 index 0000000..27ae856 --- /dev/null +++ b/framework/calm/dsl/cli/network_group.py @@ -0,0 +1,848 @@ +import time +import click +import json +import sys +from distutils.version import LooseVersion as LV +from prettytable import PrettyTable + + +from calm.dsl.api import get_api_client, network_group +from calm.dsl.builtins.models.network_group_tunnel import NetworkGroupTunnel +from calm.dsl.builtins.models.network_group_tunnel_payload import ( + create_network_group_tunnel_payload, +) +from calm.dsl.builtins.models.network_group_tunnel_vm_payload import ( + create_network_group_tunnel_vm_payload, +) +from calm.dsl.builtins.models.network_group_tunnel_vm_spec import ( + NetworkGroupTunnelVMSpec, +) +from calm.dsl.config import get_context +from calm.dsl.providers.base import get_provider + +from .utils import 
highlight_text +from calm.dsl.tools import get_module_from_file +from calm.dsl.log import get_logging_handle +from calm.dsl.builtins.models.helper.common import ( + get_network_group, + get_network_group_by_tunnel_name, +) +from calm.dsl.store import Cache +from calm.dsl.constants import CACHE, NETWORK_GROUP_TUNNEL_TASK + +LOG = get_logging_handle(__name__) + + +def get_network_groups(limit, offset, quiet, out): + """Get the network groups, optionally filtered by a string""" + + client = get_api_client() + ContextObj = get_context() + server_config = ContextObj.get_server_config() + + params = {"length": limit, "offset": offset} + + res, err = client.network_group.list(params=params) + + if err: + pc_ip = server_config["pc_ip"] + LOG.warning("Cannot fetch network group from {}".format(pc_ip)) + return + + res = res.json() + total_matches = res["metadata"]["total_matches"] + if total_matches > limit: + LOG.warning( + "Displaying {} out of {} entities. Please use --limit and --offset option for more results.".format( + limit, total_matches + ) + ) + + if out == "json": + click.echo(json.dumps(res, indent=4, separators=(",", ": "))) + return + + json_rows = res["entities"] + if not json_rows: + click.echo(highlight_text("No network group found !!!\n")) + return + + if quiet: + for _row in json_rows: + row = _row["status"] + click.echo(highlight_text(row["name"])) + return + + table = PrettyTable() + table.field_names = [ + "UUID", + "NAME", + "VPC", + "TUNNEL", + "ACCOUNT", + ] + # TODO: Add API call to show VPC name or read from Cache + AhvVmProvider = get_provider("AHV_VM") + AhvObj = AhvVmProvider.get_api_obj() + + for _row in json_rows: + row = _row["status"] + metadata = _row["metadata"] + resources = row.get("resources", {}) + + account_uuid = resources.get("account_reference", {}).get("uuid") + res, err = client.account.read(account_uuid) + if err: + LOG.error( + "Failed to find account with uuid:{}, Skipping".format(account_uuid) + ) + continue + + account_info = res.json() + if ( + account_info.get("status", {}).get("resources", {}).get("state") + != "VERIFIED" + ): + LOG.warning( + "Account with UUID: {} not in VERIFIED state, Skipping".format( + account_uuid + ) + ) + continue + + account_name = account_info.get("metadata", {}).get("name") + vpc_uuid = resources.get("platform_vpc_uuid_list", [])[0] + vpc_filter = "(_entity_id_=={})".format(vpc_uuid) + vpcs = AhvObj.vpcs( + account_uuid=account_uuid, filter_query=vpc_filter, ignore_failures=True + ) + LOG.debug(vpcs) + if not vpcs or not vpcs.get("entities", []): + LOG.error( + "VPC with uuid:{} not found for account: {}".format( + vpc_uuid, account_name + ) + ) + continue + vpcs = vpcs.get("entities", []) + vpc_name = vpcs[0].get("spec", {}).get("name") + + table.add_row( + [ + highlight_text(metadata["uuid"]), + highlight_text(metadata["name"]), + highlight_text(vpc_name), + highlight_text(resources.get("tunnel_reference", {}).get("name", "")), + highlight_text(account_name), + ] + ) + click.echo(table) + + +def get_network_group_tunnels(limit, offset, quiet, out): + """Get the Network Group Tunnels, optionally filtered by a string""" + + client = get_api_client() + ContextObj = get_context() + server_config = ContextObj.get_server_config() + + params = {"length": limit, "offset": offset} + params.update( + { + "nested_attributes": [ + "tunnel_name", + "tunnel_vm_name", + "app_uuid", + "app_status", + ] + } + ) + + res, err = client.network_group.list(params=params) + + if err: + pc_ip = server_config["pc_ip"] + 
LOG.warning("Cannot fetch network group from {}".format(pc_ip))
+        return
+
+    res = res.json()
+    total_matches = res["metadata"]["total_matches"]
+    if total_matches > limit:
+        LOG.warning(
+            "Displaying {} out of {} entities. Please use --limit and --offset option for more results.".format(
+                limit, total_matches
+            )
+        )
+
+    if out == "json":
+        click.echo(json.dumps(res, indent=4, separators=(",", ": ")))
+        return
+
+    json_rows = res["entities"]
+    if not json_rows:
+        click.echo(highlight_text("No network group found !!!\n"))
+        return
+
+    if quiet:
+        for _row in json_rows:
+            row = _row["status"]
+            click.echo(highlight_text(row["name"]))
+        return
+
+    table = PrettyTable()
+    table.field_names = [
+        "NAME",
+        "VPC",
+        "STATE",
+        "ACCOUNT",
+        "APPLICATION_NAME",
+        "TUNNEL_VM_NAME",
+        "CREATED_ON",
+        "UUID",
+    ]
+    # TODO: Add API call to show VPC name or read from Cache
+    AhvVmProvider = get_provider("AHV_VM")
+    AhvObj = AhvVmProvider.get_api_obj()
+
+    for _row in json_rows:
+        row = _row["status"]
+        metadata = _row["metadata"]
+        resources = row.get("resources", {})
+
+        account_uuid = resources.get("account_reference", {}).get("uuid")
+        res, err = client.account.read(account_uuid)
+        if err:
+            LOG.error(
+                "Failed to find account with uuid:{}, Skipping".format(account_uuid)
+            )
+            continue
+        LOG.debug("account information: {}".format(res.text))
+        account_info = res.json()
+        verified_acc = True
+        if (
+            account_info.get("status", {}).get("resources", {}).get("state")
+            != "VERIFIED"
+        ):
+            LOG.warning(
+                "Account with UUID: {} not in VERIFIED state, Skipping".format(
+                    account_uuid
+                )
+            )
+            verified_acc = False
+
+        account_name = account_info.get("metadata", {}).get("name")
+        vpc_uuid = resources.get("platform_vpc_uuid_list", [])[0]
+        vpc_filter = "(_entity_id_=={})".format(vpc_uuid)
+        vpc_name = "-"
+        if verified_acc:
+            vpcs = AhvObj.vpcs(
+                account_uuid=account_uuid, filter_query=vpc_filter, ignore_failures=True
+            )
+            LOG.debug(vpcs)
+            if not vpcs or not vpcs.get("entities", []):
+                LOG.error(
+                    "VPC with uuid:{} not found for account: {}".format(
+                        vpc_uuid, account_name
+                    )
+                )
+                continue
+            vpcs = vpcs.get("entities", [])
+            vpc_name = vpcs[0].get("spec", {}).get("name")
+
+        app_uuid = resources.get("app_uuid")
+        tunnel_vm_name = resources.get("tunnel_vm_name", "-")
+        app_name = "-"
+        if app_uuid:
+            res, err = client.application.read(app_uuid)
+            if err:
+                LOG.warning("Application with UUID: {} not found, Skipping".format(app_uuid))
+            app_info = res.json()
+            app_name = app_info.get("metadata", {}).get("name")
+
+        tunnel_uuid = resources.get("tunnel_reference", {}).get("uuid")
+        tunnel_state = "-"
+        if tunnel_uuid:
+            res, err = client.tunnel.read(tunnel_uuid)
+            if err:
+                LOG.warning(
+                    "Failed to get tunnel state information: {}, Skipping".format(
+                        tunnel_uuid
+                    )
+                )
+            tunnel_info = res.json()
+            LOG.debug("tunnel: {}".format(tunnel_info))
+            tunnel_state = tunnel_info.get("status", {}).get("state")
+            tunnel_state = tunnel_state_mapper(tunnel_state)
+
+        creation_time = int(tunnel_info.get("metadata").get("creation_time")) // 1000000
+
+        table.add_row(
+            [
+                highlight_text(resources.get("tunnel_reference", {}).get("name", "-")),
+                highlight_text(vpc_name),
+                highlight_text(tunnel_state),
+                highlight_text(account_name),
+                highlight_text(app_name),
+                highlight_text(tunnel_vm_name),
+                highlight_text(time.ctime(creation_time)),
+                highlight_text(resources.get("tunnel_reference", {}).get("uuid", "")),
+            ]
+        )
+    click.echo(table)
+
+
+def tunnel_state_mapper(state):
+    tunnel_state_map = {
+        "NOT_VALIDATED":
"Connecting", + "HEALTHY": "Connected", + "UNHEALTHY": "Disconnected", + "DELETING": "Deleting", + "CONNECTING": "Connecting", + "RECONNECTING": "Reconnecting", + "DISCONNECTING": "Disconnecting", + } + return tunnel_state_map.get(state, "Unknown") + + +def describe_network_group(network_group_name, out): + + network_group = get_network_group(network_group_name) + generate_describe_table(network_group, out) + + +def describe_network_group_tunnel(network_group_tunnel_name, out): + + network_group = get_network_group_from_tunnel_name(network_group_tunnel_name) + generate_describe_table(network_group, out) + + +def generate_describe_table(network_group, out): + + client = get_api_client() + network_group_name = network_group.get("metadata", {}).get("name") + if out == "json": + click.echo(json.dumps(network_group, indent=4, separators=(",", ": "))) + return + + click.echo("\n----Network Group Summary----\n") + click.echo( + "Name: " + + highlight_text(network_group_name) + + " (uuid: " + + highlight_text(network_group["metadata"]["uuid"]) + + ")" + ) + network_group_resources = network_group["status"].get("resources", {}) + + click.echo("State: " + highlight_text(network_group_resources["state"])) + click.echo( + "Owner: " + highlight_text(network_group["metadata"]["owner_reference"]["name"]) + ) + + # created_on = arrow.get(network_group["metadata"]["creation_time"]) + # past = created_on.humanize() + # click.echo( + # "Created on: {} ({})".format( + # highlight_text(time.ctime(created_on.timestamp)), highlight_text(past) + # ) + # ) + + account = network_group_resources.get("account_reference", {}) + platform_vpcs = network_group_resources.get("platform_vpc_uuid_list", []) + + vpcCaches = [] + if platform_vpcs: + + for vpc in platform_vpcs: + vpc_cache_data = Cache.get_entity_data_using_uuid( + entity_type="ahv_vpc", uuid=vpc + ) + vpcCaches.append(vpc_cache_data) + if not vpc_cache_data: + LOG.error("VPC (uuid={}) not found. 
Please update cache".format(vpc)) + sys.exit(-1) + + click.echo("\n\tVPC:\n\t--------------------") + for vpc in vpcCaches: + vpc_name = vpc["name"] + vpc_uuid = vpc["uuid"] + + click.echo( + "\tName: {} (uuid: {})".format( + highlight_text(vpc_name), + highlight_text(vpc_uuid), + ) + ) + + tunnel_uuid = network_group_resources.get("tunnel_reference", {}).get( + "uuid" + ) + if tunnel_uuid: + res, err = client.tunnel.read(tunnel_uuid) + if err: + LOG.error(err) + sys.exit(-1) + + tunnel_payload = res.json() + tunnel_status = tunnel_payload.get("status", {}) + tunnel_name = tunnel_status.get("name") + tunnel_state = tunnel_state_mapper(tunnel_status.get("state")) + app_uuid = network_group_resources.get("app_uuid") + + res, err = client.application.read(app_uuid) + if err: + LOG.error( + "Failed to fetch Tunnel Application due to {}".format(err) + ) + sys.exit(-1) + + app = res.json() + app_name = app.get("metadata", {}).get("name") + + click.echo("\n\tTunnel: \n\t--------------------") + click.echo("\t" + "Name: " + highlight_text(tunnel_name)) + click.echo("\t" + "UUID: " + highlight_text(tunnel_uuid)) + click.echo("\t" + "State: " + highlight_text(tunnel_state)) + click.echo("\t" + "Application UUID: " + highlight_text(app_uuid)) + click.echo("\t" + "Application Name: " + highlight_text(app_name)) + + tunnel_vm_name = network_group_resources.get("tunnel_vm_name") + app_status = network_group_resources.get("app_status") + click.echo("\n\t\tTunnel VM: \n\t\t--------------------") + click.echo("\t\t" + "Name: " + highlight_text(tunnel_vm_name)) + click.echo("\t\t" + "Status: " + highlight_text(app_status)) + + if account: + account_uuid = account.get("uuid", "") + account_payload = {} + account_name = account.get("name", "") + + res, err = client.account.read(account_uuid) + if err: + LOG.error(err) + sys.exit(-1) + account_payload = res.json() + + resources = account_payload.get("status", {}).get("resources", {}) + if not account_name: + account_name = resources.get("name", "") + + account_state = resources.get("state") + click.echo("\n\tAccount: \n\t--------------------") + click.echo("\t" + "Name: " + highlight_text(account_name)) + click.echo("\t" + "UUID: " + highlight_text(account_uuid)) + click.echo("\t" + "State: " + highlight_text(account_state)) + + +def get_network_group_tunnel_module_from_file(network_group_tunnel_file): + return get_module_from_file( + "calm.dsl.user_network_group_tunnel", network_group_tunnel_file + ) + + +def get_network_group_tunnel_vm_module_from_file(network_group_tunnel_vm_file): + return get_module_from_file( + "calm.dsl.user_network_group_tunnel_vm", network_group_tunnel_vm_file + ) + + +def get_network_group_tunnel_class_from_module(user_network_group_tunnel_module): + """Returns project class given a module""" + + UserProject = None + for item in dir(user_network_group_tunnel_module): + obj = getattr(user_network_group_tunnel_module, item) + if isinstance(obj, type(NetworkGroupTunnel)): + if obj.__bases__[0] == NetworkGroupTunnel: + UserProject = obj + + return UserProject + + +def get_network_group_tunnel_vm_class_from_module(user_network_group_tunnel_vm_module): + """Returns project class given a module""" + + UserProject = None + for item in dir(user_network_group_tunnel_vm_module): + obj = getattr(user_network_group_tunnel_vm_module, item) + if isinstance(obj, type(NetworkGroupTunnelVMSpec)): + if obj.__bases__[0] == NetworkGroupTunnelVMSpec: + UserProject = obj + + return UserProject + + +def 
compile_network_group_tunnel_dsl_class(UserNetworkGroupTunnel): + network_group_tunnel_payload, _ = create_network_group_tunnel_payload( + UserNetworkGroupTunnel + ) + return network_group_tunnel_payload.get_dict() + + +def compile_network_group_tunnel_vm_dsl_class( + UserNetworkGroupTunnelVMSpec, network_group_tunnel_name +): + network_group_tunnel_vm_payload, _ = create_network_group_tunnel_vm_payload( + UserNetworkGroupTunnelVMSpec, network_group_tunnel_name + ) + return network_group_tunnel_vm_payload.get_dict() + + +def watch_network_group_tunnel_launch_task(tunnel_setup_task, poll_interval): + + client = get_api_client() + cnt = 0 + app_uuid = None + milestone_reached = NETWORK_GROUP_TUNNEL_TASK.STATUS.QUEUED + app_state = "provisioning" + while True: + LOG.info( + "Fetching status of network group tunnel creation task: {}, state: {}".format( + tunnel_setup_task, app_state + ) + ) + + res, err = client.network_group.read_pending_task( + tunnel_setup_task, tunnel_setup_task + ) + if err: + LOG.error("Failed to read pending task status: {}".format(err)) + sys.exit(-1) + + res_json = res.json() + # LOG.info("Response is : {}".format(res_json)) + app_uuid = res_json.get("status", {}).get("application_uuid", "") + app_state = res_json.get("status", {}).get("state", "provisioning") + milestone_reached = res_json.get("status", {}).get( + "milestone", NETWORK_GROUP_TUNNEL_TASK.STATUS.QUEUED + ) + + if milestone_reached in NETWORK_GROUP_TUNNEL_TASK.TERMINAL_STATES: + message_list = res_json.get("status", {}).get("message_list", []) + if milestone_reached != NETWORK_GROUP_TUNNEL_TASK.STATUS.SUCCESS: + if message_list: + LOG.error(message_list) + LOG.info( + "Network Group tunnel creation task reached terminal status: {}".format( + app_state + ) + ) + return (milestone_reached, app_uuid) + + time.sleep(poll_interval) + cnt += 1 + if cnt == 20: + break + + LOG.info( + "Task couldn't reached to terminal state in {} seconds. Exiting...".format( + poll_interval * 20 + ) + ) + return (milestone_reached, app_uuid) + + +def watch_network_group_tunnel_app(account_uuid, network_group_name, poll_interval): + + cnt = 0 + network_group_json = {} + app_state = "provisioning" + while True: + network_group_json = get_network_group_by_name(account_uuid, network_group_name) + app_uuid = ( + network_group_json.get("status", {}).get("resources", {}).get("app_uuid") + ) + app_state = ( + network_group_json.get("status", {}).get("resources", {}).get("app_status") + ) + LOG.info("Application uuid: {}, status: {}".format(app_uuid, app_state)) + if app_state == "running": + LOG.info( + "Network Group Tunnel Provisioned successfully, wait for 5 minutes for Tunnel Sync" + ) + return (network_group_json, app_state) + + time.sleep(poll_interval) + cnt += 1 + if cnt == 20: + break + + LOG.info( + "Application did not reach Running status in {}. 
Exiting...".format( + 20 * poll_interval + ) + ) + return (network_group_json, app_state) + + +def get_network_group_by_name( + account_uuid=None, network_group_name=None, tunnel_name=None +): + + client = get_api_client() + # LOG.info("Searching for Network Group using Network Group name:{}, Tunnel Name: {}".format(network_group_name, tunnel_name)) + filter_param = {} + filter_query = [] + + if account_uuid: + filter_query.append("account_uuid=={}".format(account_uuid)) + + if network_group_name: + filter_query.append("name=={}".format(network_group_name)) + + if filter_query: + filter_param = {"filter": ";".join(filter_query)} + + filter_param.update( + { + "nested_attributes": [ + "tunnel_name", + "tunnel_vm_name", + "app_uuid", + "app_status", + ] + } + ) + + res, err = client.network_group.list(params=filter_param) + if err: + LOG.error( + "Failed to get Network Group Tunnel information due to {}".format(err) + ) + sys.exit(-1) + + res_dict = res.json() + network_group_json = {} + for entity in res_dict.get("entities", []): + if ( + network_group_name + and entity.get("metadata", {}).get("name", "") == network_group_name + ): + network_group_json = entity + break + elif tunnel_name: + api_tunnel_name = ( + entity.get("status", {}).get("resources", {}).get("tunnel_name") + ) + if tunnel_name == api_tunnel_name: + network_group_json = entity + break + + return network_group_json + + +def create_network_group_tunnel_vm(tunnel_vm_payload, tunnel_name): + + client = get_api_client() + + network_group_json = get_network_group_by_tunnel_name(tunnel_name) + + network_group_uuid = network_group_json.get("metadata", {}).get("uuid") + + # Update tunnel reference in tunnel_vm_payload + tunnel_reference = ( + network_group_json.get("status", {}) + .get("resources", {}) + .get("tunnel_reference", {}) + ) + tunnel_vm_payload["spec"]["resources"]["tunnel_reference"] = tunnel_reference + + res, err = client.network_group.reset_network_group_tunnel_vm( + network_group_uuid, tunnel_vm_payload + ) + if err: + LOG.info("Failed to create network group tunnel VM due to :{}".format(err)) + sys.exit(-1) + + create_response = res.json() + + # Modify spec so that watch_tunnel_creation does not break + network_group_json["spec"] = network_group_json["status"] + + watch_tunnel_creation(network_group_json, create_response) + + +def create_network_group_tunnel(payload): + + client = get_api_client() + + res, err = client.network_group.create_network_group_tunnel(payload) + if err: + LOG.info("Failed to create network group tunnel due to :{}".format(err)) + sys.exit(-1) + + response = res.json() + + return watch_tunnel_creation(payload, response) + + +def watch_tunnel_creation(payload, response): + + client = get_api_client() + + LOG.info("Tunnel setup task details: {}".format(response)) + tunnel_setup_task_uuid = response["request_id"] + + stdout_dict = { + "name": payload["metadata"]["name"], + "tunnel_setup_task_uuid": tunnel_setup_task_uuid, + "tunnel_name": payload["spec"]["resources"]["tunnel_reference"]["name"], + "tunnel_uuid": payload["spec"]["resources"]["tunnel_reference"]["uuid"], + } + + click.echo(json.dumps(stdout_dict, indent=4, separators=(",", ": "))) + + LOG.info("Polling on network group tunnel creation task") + + task_state, _ = watch_network_group_tunnel_launch_task( + tunnel_setup_task_uuid, poll_interval=4 + ) + + if task_state in NETWORK_GROUP_TUNNEL_TASK.FAILURE_STATES: + LOG.exception( + "Network Group Tunnel creation task went to {} state".format(task_state) + ) + sys.exit(-1) + + # Now 
monitor the app state and wait for it to go into Terminal state because launch was successful
+    account_uuid = payload["spec"]["resources"]["account_reference"]["uuid"]
+
+    network_group_json, app_state = watch_network_group_tunnel_app(
+        account_uuid, payload["metadata"]["name"], poll_interval=4
+    )
+
+    # Collect Tunnel Application information
+    stdout_dict["application_status"] = app_state
+    stdout_dict["application_uuid"] = (
+        network_group_json.get("status", {}).get("resources", {}).get("app_uuid")
+    )
+
+    # Collect Tunnel Status information
+    res, err = client.tunnel.read(
+        payload["spec"]["resources"]["tunnel_reference"]["uuid"]
+    )
+    if err:
+        LOG.error(err)
+        sys.exit(-1)
+    tunnel_json = res.json()
+
+    tunnel_state = tunnel_json.get("status", {}).get("state")
+    stdout_dict["tunnel_state"] = tunnel_state_mapper(tunnel_state)
+    stdout_dict["tunnel_vm_name"] = (
+        network_group_json.get("status", {}).get("resources", {}).get("tunnel_vm_name")
+    )
+
+    click.echo(json.dumps(stdout_dict, indent=4, separators=(",", ": ")))
+
+    return network_group_json
+
+
+def create_network_group_tunnel_from_dsl(
+    network_group_tunnel_file, tunnel_name="", description=""
+):
+
+    user_network_group_tunnel_module = get_network_group_tunnel_module_from_file(
+        network_group_tunnel_file
+    )
+
+    UserProject = get_network_group_tunnel_class_from_module(
+        user_network_group_tunnel_module
+    )
+    if UserProject is None:
+        LOG.error("No NetworkGroupTunnel class found in {}".format(network_group_tunnel_file))
+        return
+
+    network_group_tunnel_payload = compile_network_group_tunnel_dsl_class(UserProject)
+
+    if tunnel_name:
+        network_group_tunnel_payload["metadata"]["name"] = tunnel_name + "_ng"
+        network_group_tunnel_payload["spec"]["name"] = tunnel_name + "_ng"
+        network_group_tunnel_payload["spec"]["resources"]["tunnel_reference"][
+            "name"
+        ] = tunnel_name
+
+    LOG.debug("Payload: {}".format(network_group_tunnel_payload))
+
+    network_group_json = create_network_group_tunnel(network_group_tunnel_payload)
+
+    if network_group_json:
+        LOG.info("Updating cache...")
+        Cache.sync_table(CACHE.ENTITY.AHV_VPC)
+        LOG.info("[Done]")
+
+
+def create_network_group_tunnel_vm_from_dsl(
+    network_group_tunnel_vm_file, network_group_tunnel_name
+):
+    user_network_group_tunnel_vm_module = get_network_group_tunnel_vm_module_from_file(
+        network_group_tunnel_vm_file
+    )
+
+    UserProject = get_network_group_tunnel_vm_class_from_module(
+        user_network_group_tunnel_vm_module
+    )
+    if UserProject is None:
+        LOG.error("No NetworkGroupTunnelVMSpec class found in {}".format(network_group_tunnel_vm_file))
+        return
+
+    network_group_tunnel_vm_payload = compile_network_group_tunnel_vm_dsl_class(
+        UserProject, network_group_tunnel_name
+    )
+    LOG.debug("Payload: {}".format(network_group_tunnel_vm_payload))
+
+    network_group_json = create_network_group_tunnel_vm(
+        network_group_tunnel_vm_payload, network_group_tunnel_name
+    )
+
+    if network_group_json:
+        LOG.info("Updating cache...")
+        Cache.sync_table(CACHE.ENTITY.AHV_VPC)
+        LOG.info("[Done]")
+
+
+def get_network_group_from_tunnel_name(tunnel_name):
+    network_group_json = get_network_group_by_name(None, None, tunnel_name)
+    if not network_group_json:
+        LOG.error("Failed to find tunnel with name: {}".format(tunnel_name))
+        sys.exit(-1)
+    return network_group_json
+
+
+def delete_network_group_tunnel(network_group_tunnel_names):
+
+    client = get_api_client()
+
+    for tunnel_name in network_group_tunnel_names:
+        network_group = get_network_group_from_tunnel_name(tunnel_name)
+
+        network_group_uuid =
network_group.get("metadata", {}).get("uuid") + app_uuid = network_group.get("status", {}).get("resources", {}).get("app_uuid") + res, err = client.application.read(app_uuid) + if err: + LOG.error("Failed to fetch Tunnel Application due to {}".format(err)) + sys.exit(-1) + app_info = res.json() + app_name = app_info.get("metadata", {}).get("name") + + tunnel_uuid = ( + network_group.get("status", {}) + .get("resources", {}) + .get("tunnel_reference", {}) + .get("uuid") + ) + LOG.info( + "Triggering Delete of Tunnel: {}, UUID: {}, Network Group UUID: {}".format( + tunnel_name, tunnel_uuid, network_group_uuid + ) + ) + res, err = client.network_group.delete_tunnel(network_group_uuid, tunnel_uuid) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + LOG.info("Delete of Network Group Tunnel triggered successfully") + response = res.json() + runlog_id = response["status"]["runlog_uuid"] + + LOG.info("Action runlog uuid: {}".format(runlog_id)) + LOG.info("Application UUID: {}".format(app_uuid)) + LOG.info("Application name: {}".format(app_name)) diff --git a/framework/calm/dsl/cli/network_group_commands.py b/framework/calm/dsl/cli/network_group_commands.py new file mode 100644 index 0000000..6933968 --- /dev/null +++ b/framework/calm/dsl/cli/network_group_commands.py @@ -0,0 +1,168 @@ +import click +import sys + +from calm.dsl.cli.projects import create_project_from_dsl + +from .network_group import ( + create_network_group_tunnel_from_dsl, + create_network_group_tunnel_vm_from_dsl, + delete_network_group_tunnel, + describe_network_group, + describe_network_group_tunnel, + get_network_group_tunnels, + get_network_groups, +) + +from .main import create, get, delete, describe, reset +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +@get.command("network-groups", feature_min_version="3.5.0") +@click.option("--limit", "-l", default=20, help="Number of results to return") +@click.option( + "--offset", "-s", default=0, help="Offset results by the specified amount" +) +@click.option( + "--quiet", "-q", is_flag=True, default=False, help="Show only project names" +) +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format", +) +def _get_network_groups(limit, offset, quiet, out): + """Get Network Groups, optionally filtered by a string""" + + get_network_groups(limit, offset, quiet, out) + + +@get.command("network-group-tunnels", feature_min_version="3.5.0") +@click.option("--limit", "-l", default=20, help="Number of results to return") +@click.option( + "--offset", "-s", default=0, help="Offset results by the specified amount" +) +@click.option( + "--quiet", "-q", is_flag=True, default=False, help="Show only project names" +) +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format", +) +def _get_network_group_tunnels(limit, offset, quiet, out): + """Get Network Group Tunnels, optionally filtered by a string""" + + get_network_group_tunnels(limit, offset, quiet, out) + + +@describe.command("network-group", feature_min_version="3.5.0") +@click.argument("network_group_name") +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format", +) +def _describe_network_group(network_group_name, out): + """Describe a Network Group""" + + describe_network_group(network_group_name, out) + + +@describe.command("network-group-tunnel", feature_min_version="3.5.0") 
+@click.argument("network_group_tunnel_name") +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format", +) +def _describe_network_group(network_group_tunnel_name, out): + """Describe a Network Group Tunnel""" + + describe_network_group_tunnel(network_group_tunnel_name, out) + + +@create.command("network-group-tunnel", feature_min_version="3.5.0") +@click.option( + "--file", + "-f", + "network_group_tunnel_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + help="Path of Network Group Tunnel spec file to upload", + required=True, +) +@click.option( + "--name", + "-n", + "name", + type=str, + default="", + help="Network Group Tunnel name(optional)", +) +@click.option( + "--description", "-d", default=None, help="Network Group description (Optional)" +) +def _create_network_group_tunnel(name, network_group_tunnel_file, description): + """Creates a Network Group and its Tunnel""" + + if network_group_tunnel_file.endswith(".py"): + create_network_group_tunnel_from_dsl( + network_group_tunnel_file, name, description + ) + else: + LOG.error("Unknown file format") + return + + +@delete.command("network-group-tunnel") +@click.argument("network_group_tunnel_names", nargs=-1) +def _delete_network_group_tunnel(network_group_tunnel_names): + """Deletes a Network Group and its Tunnel""" + + delete_network_group_tunnel(network_group_tunnel_names) + + +@reset.command( + "network-group-tunnel-vm", feature_min_version="3.7.0", experimental=True +) +@click.option( + "--file", + "-f", + "network_group_tunnel_vm_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + help="Path of Network Group Tunnel VM spec file to upload", + required=True, +) +@click.option( + "--tunnel_name", + "-n", + "network_group_tunnel_name", + type=str, + required=True, + help="Network Group Tunnel name", +) +def _reset_network_group_tunnel_vm( + network_group_tunnel_vm_file, network_group_tunnel_name +): + """Deploy a new Tunnel VM for a Network Group Tunnel""" + + if network_group_tunnel_vm_file.endswith(".py"): + create_network_group_tunnel_vm_from_dsl( + network_group_tunnel_vm_file, network_group_tunnel_name + ) + else: + LOG.error("Unknown file format") + return diff --git a/framework/calm/dsl/cli/project_commands.py b/framework/calm/dsl/cli/project_commands.py new file mode 100644 index 0000000..aa9b200 --- /dev/null +++ b/framework/calm/dsl/cli/project_commands.py @@ -0,0 +1,261 @@ +import click +import sys + +from .projects import ( + get_projects, + compile_project_command, + create_project_from_dsl, + describe_project, + delete_project, + update_project_from_dsl, + update_project_using_cli_switches, +) +from .main import create, get, update, delete, describe, compile +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +@get.command("projects") +@click.option("--name", "-n", default=None, help="Search for projects by name") +@click.option( + "--filter", "filter_by", "-f", default=None, help="Filter projects by this string" +) +@click.option("--limit", "-l", default=20, help="Number of results to return") +@click.option( + "--offset", "-s", default=0, help="Offset results by the specified amount" +) +@click.option( + "--quiet", "-q", is_flag=True, default=False, help="Show only project names" +) +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format", +) +def _get_projects(name, filter_by, 
limit, offset, quiet, out): + """Get projects, optionally filtered by a string""" + + get_projects(name, filter_by, limit, offset, quiet, out) + + +@compile.command("project") +@click.option( + "--file", + "-f", + "project_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + required=True, + help="Path of Project file", +) +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["json", "yaml"]), + default="json", + help="output format", +) +def _compile_project_command(project_file, out): + """Compiles a DSL (Python) project into JSON or YAML""" + compile_project_command(project_file, out) + + +@create.command("project") +@click.option( + "--file", + "-f", + "project_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + help="Path of Project file to upload", + required=True, +) +@click.option( + "--name", "-n", "project_name", type=str, default="", help="Project name(optional)" +) +@click.option( + "--description", "-d", default=None, help="Blueprint description (Optional)" +) +@click.option( + "--no-cache-update", + "no_cache_update", + is_flag=True, + default=False, + help="if true, cache is not updated for project", +) +def _create_project(project_file, project_name, description, no_cache_update): + """Creates a project""" + + if project_file.endswith(".py"): + create_project_from_dsl( + project_file, project_name, description, no_cache_update + ) + else: + LOG.error("Unknown file format") + return + + +@delete.command("project") +@click.argument("project_names", nargs=-1) +@click.option( + "--no-cache-update", + "no_cache_update", + is_flag=True, + default=False, + help="if true, cache is not updated for project", +) +def _delete_project(project_names, no_cache_update): + """Deletes a project""" + + delete_project(project_names, no_cache_update) + + +@describe.command("project") +@click.argument("project_name") +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format", +) +def _describe_project(project_name, out): + """Describe a project""" + + describe_project(project_name, out) + + +@update.command("project") +@click.argument("project_name") +@click.option( + "--file", + "-f", + "project_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + help="Path of Project file to upload", +) +@click.option( + "--add_user", + "-au", + "add_user_list", + help="name of user to be added", + multiple=True, + default=[], +) +@click.option( + "--add_group", + "-ag", + "add_group_list", + help="name of group to be added", + multiple=True, + default=[], +) +@click.option( + "--add_account", + "-aa", + "add_account_list", + help="name of account to be added", + multiple=True, + default=[], +) +@click.option( + "--remove_account", + "-ra", + "remove_account_list", + help="name of account to be removed", + multiple=True, + default=[], +) +@click.option( + "--remove_user", + "-ru", + "remove_user_list", + help="name of user to be removed", + multiple=True, + default=[], +) +@click.option( + "--remove_group", + "-rg", + "remove_group_list", + help="name of group to be removed", + multiple=True, + default=[], +) +@click.option( + "--no-cache-update", + "no_cache_update", + is_flag=True, + default=False, + help="if true, cache is not updated for project", +) +@click.option( + "--append-only", + "append_only", + is_flag=True, + default=False, + help="if true, will only append the users, groups, subnets, external networks, accounts, vpc 
and cluster from the project_file", +) +def _update_project( + project_name, + project_file, + add_user_list, + add_group_list, + add_account_list, + remove_account_list, + remove_user_list, + remove_group_list, + no_cache_update, + append_only, +): + """ + Updates a project. + + \b + Usability: + a. If project_file is given, command will use file to update project. Environment updation is not allowed + b. If project_file is not given , project will be updated based on other cli switches + i.e. add_user, add_group, remove_user, remove_group + c. Project ACPs will be updated synchronously you remove users/groups from project + """ + + if not ( + project_file + or add_user_list + or add_group_list + or add_account_list + or remove_account_list + or remove_user_list + or remove_group_list + ): + LOG.error( + "Either project file or add/remove paramters for users/groups should be given" + ) + sys.exit(-1) + + if project_file: + if project_file.endswith(".py"): + update_project_from_dsl( + project_name=project_name, + project_file=project_file, + no_cache_update=no_cache_update, + append_only=append_only, + ) + return + else: + LOG.error("Unknown file format") + return + + update_project_using_cli_switches( + project_name=project_name, + add_user_list=add_user_list, + add_group_list=add_group_list, + remove_user_list=remove_user_list, + remove_group_list=remove_group_list, + add_account_list=add_account_list, + remove_account_list=remove_account_list, + ) diff --git a/framework/calm/dsl/cli/projects.py b/framework/calm/dsl/cli/projects.py new file mode 100644 index 0000000..fc5d50b --- /dev/null +++ b/framework/calm/dsl/cli/projects.py @@ -0,0 +1,1404 @@ +from inspect import getargs +import time +import click +import arrow +import json +import sys +import copy +from distutils.version import LooseVersion as LV +from prettytable import PrettyTable +from ruamel import yaml + +from calm.dsl.builtins import create_project_payload, Project +from calm.dsl.api import get_api_client, get_resource_api +from calm.dsl.config import get_context + +from .utils import get_name_query, highlight_text +from .environments import create_environment_from_dsl_class +from calm.dsl.tools import get_module_from_file +from calm.dsl.log import get_logging_handle +from calm.dsl.providers import get_provider +from calm.dsl.builtins.models.helper.common import get_project +from calm.dsl.store import Cache, Version +from calm.dsl.constants import CACHE, PROJECT_TASK + +LOG = get_logging_handle(__name__) + + +def get_projects(name, filter_by, limit, offset, quiet, out): + """Get the projects, optionally filtered by a string""" + + client = get_api_client() + ContextObj = get_context() + server_config = ContextObj.get_server_config() + + params = {"length": limit, "offset": offset} + filter_query = "" + if name: + filter_query = get_name_query([name]) + if filter_by: + filter_query = filter_query + ";(" + filter_by + ")" + if filter_query.startswith(";"): + filter_query = filter_query[1:] + + # right now there is no support for filter by state of project + + if filter_query: + params["filter"] = filter_query + + res, err = client.project.list(params=params) + + if err: + pc_ip = server_config["pc_ip"] + LOG.warning("Cannot fetch projects from {}".format(pc_ip)) + return + + res = res.json() + total_matches = res["metadata"]["total_matches"] + if total_matches > limit: + LOG.warning( + "Displaying {} out of {} entities. 
Please use --limit and --offset option for more results.".format( + limit, total_matches + ) + ) + + if out == "json": + click.echo(json.dumps(res, indent=4, separators=(",", ": "))) + return + + json_rows = res["entities"] + if not json_rows: + click.echo(highlight_text("No project found !!!\n")) + return + + if quiet: + for _row in json_rows: + row = _row["status"] + click.echo(highlight_text(row["name"])) + return + + table = PrettyTable() + table.field_names = [ + "NAME", + "STATE", + "OWNER", + "USER COUNT", + "CREATED ON", + "LAST UPDATED", + "UUID", + ] + + for _row in json_rows: + row = _row["status"] + metadata = _row["metadata"] + + creation_time = arrow.get(metadata["creation_time"]).timestamp + last_update_time = arrow.get(metadata["last_update_time"]) + if "owner_reference" in metadata: + owner_reference_name = metadata["owner_reference"]["name"] + else: + owner_reference_name = "-" + + table.add_row( + [ + highlight_text(row["name"]), + highlight_text(row["state"]), + highlight_text(owner_reference_name), + highlight_text(len(row["resources"]["user_reference_list"])), + highlight_text(time.ctime(creation_time)), + "{}".format(last_update_time.humanize()), + highlight_text(metadata["uuid"]), + ] + ) + click.echo(table) + + +def watch_project_task(project_uuid, task_uuid, poll_interval=4): + """poll project tasks""" + + client = get_api_client() + cnt = 0 + while True: + LOG.info("Fetching status of project task (uuid={})".format(task_uuid)) + res, err = client.project.read_pending_task(project_uuid, task_uuid) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + status = res["status"]["state"] + LOG.info(status) + + if status in PROJECT_TASK.TERMINAL_STATES: + message_list = res["status"].get("message_list") + if status != PROJECT_TASK.STATUS.SUCCESS and message_list: + LOG.error(message_list) + return status + + time.sleep(poll_interval) + cnt += 1 + if cnt == 10: + break + + LOG.info( + "Task couldn't reached to terminal state in {} seconds. 
Exiting...".format( + poll_interval * 10 + ) + ) + + +def convert_groups_to_lowercase(group_list): + group_mutable_list = [] + for group in group_list: + group_mutable_list.append(group.lower()) + group_list = tuple(group_mutable_list) + return group_list + + +def get_project_module_from_file(project_file): + """Returns Project module given a user project dsl file (.py)""" + return get_module_from_file("calm.dsl.user_project", project_file) + + +def get_project_class_from_module(user_project_module): + """Returns project class given a module""" + + UserProject = None + for item in dir(user_project_module): + obj = getattr(user_project_module, item) + if isinstance(obj, type(Project)): + if obj.__bases__[0] == Project: + UserProject = obj + + return UserProject + + +def compile_project_dsl_class(project_class): + envs = [] + if hasattr(project_class, "envs"): + envs = getattr(project_class, "envs", []) + project_class.envs = [] + if hasattr(project_class, "default_environment"): + project_class.default_environment = {} + + # Adding environment infra to project + for env in envs: + providers = getattr(env, "providers", []) + for env_pdr in providers: + env_pdr_account = env_pdr.account_reference.get_dict() + _a_found = False + for proj_pdr in getattr(project_class, "providers", []): + proj_pdr_account = proj_pdr.account_reference.get_dict() + if env_pdr_account["name"] == proj_pdr_account["name"]: + _a_found = True + + # If env account subnets not present in project, then add them by default + if proj_pdr.type == "nutanix_pc": + env_pdr_subnets = env_pdr.subnet_reference_list + env_pdr_ext_subnets = env_pdr.external_network_list + + proj_pdr_subnets = proj_pdr.subnet_reference_list + proj_pdr_ext_subnets = proj_pdr.external_network_list + + for _s in env_pdr_subnets: + _s_uuid = _s.get_dict()["uuid"] + _s_found = False + + for _ps in proj_pdr_subnets: + if _ps.get_dict()["uuid"] == _s_uuid: + _s_found = True + break + + if not _s_found: + proj_pdr.subnet_reference_list.append(_s) + + for _s in env_pdr_ext_subnets: + _s_uuid = _s.get_dict()["uuid"] + _s_found = False + + for _ps in proj_pdr_ext_subnets: + if _ps.get_dict()["uuid"] == _s_uuid: + _s_found = True + break + + if not _s_found: + proj_pdr.external_network_list.append(_s) + + # If environment account not available in project add it to project + if not _a_found: + project_class.providers.append(env_pdr) + + project_payload = None + UserProjectPayload, _ = create_project_payload(project_class) + project_payload = UserProjectPayload.get_dict() + + return project_payload + + +def compile_project_command(project_file, out): + + user_project_module = get_project_module_from_file(project_file) + UserProject = get_project_class_from_module(user_project_module) + if UserProject is None: + LOG.error("User project not found in {}".format(project_file)) + return + + project_payload = compile_project_dsl_class(UserProject) + + if out == "json": + click.echo(json.dumps(project_payload, indent=4, separators=(",", ": "))) + elif out == "yaml": + click.echo(yaml.dump(project_payload, default_flow_style=False)) + else: + LOG.error("Unknown output format {} given".format(out)) + + +def create_project(project_payload, name="", description=""): + + client = get_api_client() + + project_payload.pop("status", None) + + if name: + project_payload["spec"]["name"] = name + project_payload["metadata"]["name"] = name + else: + name = project_payload["spec"]["name"] + + if description: + project_payload["spec"]["description"] = description + + 
LOG.info("Creating project '{}'".format(name)) + res, err = client.project.create(project_payload) + if err: + LOG.error(err) + sys.exit(-1) + + project = res.json() + stdout_dict = { + "name": project["spec"]["name"], + "uuid": project["metadata"]["uuid"], + "execution_context": project["status"]["execution_context"], + } + click.echo(json.dumps(stdout_dict, indent=4, separators=(",", ": "))) + + LOG.info("Polling on project creation task") + task_state = watch_project_task( + project["metadata"]["uuid"], + project["status"]["execution_context"]["task_uuid"], + poll_interval=4, + ) + if task_state in PROJECT_TASK.FAILURE_STATES: + LOG.exception("Project creation task went to {} state".format(task_state)) + sys.exit(-1) + + return stdout_dict + + +def update_project(project_uuid, project_payload): + + client = get_api_client() + calm_version = Version.get_version("Calm") + + project_payload.pop("status", None) + res, err = client.project.update(project_uuid, project_payload) + if err: + LOG.error(err) + sys.exit(-1) + + project = res.json() + if LV(calm_version) >= LV("3.5.2") and LV(calm_version) < LV("3.6.1"): + project_name = project["spec"]["project_detail"]["name"] + else: + project_name = project["spec"]["name"] + stdout_dict = { + "name": project_name, + "uuid": project["metadata"]["uuid"], + "execution_context": project["status"]["execution_context"], + } + click.echo(json.dumps(stdout_dict, indent=4, separators=(",", ": "))) + + LOG.info("Polling on project updation task") + task_state = watch_project_task( + project["metadata"]["uuid"], project["status"]["execution_context"]["task_uuid"] + ) + if task_state in PROJECT_TASK.FAILURE_STATES: + LOG.exception("Project updation task went to {} state".format(task_state)) + sys.exit(-1) + + return stdout_dict + + +def create_project_from_dsl( + project_file, project_name, description="", no_cache_update=False +): + """Steps: + 1. Creation of project without env + 2. Creation of env + 3. 
Updation of project for adding env details + """ + + client = get_api_client() + + user_project_module = get_project_module_from_file(project_file) + UserProject = get_project_class_from_module(user_project_module) + if UserProject is None: + LOG.error("User project not found in {}".format(project_file)) + return + + envs = [] + if hasattr(UserProject, "envs"): + envs = getattr(UserProject, "envs", []) + + default_environment_name = "" + if ( + hasattr(UserProject, "default_environment") + and UserProject.default_environment is not None + ): + default_environment = getattr(UserProject, "default_environment", None) + UserProject.default_environment = {} + default_environment_name = default_environment.__name__ + + if envs and not default_environment_name: + default_environment_name = envs[0].__name__ + + calm_version = Version.get_version("Calm") + if LV(calm_version) < LV("3.2.0"): + for _env in envs: + env_name = _env.__name__ + LOG.info( + "Searching for existing environments with name '{}'".format(env_name) + ) + res, err = client.environment.list({"filter": "name=={}".format(env_name)}) + if err: + LOG.error(err) + sys.exit(-1) + + res = res.json() + if res["metadata"]["total_matches"]: + LOG.error("Environment with name '{}' already exists".format(env_name)) + + LOG.info("No existing environment found with name '{}'".format(env_name)) + + if envs and no_cache_update: + LOG.error("Environment create is not allowed when cache update is disabled") + return + + # Creation of project + project_payload = compile_project_dsl_class(UserProject) + project_data = create_project( + project_payload, name=project_name, description=description + ) + project_name = project_data["name"] + project_uuid = project_data["uuid"] + + # Update project in cache + LOG.info("Updating projects cache") + Cache.add_one(entity_type=CACHE.ENTITY.PROJECT, uuid=project_uuid) + LOG.info("[Done]") + + if envs: + + # As ahv helpers in environment should use account from project accounts + # updating the context + ContextObj = get_context() + ContextObj.update_project_context(project_name=project_name) + + default_environment_ref = {} + + # Create environment + env_ref_list = [] + for env_obj in envs: + env_res_data = create_environment_from_dsl_class(env_obj) + env_ref = {"kind": "environment", "uuid": env_res_data["uuid"]} + env_ref_list.append(env_ref) + if ( + default_environment_name + and env_res_data["name"] == default_environment_name + ): + default_environment_ref = env_ref + + LOG.info("Updating project '{}' for adding environment".format(project_name)) + project_payload = get_project(project_uuid=project_uuid) + + project_payload.pop("status", None) + project_payload["spec"]["resources"][ + "environment_reference_list" + ] = env_ref_list + + default_environment_ref = default_environment_ref or { + "kind": "environment", + "uuid": env_ref_list[0]["uuid"], + } + + # default_environment_reference added in 3.2 + calm_version = Version.get_version("Calm") + if LV(calm_version) >= LV("3.2.0"): + project_payload["spec"]["resources"][ + "default_environment_reference" + ] = default_environment_ref + + update_project(project_uuid=project_uuid, project_payload=project_payload) + + # Reset the context changes + ContextObj.reset_configuration() + + if no_cache_update: + LOG.info("Skipping environments cache update") + else: + # Update environments in cache + LOG.info("Updating environments cache ...") + for _e_item in env_ref_list: + Cache.add_one( + entity_type=CACHE.ENTITY.ENVIRONMENT, uuid=_e_item["uuid"] + ) + 
LOG.info("[Done]") + + +def describe_project(project_name, out): + + client = get_api_client() + project = get_project(project_name) + + if out == "json": + click.echo(json.dumps(project, indent=4, separators=(",", ": "))) + return + + click.echo("\n----Project Summary----\n") + click.echo( + "Name: " + + highlight_text(project_name) + + " (uuid: " + + highlight_text(project["metadata"]["uuid"]) + + ")" + ) + + click.echo("Status: " + highlight_text(project["status"]["state"])) + click.echo( + "Owner: " + highlight_text(project["metadata"]["owner_reference"]["name"]) + ) + + created_on = arrow.get(project["metadata"]["creation_time"]) + past = created_on.humanize() + click.echo( + "Created on: {} ({})".format( + highlight_text(time.ctime(created_on.timestamp)), highlight_text(past) + ) + ) + + project_resources = project["status"].get("resources", {}) + environments = project_resources.get("environment_reference_list", []) + click.echo("Environment Registered: ", nl=False) + + if not environments: + click.echo(highlight_text("No")) + else: # Handle Multiple Environments + click.echo( + "{} ( uuid: {} )".format(highlight_text("Yes"), environments[0]["uuid"]) + ) + + users = project_resources.get("user_reference_list", []) + if users: + user_uuid_name_map = client.user.get_uuid_name_map({"length": 1000}) + click.echo("\nRegistered Users: \n--------------------") + for user in users: + click.echo("\t" + highlight_text(user_uuid_name_map[user["uuid"]])) + + groups = project_resources.get("external_user_group_reference_list", []) + if groups: + usergroup_uuid_name_map = client.group.get_uuid_name_map({"length": 1000}) + click.echo("\nRegistered Groups: \n--------------------") + for group in groups: + click.echo("\t" + highlight_text(usergroup_uuid_name_map[group["uuid"]])) + + click.echo("\nInfrastructure: \n---------------") + + subnets_list = [] + for subnet in project_resources["subnet_reference_list"]: + subnets_list.append(subnet["uuid"]) + + # Extending external subnet's list from remote account + for subnet in project_resources.get("external_network_list", []): + subnets_list.append(subnet["uuid"]) + + clusters_list = [] + for cluster in project_resources.get("cluster_reference_list", []): + clusters_list.append(cluster["uuid"]) + + vpcs_list = [] + for vpc in project_resources.get("vpc_reference_list", []): + vpcs_list.append(vpc["uuid"]) + + accounts = project_resources["account_reference_list"] + for account in accounts: + account_uuid = account["uuid"] + account_cache_data = Cache.get_entity_data_using_uuid( + entity_type="account", uuid=account_uuid + ) + if not account_cache_data: + LOG.error( + "Account (uuid={}) not found. 
Please update cache".format(account_uuid) + ) + sys.exit(-1) + + account_type = account_cache_data["provider_type"] + click.echo("\nAccount Type: " + highlight_text(account_type.upper())) + click.echo( + "Name: {} (uuid: {})".format( + highlight_text(account_cache_data["name"]), + highlight_text(account_cache_data["uuid"]), + ) + ) + + if account_type == "nutanix_pc" and subnets_list: + AhvVmProvider = get_provider("AHV_VM") + AhvObj = AhvVmProvider.get_api_obj() + + filter_query = "_entity_id_=={}".format("|".join(subnets_list)) + nics = AhvObj.subnets(account_uuid=account_uuid, filter_query=filter_query) + nics = nics["entities"] + + # passing entity ids in filter doesn't work for clusters list call + clusters = AhvObj.clusters(account_uuid=account_uuid).get("entities", []) + vpcs = AhvObj.vpcs(account_uuid=account_uuid).get("entities", []) + + vpc_uuid_name_map = {} + + click.echo("\n\tWhitelisted Clusters:\n\t--------------------") + for cluster in clusters: + if cluster["metadata"]["uuid"] in clusters_list: + click.echo( + "\tName: {} (uuid: {})".format( + highlight_text(cluster["status"]["name"]), + highlight_text(cluster["metadata"]["uuid"]), + ) + ) + + click.echo("\n\tWhitelited VPCs:\n\t--------------------") + for vpc in vpcs: + if vpc["metadata"]["uuid"] in vpcs_list: + vpc_name = vpc["status"]["name"] + click.echo( + "\tName: {} (uuid: {})".format( + highlight_text(vpc_name), + highlight_text(vpc["metadata"]["uuid"]), + ) + ) + vpc_uuid_name_map[vpc["metadata"]["uuid"]] = vpc_name + + click.echo("\n\tWhitelisted Subnets:\n\t--------------------") + overlay_nics = [] + for nic in nics: + if nic["status"]["resources"].get("subnet_type", "") != "VLAN": + overlay_nics.append(nic) + continue + + nic_name = nic["status"]["name"] + vlan_id = nic["status"]["resources"]["vlan_id"] + cluster_name = nic["status"]["cluster_reference"]["name"] + nic_uuid = nic["metadata"]["uuid"] + + click.echo( + "\tName: {} (uuid: {})\tType: VLAN\tVLAN ID: {}\tCluster Name: {}".format( + highlight_text(nic_name), + highlight_text(nic_uuid), + highlight_text(vlan_id), + highlight_text(cluster_name), + ) + ) + for nic in overlay_nics: + nic_name = nic["status"]["name"] + nic_uuid = nic["metadata"]["uuid"] + vpc_name = vpc_uuid_name_map.get( + nic["status"]["resources"]["vpc_reference"]["uuid"], "" + ) + if vpc_name: + click.echo( + "\tName: {} (uuid: {})\tType: Overlay\tVPC Name: {}".format( + highlight_text(nic_name), + highlight_text(nic_uuid), + highlight_text(vpc_name), + ) + ) + else: + click.echo( + "\tName: {} (uuid: {})\tType: Overlay".format( + highlight_text(nic_name), + highlight_text(nic_uuid), + ) + ) + + if not accounts: + click.echo(highlight_text("No provider's account registered")) + + quota_resources = project_resources.get("resource_domain", {}).get("resources", []) + if quota_resources: + click.echo("\nQuotas: \n-------") + for qr in quota_resources: + qk = qr["resource_type"] + qv = qr["limit"] + if qr["units"] == "BYTES": + qv = qv // 1073741824 + qv = str(qv) + " (GiB)" + + click.echo("\t{} : {}".format(qk, highlight_text(qv))) + + +def delete_project(project_names, no_cache_update=False): + + client = get_api_client() + project_name_uuid_map = client.project.get_name_uuid_map() + deleted_projects_uuids = [] + for project_name in project_names: + project_id = project_name_uuid_map.get(project_name, "") + if not project_id: + LOG.warning("Project {} not found.".format(project_name)) + continue + + LOG.info("Deleting project '{}'".format(project_name)) + res, err = 
client.project.delete(project_id) + if err: + LOG.exception("[{}] - {}".format(err["code"], err["error"])) + continue + + deleted_projects_uuids.append(project_id) + + LOG.info("Polling on project deletion task") + res = res.json() + task_state = watch_project_task( + project_id, res["status"]["execution_context"]["task_uuid"], poll_interval=4 + ) + if task_state in PROJECT_TASK.FAILURE_STATES: + LOG.exception("Project deletion task went to {} state".format(task_state)) + sys.exit(-1) + + # Update projects in cache if any project has been deleted + if deleted_projects_uuids: + if no_cache_update: + LOG.info("skipping projects cache update") + else: + LOG.info("Updating projects cache ...") + for _proj_id in deleted_projects_uuids: + Cache.delete_one(entity_type=CACHE.ENTITY.PROJECT, uuid=_proj_id) + LOG.info("[Done]") + + +def update_project_from_dsl( + project_name, project_file, no_cache_update=False, append_only=False +): + """ + Flow: + 1. compile to get project_payload from the file + 2. If apppend_only, then update using old project data + 3. Calculate the data for acp updations + 4. If not append only, then do project_usage calculation + 5. Update project: PUT call + 6. If project is updated successfully, then do acp updations + """ + + client = get_api_client() + calm_version = Version.get_version("Calm") + + user_project_module = get_project_module_from_file(project_file) + UserProject = get_project_class_from_module(user_project_module) + if UserProject is None: + LOG.error("User project not found in {}".format(project_file)) + return + + # Environment updation is not allowed using dsl file + if hasattr(UserProject, "envs"): + UserProject.envs = [] + + project_payload = compile_project_dsl_class(UserProject) + + LOG.info("Fetching project '{}' details".format(project_name)) + params = {"length": 250, "filter": "name=={}".format(project_name)} + project_name_uuid_map = client.project.get_name_uuid_map(params) + project_uuid = project_name_uuid_map.get(project_name, "") + + if not project_uuid: + LOG.error("Project {} not found.".format(project_name)) + sys.exit(-1) + + res, err = client.project.read(project_uuid) + if err: + LOG.error(err) + sys.exit(-1) + + old_project_payload = res.json() + + if append_only: + update_payload_from_old_project_data( + project_payload, copy.deepcopy(old_project_payload) + ) + + # Find users already registered + updated_project_user_list = [] + for _user in project_payload["spec"]["resources"].get("user_reference_list", []): + updated_project_user_list.append(_user["name"]) + + updated_project_groups_list = [] + for _group in project_payload["spec"]["resources"].get( + "external_user_group_reference_list", [] + ): + updated_project_groups_list.append(_group["name"]) + + acp_remove_user_list = [] + acp_remove_group_list = [] + + for _user in old_project_payload["spec"]["resources"].get( + "user_reference_list", [] + ): + if _user["name"] not in updated_project_user_list: + acp_remove_user_list.append(_user["name"]) + + for _group in old_project_payload["spec"]["resources"].get( + "external_user_group_reference_list", [] + ): + if _group["name"] not in updated_project_groups_list: + acp_remove_group_list.append(_group["name"]) + + # Environment updation is not allowed, so adding existing environments + old_env_refs = old_project_payload["spec"]["resources"].get( + "environment_reference_list", [] + ) + if old_env_refs: + project_payload["spec"]["resources"][ + "environment_reference_list" + ] = old_env_refs + + default_env_ref = 
old_project_payload["spec"]["resources"].get( + "default_environment_reference", {} + ) + if default_env_ref: + project_payload["spec"]["resources"][ + "default_environment_reference" + ] = default_env_ref + + if not append_only: + project_usage_payload = get_project_usage_payload( + project_payload, old_project_payload + ) + + LOG.info("Checking project usage") + res, err = client.project.usage(project_uuid, project_usage_payload) + + if err: + LOG.error(err) + sys.exit(-1) + + project_usage = res.json() + msg_list = [] + should_update_project = is_project_updation_allowed(project_usage, msg_list) + if not should_update_project: + LOG.error("Project updation failed") + click.echo("\n".join(msg_list)) + click.echo( + json.dumps( + project_usage["status"].get("resources", {}), + indent=4, + separators=(",", ": "), + ) + ) + sys.exit(-1) + + # Setting correct metadata for update call + project_payload["metadata"] = old_project_payload["metadata"] + + # As name of project is not editable + project_payload["spec"]["name"] = project_name + project_payload["metadata"]["name"] = project_name + + # TODO removed users should be removed from acps also. + LOG.info("Updating project '{}'".format(project_name)) + res, err = client.project.update(project_uuid, project_payload) + if err: + LOG.error(err) + sys.exit(-1) + + res = res.json() + if LV(calm_version) >= LV("3.5.2") and LV(calm_version) < LV("3.6.1"): + name = res["spec"]["project_detail"]["name"] + else: + name = res["spec"]["name"] + + stdout_dict = { + "name": name, + "uuid": res["metadata"]["uuid"], + "execution_context": res["status"]["execution_context"], + } + + click.echo(json.dumps(stdout_dict, indent=4, separators=(",", ": "))) + + LOG.info("Polling on project updation task") + task_state = watch_project_task( + project_uuid, res["status"]["execution_context"]["task_uuid"], poll_interval=4 + ) + if task_state not in PROJECT_TASK.FAILURE_STATES: + # Remove project removed user and groups from acps + if acp_remove_user_list or acp_remove_group_list: + LOG.info("Updating project acps") + remove_users_from_project_acps( + project_uuid=project_uuid, + remove_user_list=acp_remove_user_list, + remove_group_list=acp_remove_group_list, + ) + else: + LOG.exception("Project updation task went to {} state".format(task_state)) + sys.exit(-1) + + if no_cache_update: + LOG.info("Skipping projects cache update") + else: + LOG.info("Updating projects cache ...") + Cache.update_one(entity_type=CACHE.ENTITY.PROJECT, uuid=project_uuid) + LOG.info("[Done]") + + +def update_project_using_cli_switches( + project_name, + add_user_list, + add_group_list, + add_account_list, + remove_account_list, + remove_user_list, + remove_group_list, +): + + client = get_api_client() + calm_version = Version.get_version("Calm") + + LOG.info("Fetching project '{}' details".format(project_name)) + params = {"length": 250, "filter": "name=={}".format(project_name)} + project_name_uuid_map = client.project.get_name_uuid_map(params) + project_uuid = project_name_uuid_map.get(project_name, "") + + if not project_uuid: + LOG.error("Project {} not found.".format(project_name)) + sys.exit(-1) + + res, err = client.project.read(project_uuid) + if err: + LOG.error(err) + sys.exit(-1) + + project_payload = res.json() + project_payload.pop("status", None) + + project_usage_payload = { + "filter": {"account_reference_list": [], "subnet_reference_list": []} + } + project_resources = project_payload["spec"]["resources"] + project_users = [] + project_groups = [] + for user in 
project_resources.get("user_reference_list", []): + project_users.append(user["name"]) + + for group in project_resources.get("external_user_group_reference_list", []): + project_groups.append(group["name"]) + + # Checking remove users/groups are part of project or not + if not set(remove_user_list).issubset(set(project_users)): + LOG.error( + "Users {} are not registered in project".format( + set(remove_user_list).difference(set(project_users)) + ) + ) + sys.exit(-1) + + remove_group_list = convert_groups_to_lowercase(remove_group_list) + if not set(remove_group_list).issubset(set(project_groups)): + LOG.error( + "Groups {} are not registered in project".format( + set(remove_group_list).difference(set(project_groups)) + ) + ) + sys.exit(-1) + + # Append users + updated_user_reference_list = [] + updated_group_reference_list = [] + + acp_remove_user_list = [] + acp_remove_group_list = [] + + for user in project_resources.get("user_reference_list", []): + if user["name"] not in remove_user_list: + updated_user_reference_list.append(user) + else: + acp_remove_user_list.append(user["name"]) + + for group in project_resources.get("external_user_group_reference_list", []): + if group["name"] not in remove_group_list: + updated_group_reference_list.append(group) + else: + acp_remove_group_list.append(group["name"]) + + user_name_uuid_map = client.user.get_name_uuid_map({"length": 1000}) + for user in add_user_list: + updated_user_reference_list.append( + {"kind": "user", "name": user, "uuid": user_name_uuid_map[user]} + ) + + usergroup_name_uuid_map = client.group.get_name_uuid_map({"length": 1000}) + + add_group_list = convert_groups_to_lowercase(add_group_list) + for group in add_group_list: + updated_group_reference_list.append( + { + "kind": "user_group", + "name": group, + "uuid": usergroup_name_uuid_map[group], + } + ) + + project_resources["user_reference_list"] = updated_user_reference_list + project_resources[ + "external_user_group_reference_list" + ] = updated_group_reference_list + + # Updating accounts data + if not set(add_account_list).isdisjoint(set(remove_account_list)): + LOG.error( + "Same accounts found in both added and removing list {}".format( + set(add_account_list).intersection(set(remove_account_list)) + ) + ) + sys.exit("Same accounts found in both added and removing list") + + project_accounts = project_resources.get("account_reference_list", []) + updated_proj_accounts = [] + for _acc in project_accounts: + _acc_uuid = _acc["uuid"] + account_cache_data = Cache.get_entity_data_using_uuid( + entity_type="account", uuid=_acc_uuid + ) + if not account_cache_data: + LOG.error( + "Account (uuid={}) not found. Please update cache".format(_acc_uuid) + ) + sys.exit("Account (uuid={}) not found".format(_acc_uuid)) + + if account_cache_data["name"] not in remove_account_list: + updated_proj_accounts.append(_acc) + else: + project_usage_payload["filter"]["account_reference_list"].append(_acc_uuid) + + project_account_uuids = [_e["uuid"] for _e in updated_proj_accounts] + for _acc in add_account_list: + account_cache_data = Cache.get_entity_data(entity_type="account", name=_acc) + if not account_cache_data: + LOG.error("Account (name={}) not found. 
Please update cache".format(_acc)) + sys.exit("Account (name={}) not found".format(_acc)) + + # Account already present + if account_cache_data["uuid"] in project_account_uuids: + continue + + updated_proj_accounts.append( + {"kind": "account", "name": _acc, "uuid": account_cache_data["uuid"]} + ) + + project_resources["account_reference_list"] = updated_proj_accounts + + LOG.info("Checking project usage") + res, err = client.project.usage(project_uuid, project_usage_payload) + if err: + LOG.error(err) + sys.exit(-1) + + project_usage = res.json() + msg_list = [] + should_update_project = is_project_updation_allowed(project_usage, msg_list) + if not should_update_project: + LOG.error("Project updation failed") + click.echo("\n".join(msg_list)) + click.echo( + json.dumps( + project_usage["status"].get("resources", {}), + indent=4, + separators=(",", ": "), + ) + ) + sys.exit(-1) + + LOG.info("Updating project '{}'".format(project_name)) + res, err = client.project.update(project_uuid, project_payload) + if err: + LOG.error(err) + sys.exit(-1) + + res = res.json() + if LV(calm_version) >= LV("3.5.2") and LV(calm_version) < LV("3.6.1"): + name = res["spec"]["project_detail"]["name"] + else: + name = res["spec"]["name"] + stdout_dict = { + "name": name, + "uuid": res["metadata"]["uuid"], + "execution_context": res["status"]["execution_context"], + } + + click.echo(json.dumps(stdout_dict, indent=4, separators=(",", ": "))) + + # Remove project removed user and groups from acps + LOG.info("Polling on project updation task") + task_state = watch_project_task( + project_uuid, res["status"]["execution_context"]["task_uuid"], poll_interval=4 + ) + if task_state not in PROJECT_TASK.FAILURE_STATES: + if acp_remove_user_list or acp_remove_group_list: + LOG.info("Updating project acps") + remove_users_from_project_acps( + project_uuid=project_uuid, + remove_user_list=acp_remove_user_list, + remove_group_list=acp_remove_group_list, + ) + else: + LOG.exception("Project updation task went to {} state".format(task_state)) + sys.exit(-1) + + LOG.info("Updating projects cache ...") + Cache.update_one(entity_type=CACHE.ENTITY.PROJECT, uuid=project_uuid) + LOG.info("[Done]") + + +def remove_users_from_project_acps(project_uuid, remove_user_list, remove_group_list): + + client = get_api_client() + ProjectInternalObj = get_resource_api("projects_internal", client.connection) + res, err = ProjectInternalObj.read(project_uuid) + if err: + LOG.error(err) + sys.exit(-1) + + project_payload = res.json() + project_payload.pop("status", None) + + for _acp in project_payload["spec"].get("access_control_policy_list", []): + _acp["operation"] = "UPDATE" + _acp_resources = _acp["acp"]["resources"] + updated_users = [] + updated_groups = [] + + for _user in _acp_resources.get("user_reference_list", []): + if _user["name"] not in remove_user_list: + updated_users.append(_user) + + for _group in _acp_resources.get("user_group_reference_list", []): + if _group["name"] not in remove_group_list: + updated_groups.append(_group) + + _acp_resources["user_reference_list"] = updated_users + _acp_resources["user_group_reference_list"] = updated_groups + + res, err = ProjectInternalObj.update(project_uuid, project_payload) + if err: + LOG.error(err) + sys.exit(-1) + + res = res.json() + LOG.info("Polling on task for updating project ACPS") + watch_project_task( + project_uuid, res["status"]["execution_context"]["task_uuid"], poll_interval=4 + ) + + +def is_project_updation_allowed(project_usage, msg_list): + """ + Returns whether 
project update is allowed. + Will also update project_usage dict to contain only associate entities + Args: + project_usage (dict): project usage details + Returns: + _eusage (bool): is updation allowed + """ + + def is_entity_used(e_usage): + + entity_used = False + app_cnt = e_usage.pop("app", 0) + if app_cnt: + entity_used = True + e_usage["app"] = app_cnt + + brownfield_cnt = e_usage.get("blueprint", {}).pop("brownfield", 0) + greenfield_cnt = e_usage.get("blueprint", {}).pop("greenfield", 0) + if brownfield_cnt or greenfield_cnt: + entity_used = True + if brownfield_cnt: + e_usage["blueprint"]["brownfield"] = brownfield_cnt + if greenfield_cnt: + e_usage["blueprint"]["greenfield"] = greenfield_cnt + else: + e_usage.pop("blueprint", None) + + endpoint_cnt = e_usage.pop("endpoint", 0) + if endpoint_cnt: + entity_used = True + e_usage["endpoint"] = endpoint_cnt + + environment_cnt = e_usage.pop("environment", 0) + if environment_cnt: + entity_used = True + e_usage["environment"] = environment_cnt + + runbook_cnt = e_usage.pop("runbook", 0) + if runbook_cnt: + entity_used = True + e_usage["runbook"] = runbook_cnt + + return entity_used + + updation_allowed = True + accounts_usage = project_usage["status"]["resources"].get("account_list", []) + for _ac in accounts_usage: + entity_used = is_entity_used(_ac["usage"]) + if entity_used: + updation_allowed = False + account_cache_data = Cache.get_entity_data_using_uuid( + entity_type="account", uuid=_ac["uuid"] + ) + msg_list.append( + "Please disassociate the account '{}' (uuid='{}') references from existing entities".format( + account_cache_data["name"], account_cache_data["uuid"] + ) + ) + + subnets_usage = project_usage["status"]["resources"].get("subnet_list", []) + for _snt in subnets_usage: + entity_used = is_entity_used(_snt["usage"]) + if entity_used: + updation_allowed = False + subnet_cache_data = Cache.get_entity_data_using_uuid( + entity_type=CACHE.ENTITY.AHV_SUBNET, uuid=_snt["uuid"] + ) + msg_list.append( + "Please disassociate the subnet '{}' (uuid='{}') references from existing entities".format( + subnet_cache_data["name"], subnet_cache_data["uuid"] + ) + ) + + cluster_usage = project_usage["status"]["resources"].get("cluster_list", []) + for _snt in cluster_usage: + entity_used = is_entity_used(_snt["usage"]) + if entity_used: + updation_allowed = False + cluster_cache_data = Cache.get_entity_data_using_uuid( + entity_type=CACHE.ENTITY.AHV_CLUSTER, uuid=_snt["uuid"] + ) + msg_list.append( + "Please disassociate the cluster '{}' (uuid='{}') references from existing entities".format( + cluster_cache_data["name"], cluster_cache_data["uuid"] + ) + ) + + vpc_usage = project_usage["status"]["resources"].get("vpc_list", []) + for _snt in vpc_usage: + entity_used = is_entity_used(_snt["usage"]) + if entity_used: + updation_allowed = False + vpc_cache_data = Cache.get_entity_data_using_uuid( + entity_type=CACHE.ENTITY.AHV_VPC, uuid=_snt["uuid"] + ) + msg_list.append( + "Please disassociate the vpc '{}' (uuid='{}') references from existing entities".format( + vpc_cache_data["name"], vpc_cache_data["uuid"] + ) + ) + + return updation_allowed + + +def update_payload_from_old_project_data(project_payload, old_project_payload): + """ + updates the project_payload dict by appending the entities + Args: + project_payload (dict): updated project payload + old_project_payload (dict): original payload before updating + """ + + updated_project_user_list = [] + for _user in project_payload["spec"]["resources"].get("user_reference_list", []): + 
updated_project_user_list.append(_user["uuid"]) + + updated_project_groups_list = [] + for _group in project_payload["spec"]["resources"].get( + "external_user_group_reference_list", [] + ): + updated_project_groups_list.append(_group["uuid"]) + + updated_project_subnet_reference_list = [] + for _subnet in project_payload["spec"]["resources"].get( + "subnet_reference_list", [] + ): + updated_project_subnet_reference_list.append(_subnet["uuid"]) + + updated_project_external_network_list = [] + for _external_network in project_payload["spec"]["resources"].get( + "external_network_list", [] + ): + updated_project_external_network_list.append(_external_network["uuid"]) + + updated_project_account_reference_list = [] + for _account_reference in project_payload["spec"]["resources"].get( + "account_reference_list", [] + ): + updated_project_account_reference_list.append(_account_reference["uuid"]) + + updated_project_vpc_reference_list = [] + for _vpc_reference in project_payload["spec"]["resources"].get( + "vpc_reference_list", [] + ): + updated_project_vpc_reference_list.append(_vpc_reference["uuid"]) + + updated_project_cluster_reference_list = [] + for _cluster_reference in project_payload["spec"]["resources"].get( + "cluster_reference_list", [] + ): + updated_project_cluster_reference_list.append(_cluster_reference["uuid"]) + + for _user in old_project_payload["spec"]["resources"].get( + "user_reference_list", [] + ): + if _user["uuid"] not in updated_project_user_list: + project_payload["spec"]["resources"]["user_reference_list"].append(_user) + + for _group in old_project_payload["spec"]["resources"].get( + "external_user_group_reference_list", [] + ): + if _group["uuid"] not in updated_project_groups_list: + project_payload["spec"]["resources"][ + "external_user_group_reference_list" + ].append(_group) + + for _subnet in old_project_payload["spec"]["resources"].get( + "subnet_reference_list", [] + ): + if _subnet["uuid"] not in updated_project_subnet_reference_list: + project_payload["spec"]["resources"]["subnet_reference_list"].append( + _subnet + ) + + for _external_network in old_project_payload["spec"]["resources"].get( + "external_network_list", [] + ): + if _external_network["uuid"] not in updated_project_external_network_list: + project_payload["spec"]["resources"]["external_network_list"].append( + _external_network + ) + + for _account_reference in old_project_payload["spec"]["resources"].get( + "account_reference_list", [] + ): + if _account_reference["uuid"] not in updated_project_account_reference_list: + project_payload["spec"]["resources"]["account_reference_list"].append( + _account_reference + ) + + for _vpc_reference in old_project_payload["spec"]["resources"].get( + "vpc_reference_list", [] + ): + if _vpc_reference["uuid"] not in updated_project_vpc_reference_list: + project_payload["spec"]["resources"]["vpc_reference_list"].append( + _vpc_reference + ) + + for _cluster_reference in old_project_payload["spec"]["resources"].get( + "cluster_reference_list", [] + ): + if _cluster_reference["uuid"] not in updated_project_cluster_reference_list: + project_payload["spec"]["resources"]["cluster_reference_list"].append( + _cluster_reference + ) + + +def get_project_usage_payload(project_payload, old_project_payload): + """ + Returns project_usage_payload (dict) which is used to check if project updation is allowed + Args: + project_payload (dict): updated project payload + old_project_payload (dict): original payload before updating + Returns: + project_usage_payload (dict): 
payload for checking project usage + """ + + # Get the diff in subnet and account payload for project usage + existing_subnets = [ + _subnet["uuid"] + for _subnet in old_project_payload["spec"]["resources"].get( + "subnet_reference_list", [] + ) + ] + existing_subnets.extend( + [ + _subnet["uuid"] + for _subnet in old_project_payload["spec"]["resources"].get( + "external_network_list", [] + ) + ] + ) + + new_subnets = [ + _subnet["uuid"] + for _subnet in project_payload["spec"]["resources"].get( + "subnet_reference_list", [] + ) + ] + new_subnets.extend( + [ + _subnet["uuid"] + for _subnet in project_payload["spec"]["resources"].get( + "external_network_list", [] + ) + ] + ) + + existing_accounts = [ + _acc["uuid"] + for _acc in old_project_payload["spec"]["resources"].get( + "account_reference_list", [] + ) + ] + new_accounts = [ + _acc["uuid"] + for _acc in project_payload["spec"]["resources"].get( + "account_reference_list", [] + ) + ] + + existing_vpcs = [ + _vpc["uuid"] + for _vpc in old_project_payload["spec"]["resources"].get( + "vpc_reference_list", [] + ) + ] + + new_vpcs = [ + _vpc["uuid"] + for _vpc in project_payload["spec"]["resources"].get("vpc_reference_list", []) + ] + + existing_clusters = [ + _cluster["uuid"] + for _cluster in old_project_payload["spec"]["resources"].get( + "cluster_reference_list", [] + ) + ] + + new_clusters = [ + _cluster["uuid"] + for _cluster in project_payload["spec"]["resources"].get( + "cluster_reference_list", [] + ) + ] + + project_usage_payload = { + "filter": { + "subnet_reference_list": list(set(existing_subnets) - set(new_subnets)), + "account_reference_list": list(set(existing_accounts) - set(new_accounts)), + "vpc_reference_list": list(set(existing_vpcs) - set(new_vpcs)), + "cluster_reference_list": list(set(existing_clusters) - set(new_clusters)), + } + } + + return project_usage_payload diff --git a/framework/calm/dsl/cli/protection_policies.py b/framework/calm/dsl/cli/protection_policies.py new file mode 100644 index 0000000..14572c3 --- /dev/null +++ b/framework/calm/dsl/cli/protection_policies.py @@ -0,0 +1,92 @@ +import click +import sys +from prettytable import PrettyTable + +from calm.dsl.api import get_api_client, get_resource_api +from calm.dsl.builtins.models.helper.common import get_project +from calm.dsl.log import get_logging_handle +from .utils import highlight_text + +LOG = get_logging_handle(__name__) + + +def get_protection_policies(limit, offset, project_name, quiet): + """ + Returns protection policies along with the protection rules in the project + """ + + client = get_api_client() + LOG.info("Fetching protection policies") + params = {"length": limit, "offset": offset} + if project_name: + project = get_project(project_name) + params["filter"] = "project_reference=={}".format(project["metadata"]["uuid"]) + res, err = client.app_protection_policy.list(params) + if err: + LOG.error(err) + sys.exit("Unable to list protection policies") + res = res.json()["entities"] + if not res: + click.echo(highlight_text("No protection policy found !!!\n")) + return + + table = PrettyTable() + + if quiet: + table.field_names = ["NAME", "RULE NAME"] + for entity in res: + name = entity["status"]["name"] + for rule in entity["status"]["resources"]["app_protection_rule_list"]: + rule_name = rule["name"] + table.add_row([highlight_text(name), highlight_text(rule_name)]) + click.echo(table) + return + + table.field_names = [ + "NAME", + "UUID", + "RULE NAME", + "RULE UUID", + "RULE TYPE", + "EXPIRY (DAYS)", + "PROJECT", + ] + + for entity 
in res: + name = entity["status"]["name"] + uuid = entity["metadata"]["uuid"] + project_reference = entity["metadata"].get("project_reference", {}) + for rule in entity["status"]["resources"]["app_protection_rule_list"]: + expiry = 0 + rule_type = "" + if rule.get("remote_snapshot_retention_policy", {}): + rule_type = "Remote" + expiry = ( + rule["remote_snapshot_retention_policy"] + .get("snapshot_expiry_policy", {}) + .get("multiple", "") + ) + elif rule.get("local_snapshot_retention_policy", {}): + rule_type = "Local" + expiry = ( + rule["local_snapshot_retention_policy"] + .get("snapshot_expiry_policy", {}) + .get("multiple", "") + ) + rule_name = rule["name"] + rule_uuid = rule["uuid"] + if not expiry: + expiry = "-" + table.add_row( + [ + highlight_text(name), + highlight_text(uuid), + highlight_text(rule_name), + highlight_text(rule_uuid), + highlight_text(rule_type), + highlight_text(expiry), + highlight_text(project_reference.get("name", "")), + ] + ) + + click.echo(table) diff --git a/framework/calm/dsl/cli/protection_policy_commands.py b/framework/calm/dsl/cli/protection_policy_commands.py new file mode 100644 index 0000000..eaa23ad --- /dev/null +++ b/framework/calm/dsl/cli/protection_policy_commands.py @@ -0,0 +1,25 @@ +import click + +from .main import get +from .protection_policies import get_protection_policies +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +@get.command("protection-policies") +@click.option("--limit", "-l", default=20, help="Number of results to return") +@click.option( + "--offset", "-o", default=0, help="Offset results by the specified amount" +) +@click.option("--project", "-p", help="Project name") +@click.option( + "--quiet", + "-q", + is_flag=True, + default=False, + help="Show only names of protection rules and corresponding policy", +) +def protection_policy_list(limit, offset, project, quiet): + """Get all protection policies""" + get_protection_policies(limit, offset, project, quiet) diff --git a/framework/calm/dsl/cli/role_commands.py b/framework/calm/dsl/cli/role_commands.py new file mode 100644 index 0000000..02203d4 --- /dev/null +++ b/framework/calm/dsl/cli/role_commands.py @@ -0,0 +1,28 @@ +import click + +from .roles import get_roles +from .main import get + + +@get.command("roles") +@click.option("--name", "-n", default=None, help="Search for roles by name") +@click.option( + "--filter", "filter_by", "-f", default=None, help="Filter roles by this string" +) +@click.option("--limit", "-l", default=20, help="Number of results to return") +@click.option( + "--offset", "-s", default=0, help="Offset results by the specified amount" +) +@click.option("--quiet", "-q", is_flag=True, default=False, help="Show only role names") +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format", +) +def _get_roles(name, filter_by, limit, offset, quiet, out): + """Get roles, optionally filtered by a string""" + + get_roles(name, filter_by, limit, offset, quiet, out) diff --git a/framework/calm/dsl/cli/roles.py b/framework/calm/dsl/cli/roles.py new file mode 100644 index 0000000..ed7a279 --- /dev/null +++ b/framework/calm/dsl/cli/roles.py @@ -0,0 +1,80 @@ +import click +import json +from prettytable import PrettyTable + +from calm.dsl.api import get_api_client +from calm.dsl.config import get_context +from calm.dsl.log import get_logging_handle + +from .utils import get_name_query, highlight_text + + +LOG = get_logging_handle(__name__) + + +def 
get_roles(name, filter_by, limit, offset, quiet, out): + """Get the roles, optionally filtered by a string""" + + client = get_api_client() + ContextObj = get_context() + server_config = ContextObj.get_server_config() + + params = {"length": limit, "offset": offset} + filter_query = "" + if name: + filter_query = get_name_query([name]) + if filter_by: + filter_query = filter_query + ";(" + filter_by + ")" + if filter_query.startswith(";"): + filter_query = filter_query[1:] + + if filter_query: + params["filter"] = filter_query + + res, err = client.role.list(params=params) + + if err: + pc_ip = server_config["pc_ip"] + LOG.warning("Cannot fetch roles from {}".format(pc_ip)) + return + + res = res.json() + total_matches = res["metadata"]["total_matches"] + if total_matches > limit: + LOG.warning( + "Displaying {} out of {} entities. Please use --limit and --offset option for more results.".format( + limit, total_matches + ) + ) + + if out == "json": + click.echo(json.dumps(res, indent=4, separators=(",", ": "))) + return + + json_rows = res["entities"] + if not json_rows: + click.echo(highlight_text("No role found !!!\n")) + return + + if quiet: + for _row in json_rows: + row = _row["status"] + click.echo(highlight_text(row["name"])) + return + + table = PrettyTable() + table.field_names = ["NAME", "STATE", "UUID"] + + for _row in json_rows: + row = _row["status"] + metadata = _row["metadata"] + + table.add_row( + [ + highlight_text(row["name"]), + highlight_text(row["state"]), + highlight_text(metadata["uuid"]), + ] + ) + + click.echo(table) diff --git a/framework/calm/dsl/cli/runbook_commands.py b/framework/calm/dsl/cli/runbook_commands.py new file mode 100644 index 0000000..a29fa5e --- /dev/null +++ b/framework/calm/dsl/cli/runbook_commands.py @@ -0,0 +1,263 @@ +import click + +from calm.dsl.log import get_logging_handle + +from .main import ( + compile, + get, + describe, + delete, + run, + create, + update, + format, + watch, + pause, + resume, + abort, +) +from .runbooks import ( + get_runbook_list, + create_runbook_command, + update_runbook_command, + get_execution_history, + run_runbook_command, + describe_runbook, + delete_runbook, + format_runbook_command, + compile_runbook_command, + watch_runbook_execution, + resume_runbook_execution, + pause_runbook_execution, + abort_runbook_execution, +) + +LOG = get_logging_handle(__name__) + + +@get.command("runbooks", feature_min_version="3.0.0", experimental=True) +@click.option("--name", "-n", default=None, help="Search for runbooks by name") +@click.option( + "--filter", "filter_by", "-f", default=None, help="Filter runbooks by this string" +) +@click.option("--limit", "-l", default=20, help="Number of results to return") +@click.option( + "--offset", "-o", default=0, help="Offset results by the specified amount" +) +@click.option( + "--quiet", "-q", is_flag=True, default=False, help="Show only runbook names." 
+) +@click.option( + "--all-items", "-a", is_flag=True, help="Get all items, including deleted ones" +) +def _get_runbook_list(name, filter_by, limit, offset, quiet, all_items): + """Get the runbooks, optionally filtered by a string""" + + get_runbook_list(name, filter_by, limit, offset, quiet, all_items) + + +@get.command("runbook_executions", feature_min_version="3.0.0", experimental=True) +@click.option( + "--name", + "-n", + default=None, + help="Search for previous runbook runs by name of runbook (Optional)", +) +@click.option( + "--filter", + "filter_by", + "-f", + default=None, + help="Filter previous runbook executions by this string", +) +@click.option("--limit", "-l", default=20, help="Number of results to return") +@click.option( + "--offset", "-o", default=0, help="Offset results by the specified amount" +) +def _get_execution_history(name, filter_by, limit, offset): + """Get previous runbook executions, optionally filtered by a string""" + + get_execution_history(name, filter_by, limit, offset) + + +@create.command("runbook", feature_min_version="3.0.0", experimental=True) +@click.option( + "--file", + "-f", + "runbook_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + required=True, + help="Path of Runbook file to upload", +) +@click.option("--name", "-n", default=None, help="Runbook name (Optional)") +@click.option("--description", default=None, help="Runbook description (Optional)") +@click.option( + "--force", + "-fc", + is_flag=True, + default=False, + help="Deletes existing blueprint with the same name before create.", +) +def _create_runbook_command(runbook_file, name, description, force): + """Creates a runbook""" + + create_runbook_command(runbook_file, name, description, force) + + +@update.command("runbook", feature_min_version="3.0.0", experimental=True) +@click.option( + "--file", + "-f", + "runbook_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + required=True, + help="Path of Runbook file to upload", +) +@click.option("--name", "-n", default=None, required=True, help="Runbook name") +@click.option("--description", default=None, help="Runbook description (Optional)") +def _update_runbook_command(runbook_file, name, description): + """Updates a runbook""" + + update_runbook_command(runbook_file, name, description) + + +@delete.command("runbook", feature_min_version="3.0.0", experimental=True) +@click.argument("runbook_names", nargs=-1) +def _delete_runbook(runbook_names): + """Deletes a runbook""" + + delete_runbook(runbook_names) + + +@describe.command("runbook", feature_min_version="3.0.0", experimental=True) +@click.argument("runbook_name") +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format [text|json].", +) +def _describe_runbook(runbook_name, out): + """Describe a runbook""" + + describe_runbook(runbook_name, out) + + +@format.command("runbook", feature_min_version="3.0.0", experimental=True) +@click.option( + "--file", + "-f", + "runbook_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + required=True, + help="Path of Runbook file to format", +) +def _format_runbook_command(runbook_file): + """black formats the runbook file""" + + format_runbook_command(runbook_file) + + +@compile.command("runbook", feature_min_version="3.0.0", experimental=True) +@click.option( + "--file", + "-f", + "runbook_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), 
+ required=True, + help="Path of Runbook file to upload", +) +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["json", "yaml"]), + default="json", + help="output format [json|yaml].", +) +def _compile_runbook_command(runbook_file, out): + """Compiles a DSL (Python) runbook into JSON or YAML""" + compile_runbook_command(runbook_file, out) + + +@run.command("runbook", feature_min_version="3.0.0", experimental=True) +@click.argument("runbook_name", required=False) +@click.option( + "--file", + "-f", + "runbook_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + required=False, + help="Path of Runbook file to directly run runbook", +) +@click.option( + "--ignore_runtime_variables", + "-i", + is_flag=True, + default=False, + help="Ignore runtime variables and use defaults", +) +@click.option( + "--input-file", + "input_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + required=False, + help="Path to python file for runtime editables", +) +@click.option("--watch/--no-watch", "-w", default=False, help="Watch scrolling output") +def _run_runbook_command( + runbook_name, watch, ignore_runtime_variables, runbook_file=None, input_file=None +): + """Execute the runbook given by name or runbook file. All runtime variables and default target will be prompted by default. When passing the 'ignore_runtime_variables' flag, no variables will be prompted and all default values will be used. The runbook default values can be overridden by passing a Python file via 'input_file'. When passing a Python file, no variables will be prompted. + + \b + >: Runbook will be deleted after single run, if '--file/-f' option is used to supply runbook file. + + >: input_file: Python file consisting of variables 'variable_list' and 'default_target' + Ex: variable_list = { + "value": {"value": }, + "name": "" + } + default_target: """ + + run_runbook_command( + runbook_name, + watch, + ignore_runtime_variables, + runbook_file=runbook_file, + input_file=input_file, + ) + + +@watch.command("runbook_execution", feature_min_version="3.0.0", experimental=True) +@click.argument("runlog_uuid", required=True) +def _watch_runbook_execution(runlog_uuid): + """Watch the runbook execution using given runlog UUID""" + + watch_runbook_execution(runlog_uuid) + + +@pause.command("runbook_execution", feature_min_version="3.0.0", experimental=True) +@click.argument("runlog_uuid", required=True) +def _pause_runbook_execution(runlog_uuid): + """Pause the running runbook execution""" + + pause_runbook_execution(runlog_uuid) + + +@resume.command("runbook_execution", feature_min_version="3.0.0", experimental=True) +@click.argument("runlog_uuid", required=True) +def _resume_runbook_execution(runlog_uuid): + """Resume the paused runbook execution""" + + resume_runbook_execution(runlog_uuid) + + +@abort.command("runbook_execution", feature_min_version="3.0.0", experimental=True) +@click.argument("runlog_uuid", required=True) +def _abort_runbook_execution(runlog_uuid): + """Abort the runbook execution""" + + abort_runbook_execution(runlog_uuid) diff --git a/framework/calm/dsl/cli/runbooks.py b/framework/calm/dsl/cli/runbooks.py new file mode 100644 index 0000000..3468db1 --- /dev/null +++ b/framework/calm/dsl/cli/runbooks.py @@ -0,0 +1,1004 @@ +import json +import time +import sys +import uuid +import pathlib + +from ruamel import yaml +import arrow +import click +from prettytable import PrettyTable +from black import format_file_in_place, WriteBack, FileMode + +from 
calm.dsl.builtins import file_exists +from calm.dsl.runbooks import runbook, create_runbook_payload +from calm.dsl.config import get_context +from calm.dsl.api import get_api_client +from calm.dsl.log import get_logging_handle +from calm.dsl.constants import CACHE +from calm.dsl.store import Cache +from calm.dsl.tools import get_module_from_file +from .utils import ( + Display, + get_name_query, + highlight_text, + get_states_filter, + import_var_from_file, +) +from .constants import RUNBOOK, RUNLOG +from .runlog import get_completion_func, get_runlog_status +from .endpoints import get_endpoint + +from anytree import NodeMixin, RenderTree + +LOG = get_logging_handle(__name__) + + +def get_runbook_list(name, filter_by, limit, offset, quiet, all_items): + """Get the runbooks, optionally filtered by a string""" + + client = get_api_client() + + params = {"length": limit, "offset": offset} + filter_query = "" + if name: + filter_query = get_name_query([name]) + if filter_by: + filter_query = filter_query + ";(" + filter_by + ")" + + if all_items: + filter_query += get_states_filter(RUNBOOK.STATES) + if filter_query.startswith(";"): + filter_query = filter_query[1:] + + if filter_query: + params["filter"] = filter_query + + res, err = client.runbook.list(params=params) + + if err: + ContextObj = get_context() + server_config = ContextObj.get_server_config() + pc_ip = server_config["pc_ip"] + LOG.warning("Cannot fetch runbooks from {}".format(pc_ip)) + return + + json_rows = res.json()["entities"] + if not json_rows: + click.echo(highlight_text("No runbook found !!!\n")) + return + + if quiet: + for _row in json_rows: + row = _row["status"] + click.echo(highlight_text(row["name"])) + return + + table = PrettyTable() + table.field_names = [ + "NAME", + "DESCRIPTION", + "PROJECT", + "STATE", + "EXECUTION HISTORY", + "CREATED BY", + "LAST EXECUTED AT", + "LAST UPDATED", + "UUID", + ] + for _row in json_rows: + row = _row["status"] + metadata = _row["metadata"] + + created_by = metadata.get("owner_reference", {}).get("name", "-") + last_run = int(row.get("last_run_time", 0)) // 1000000 + last_update_time = int(metadata["last_update_time"]) // 1000000 + project = metadata.get("project_reference", {}).get("name", "") + total_runs = int(row.get("run_count", 0)) + int(row.get("running_runs", 0)) + + table.add_row( + [ + highlight_text(row["name"]), + highlight_text(row["description"]), + highlight_text(project), + highlight_text(row["state"]), + highlight_text(total_runs if total_runs else "-"), + highlight_text(created_by), + "{}".format(arrow.get(last_run).humanize()) if last_run else "-", + "{}".format(arrow.get(last_update_time).humanize()), + highlight_text(row["uuid"]), + ] + ) + click.echo(table) + + +def get_runbook_module_from_file(runbook_file): + """Return Runbook module given a user runbook dsl file (.py)""" + return get_module_from_file("calm.dsl.user_runbook", runbook_file) + + +def get_runbook_class_from_module(user_runbook_module): + """Returns runbook class given a module""" + + UserRunbook = None + for item in dir(user_runbook_module): + obj = getattr(user_runbook_module, item) + if isinstance(obj, runbook): + UserRunbook = obj + return UserRunbook + + +def compile_runbook(runbook_file): + + user_runbook_module = get_runbook_module_from_file(runbook_file) + UserRunbook = get_runbook_class_from_module(user_runbook_module) + if UserRunbook is None: + return None + + runbook_payload = None + UserRunbookPayload, _ = create_runbook_payload(UserRunbook) + runbook_payload = 
UserRunbookPayload.get_dict() + + return runbook_payload + + +def compile_runbook_command(runbook_file, out): + + rb_payload = compile_runbook(runbook_file) + if rb_payload is None: + LOG.error("User runbook not found in {}".format(runbook_file)) + return + + ContextObj = get_context() + project_config = ContextObj.get_project_config() + project_name = project_config["name"] + project_cache_data = Cache.get_entity_data( + entity_type=CACHE.ENTITY.PROJECT, name=project_name + ) + + if not project_cache_data: + LOG.error( + "Project {} not found. Please run: calm update cache".format(project_name) + ) + + project_uuid = project_cache_data.get("uuid", "") + rb_payload["metadata"]["project_reference"] = { + "type": "project", + "uuid": project_uuid, + "name": project_name, + } + + if out == "json": + click.echo(json.dumps(rb_payload, indent=4, separators=(",", ": "))) + elif out == "yaml": + click.echo(yaml.dump(rb_payload, default_flow_style=False)) + else: + LOG.error("Unknown output format {} given".format(out)) + + +def create_runbook( + client, runbook_payload, name=None, description=None, force_create=False +): + + runbook_payload.pop("status", None) + + if name: + runbook_payload["spec"]["name"] = name + runbook_payload["metadata"]["name"] = name + + if description: + runbook_payload["spec"]["description"] = description + + runbook_resources = runbook_payload["spec"]["resources"] + runbook_name = runbook_payload["spec"]["name"] + runbook_desc = runbook_payload["spec"]["description"] + + return client.runbook.upload_with_secrets( + runbook_name, runbook_desc, runbook_resources, force_create=force_create + ) + + +def create_runbook_from_json( + client, path_to_json, name=None, description=None, force_create=False +): + + runbook_payload = json.loads(open(path_to_json, "r").read()) + return create_runbook( + client, + runbook_payload, + name=name, + description=description, + force_create=force_create, + ) + + +def create_runbook_from_dsl( + client, runbook_file, name=None, description=None, force_create=False +): + + runbook_payload = compile_runbook(runbook_file) + if runbook_payload is None: + err_msg = "User runbook not found in {}".format(runbook_file) + err = {"error": err_msg, "code": -1} + return None, err + + return create_runbook( + client, + runbook_payload, + name=name, + description=description, + force_create=force_create, + ) + + +def create_runbook_command(runbook_file, name, description, force): + """Creates a runbook""" + + client = get_api_client() + + if runbook_file.endswith(".json"): + res, err = create_runbook_from_json( + client, runbook_file, name=name, description=description, force_create=force + ) + elif runbook_file.endswith(".py"): + res, err = create_runbook_from_dsl( + client, runbook_file, name=name, description=description, force_create=force + ) + else: + LOG.error("Unknown file format {}".format(runbook_file)) + return + + if err: + LOG.error(err["error"]) + return + + runbook = res.json() + runbook_uuid = runbook["metadata"]["uuid"] + runbook_name = runbook["metadata"]["name"] + runbook_status = runbook.get("status", {}) + runbook_state = runbook_status.get("state", "DRAFT") + LOG.debug("Runbook {} has state: {}".format(runbook_name, runbook_state)) + + if runbook_state != "ACTIVE": + msg_list = runbook_status.get("message_list", []) + if not msg_list: + LOG.error("Runbook {} created with errors.".format(runbook_name)) + LOG.debug(json.dumps(runbook_status)) + sys.exit(-1) + + msgs = [] + for msg_dict in msg_list: + msgs.append(msg_dict.get("message", 
"")) + + LOG.error( + "Runbook {} created with {} error(s): {}".format( + runbook_name, len(msg_list), msgs + ) + ) + sys.exit(-1) + + LOG.info("Runbook {} created successfully.".format(runbook_name)) + ContextObj = get_context() + server_config = ContextObj.get_server_config() + pc_ip = server_config["pc_ip"] + pc_port = server_config["pc_port"] + link = "https://{}:{}/console/#page/explore/calm/runbooks/{}".format( + pc_ip, pc_port, runbook_uuid + ) + stdout_dict = {"name": runbook_name, "link": link, "state": runbook_state} + click.echo(json.dumps(stdout_dict, indent=4, separators=(",", ": "))) + + +def update_runbook(client, runbook_payload, name=None, description=None): + + runbook_payload.pop("status", None) + + if name: + runbook_payload["spec"]["name"] = name + runbook_payload["metadata"]["name"] = name + + if description: + runbook_payload["spec"]["description"] = description + + runbook_resources = runbook_payload["spec"]["resources"] + runbook_name = runbook_payload["spec"]["name"] + runbook_desc = runbook_payload["spec"]["description"] + + runbook = get_runbook(client, runbook_payload["spec"]["name"]) + uuid = runbook["metadata"]["uuid"] + spec_version = runbook["metadata"]["spec_version"] + + return client.runbook.update_with_secrets( + uuid, runbook_name, runbook_desc, runbook_resources, spec_version + ) + + +def update_runbook_from_json(client, path_to_json, name=None, description=None): + + runbook_payload = json.loads(open(path_to_json, "r").read()) + return update_runbook(client, runbook_payload, name=name, description=description) + + +def update_runbook_from_dsl(client, runbook_file, name=None, description=None): + + runbook_payload = compile_runbook(runbook_file) + if runbook_payload is None: + err_msg = "User runbook not found in {}".format(runbook_file) + err = {"error": err_msg, "code": -1} + return None, err + + return update_runbook(client, runbook_payload, name=name, description=description) + + +def update_runbook_command(runbook_file, name, description): + """Updates a runbook""" + + client = get_api_client() + + if runbook_file.endswith(".json"): + res, err = update_runbook_from_json( + client, runbook_file, name=name, description=description + ) + elif runbook_file.endswith(".py"): + res, err = update_runbook_from_dsl( + client, runbook_file, name=name, description=description + ) + else: + LOG.error("Unknown file format {}".format(runbook_file)) + return + + if err: + LOG.error(err["error"]) + return + + runbook = res.json() + runbook_uuid = runbook["metadata"]["uuid"] + runbook_name = runbook["metadata"]["name"] + runbook_status = runbook.get("status", {}) + runbook_state = runbook_status.get("state", "DRAFT") + LOG.debug("Runbook {} has state: {}".format(runbook_name, runbook_state)) + + if runbook_state != "ACTIVE": + msg_list = runbook_status.get("message_list", []) + if not msg_list: + LOG.error("Runbook {} updated with errors.".format(runbook_name)) + LOG.debug(json.dumps(runbook_status)) + sys.exit(-1) + + msgs = [] + for msg_dict in msg_list: + msgs.append(msg_dict.get("message", "")) + + LOG.error( + "Runbook {} updated with {} error(s): {}".format( + runbook_name, len(msg_list), msgs + ) + ) + sys.exit(-1) + + LOG.info("Runbook {} updated successfully.".format(runbook_name)) + ContextObj = get_context() + server_config = ContextObj.get_server_config() + pc_ip = server_config["pc_ip"] + pc_port = server_config["pc_port"] + link = "https://{}:{}/console/#page/explore/calm/runbooks/{}".format( + pc_ip, pc_port, runbook_uuid + ) + stdout_dict = 
{"name": runbook_name, "link": link, "state": runbook_state} + click.echo(json.dumps(stdout_dict, indent=4, separators=(",", ": "))) + + +def get_execution_history(name, filter_by, limit, offset): + client = get_api_client() + + params = {"length": limit, "offset": offset} + filter_query = "" + if name: + runbook = get_runbook(client, name) + runbook_uuid = runbook["metadata"]["uuid"] + filter_query = filter_query + ";action_reference=={}".format(runbook_uuid) + if filter_by: + filter_query = filter_query + ";(" + filter_by + ")" + if filter_query.startswith(";"): + filter_query = filter_query[1:] + + if filter_query: + params["filter"] = filter_query + + res, err = client.runbook.list_runbook_runlogs(params=params) + + if err: + ContextObj = get_context() + server_config = ContextObj.get_server_config() + pc_ip = server_config["pc_ip"] + LOG.warning("Cannot fetch previous runs from {}".format(pc_ip)) + return + + json_rows = res.json()["entities"] + if not json_rows: + click.echo(highlight_text("No runbook execution found !!!\n")) + return + + table = PrettyTable() + table.field_names = [ + "SOURCE RUNBOOK", + "STARTED AT", + "ENDED AT", + "COMPLETED IN", + "EXECUTED BY", + "UUID", + "STATE", + ] + for _row in json_rows: + row = _row["status"] + metadata = _row["metadata"] + + state = row["state"] + started_at = int(metadata["creation_time"]) // 1000000 + last_update_time = int(metadata["last_update_time"]) // 1000000 + completed_in = last_update_time - started_at + hours, rem = divmod(completed_in, 3600) + minutes, seconds = divmod(rem, 60) + timetaken = "" + if hours: + timetaken = "{} hours {} minutes".format(hours, minutes) + elif minutes: + timetaken = "{} minutes {} seconds".format(minutes, seconds) + else: + timetaken = "{} seconds".format(seconds) + + if state not in RUNLOG.TERMINAL_STATES: + timetaken = "-" + + table.add_row( + [ + highlight_text(row["action_reference"]["name"]), + highlight_text(time.ctime(started_at)), + "{}".format(arrow.get(last_update_time).humanize()) + if state in RUNLOG.TERMINAL_STATES + else "-", + highlight_text(timetaken), + highlight_text(row["userdata_reference"]["name"]), + highlight_text(metadata["uuid"]), + highlight_text(state), + ] + ) + click.echo(table) + + +def get_runbook(client, name, all=False): + + # find runbook + params = {"filter": "name=={}".format(name)} + if not all: + params["filter"] += ";deleted==FALSE" + + res, err = client.runbook.list(params=params) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + response = res.json() + entities = response.get("entities", None) + runbook = None + if entities: + if len(entities) != 1: + raise Exception("More than one runbook found - {}".format(entities)) + + LOG.info("{} found ".format(name)) + runbook = entities[0] + else: + raise Exception("No runbook found with name {} found".format(name)) + return runbook + + +def parse_input_file(client, runbook, input_file): + + if file_exists(input_file) and input_file.endswith(".py"): + input_variable_list = import_var_from_file(input_file, "variable_list", []) + target = import_var_from_file(input_file, "default_target", "") + else: + LOG.error("Invalid input_file passed! 
Must be a valid and existing.py file!") + sys.exit(-1) + + args = [] + variable_list = runbook["spec"]["resources"]["runbook"].get("variable_list", []) + for variable in variable_list: + if variable.get("editables", {}).get("value", False): + filtered_input_runtime_var = list( + filter(lambda e: e["name"] == variable.get("name"), input_variable_list) + ) + new_val = "" + if len(filtered_input_runtime_var) == 1: + new_val = filtered_input_runtime_var[0].get("value", "") + if new_val: + args.append( + { + "name": variable.get("name"), + "value": type(variable.get("value"))(new_val), + } + ) + + payload = {"spec": {"args": args}} + if target: + endpoint = get_endpoint(client, target) + endpoint_id = endpoint.get("metadata", {}).get("uuid", "") + payload["spec"]["default_target_reference"] = { + "kind": "app_endpoint", + "uuid": endpoint_id, + "name": target, + } + return payload + + +def patch_runbook_runtime_editables(client, runbook): + + args = [] + variable_list = runbook["spec"]["resources"]["runbook"].get("variable_list", []) + for variable in variable_list: + if variable.get("editables", {}).get("value", False): + new_val = input( + "Value for Variable {} in Runbook (default value={}): ".format( + variable.get("name"), variable.get("value", "") + ) + ) + if new_val: + args.append( + { + "name": variable.get("name"), + "value": type(variable.get("value"))(new_val), + } + ) + + payload = {"spec": {"args": args}} + default_target = ( + runbook["spec"]["resources"] + .get("default_target_reference", {}) + .get("name", None) + ) + target = input( + "Endpoint target for the Runbook Run (default target={}): ".format( + default_target + ) + ) + if target: + endpoint = get_endpoint(client, target) + endpoint_id = endpoint.get("metadata", {}).get("uuid", "") + payload["spec"]["default_target_reference"] = { + "kind": "app_endpoint", + "uuid": endpoint_id, + "name": target, + } + return payload + + +def run_runbook_command( + runbook_name, watch, ignore_runtime_variables, runbook_file=None, input_file=None +): + + if runbook_file is None and runbook_name is None: + LOG.error( + "One of either Runbook Name or Runbook File is required to run runbook." 
+ ) + return + + client = get_api_client() + runbook = None + + if runbook_file: + LOG.info("Uploading runbook: {}".format(runbook_file)) + name = "runbook" + "_" + str(uuid.uuid4())[:8] + if runbook_file.endswith(".json"): + res, err = create_runbook_from_json(client, runbook_file, name=name) + elif runbook_file.endswith(".py"): + res, err = create_runbook_from_dsl(client, runbook_file, name=name) + else: + LOG.error("Unknown file format {}".format(runbook_file)) + return + + if err: + LOG.error(err["error"]) + return + + LOG.info("Uploaded runbook: {}".format(runbook_file)) + runbook = res.json() + runbook_id = runbook["metadata"]["uuid"] + else: + runbook_id = get_runbook(client, runbook_name)["metadata"]["uuid"] + res, err = client.runbook.read(runbook_id) + if err: + LOG.error(err["error"]) + return + runbook = res.json() + + payload = {} + if input_file is None and not ignore_runtime_variables: + payload = patch_runbook_runtime_editables(client, runbook) + if input_file: + payload = parse_input_file(client, runbook, input_file) + + def render_runbook(screen): + screen.clear() + screen.refresh() + run_runbook(screen, client, runbook_id, watch, payload=payload) + if runbook_file: + res, err = client.runbook.delete(runbook_id) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + screen.wait_for_input(10.0) + + Display.wrapper(render_runbook, watch) + + +def run_runbook(screen, client, runbook_uuid, watch, input_data={}, payload={}): + + res, err = client.runbook.run(runbook_uuid, payload) + if not err: + screen.clear() + screen.print_at("Runbook queued for run", 0, 0) + else: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + response = res.json() + runlog_uuid = response["status"]["runlog_uuid"] + + def poll_runlog_status(): + return client.runbook.poll_action_run(runlog_uuid) + + screen.refresh() + should_continue = poll_action(poll_runlog_status, get_runlog_status(screen)) + if not should_continue: + return + res, err = client.runbook.poll_action_run(runlog_uuid) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + response = res.json() + runbook = response["status"]["runbook_json"]["resources"]["runbook"] + + if watch: + screen.refresh() + watch_runbook(runlog_uuid, runbook, screen=screen, input_data=input_data) + + ContextObj = get_context() + server_config = ContextObj.get_server_config() + pc_ip = server_config["pc_ip"] + pc_port = server_config["pc_port"] + run_url = "https://{}:{}/console/#page/explore/calm/runbooks/runlogs/{}".format( + pc_ip, pc_port, runlog_uuid + ) + if not watch: + screen.print_at( + "Runbook execution url: {}".format(highlight_text(run_url)), 0, 0 + ) + screen.refresh() + + +def watch_runbook_execution(runlog_uuid): + + client = get_api_client() + + def render_runbook_execution(screen): + screen.clear() + screen.refresh() + + def poll_runlog_status(): + return client.runbook.poll_action_run(runlog_uuid) + + should_continue = poll_action(poll_runlog_status, get_runlog_status(screen)) + if not should_continue: + exit(-1) + res, err = client.runbook.poll_action_run(runlog_uuid) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + response = res.json() + runbook = response["status"]["runbook_json"]["resources"]["runbook"] + + screen.refresh() + watch_runbook(runlog_uuid, runbook, screen) + screen.wait_for_input(10.0) + + Display.wrapper(render_runbook_execution, True) + + +def watch_runbook(runlog_uuid, runbook, screen, poll_interval=10, input_data={}): + + client = 
get_api_client() + + def poll_func(): + return client.runbook.list_runlogs(runlog_uuid) + + # following code block gets list of metaTask uuids and list of top level tasks uuid of runbook + tasks = runbook["task_definition_list"] + main_task_reference = runbook["main_task_local_reference"]["uuid"] + task_type_map = {} + top_level_tasks = [] + for task in tasks: + task_type_map[task.get("uuid")] = task.get("type", "") + if task.get("uuid") == main_task_reference: + task_list = task.get("child_tasks_local_reference_list", []) + for t in task_list: + top_level_tasks.append(t.get("uuid", "")) + + poll_action( + poll_func, + get_completion_func(screen), + poll_interval=poll_interval, + task_type_map=task_type_map, + top_level_tasks=top_level_tasks, + input_data=input_data, + runlog_uuid=runlog_uuid, + ) + + +def describe_runbook(runbook_name, out): + """Displays runbook data""" + + client = get_api_client() + runbook = get_runbook(client, runbook_name, all=True) + + res, err = client.runbook.read(runbook["metadata"]["uuid"]) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + runbook = res.json() + + if out == "json": + runbook.pop("status", None) + click.echo(json.dumps(runbook, indent=4, separators=(",", ": "))) + return + + click.echo("\n----Runbook Summary----\n") + click.echo( + "Name: " + + highlight_text(runbook_name) + + " (uuid: " + + highlight_text(runbook["metadata"]["uuid"]) + + ")" + ) + click.echo("Description: " + highlight_text(runbook["status"]["description"])) + click.echo("Status: " + highlight_text(runbook["status"]["state"])) + click.echo( + "Owner: " + highlight_text(runbook["metadata"]["owner_reference"]["name"]), + nl=False, + ) + project = runbook["metadata"].get("project_reference", {}) + click.echo(" Project: " + highlight_text(project.get("name", ""))) + + created_on = int(runbook["metadata"]["creation_time"]) // 1000000 + past = arrow.get(created_on).humanize() + click.echo( + "Created: {} ({})".format( + highlight_text(time.ctime(created_on)), highlight_text(past) + ) + ) + last_updated = int(runbook["metadata"]["last_update_time"]) // 1000000 + past = arrow.get(last_updated).humanize() + click.echo( + "Last Updated: {} ({})\n".format( + highlight_text(time.ctime(last_updated)), highlight_text(past) + ) + ) + runbook_resources = runbook.get("status").get("resources", {}) + runbook_dict = runbook_resources.get("runbook", {}) + + click.echo("Runbook :") + + task_list = runbook_dict.get("task_definition_list", []) + task_map = {} + for task in task_list: + task_map[task.get("uuid")] = task + + # creating task tree for runbook + main_task = runbook_dict.get("main_task_local_reference").get("uuid") + root = addTaskNodes(main_task, task_map) + for pre, _, node in RenderTree(root): + displayTaskNode(node, pre) + + click.echo("\n") + + variable_types = [ + var["label"] if var.get("label", "") else var.get("name") + for var in runbook_dict.get("variable_list", []) + ] + click.echo("\tVariables [{}]:".format(highlight_text(len(variable_types)))) + click.echo("\t\t{}\n".format(highlight_text(", ".join(variable_types)))) + + credential_types = [ + "{} ({})".format(cred.get("name", ""), cred.get("type", "")) + for cred in runbook_resources.get("credential_definition_list", []) + ] + click.echo("Credentials [{}]:".format(highlight_text(len(credential_types)))) + click.echo("\t{}\n".format(highlight_text(", ".join(credential_types)))) + + default_target = runbook_resources.get("default_target_reference", {}).get( + "name", "-" + ) + click.echo("Default 
Endpoint Target: {}\n".format(highlight_text(default_target))) + + +def format_runbook_command(runbook_file): + path = pathlib.Path(runbook_file) + LOG.debug("Formatting runbook {} using black".format(path)) + if format_file_in_place( + path, fast=False, mode=FileMode(), write_back=WriteBack.DIFF + ): + LOG.info("Patching above diff to runbook - {}".format(path)) + format_file_in_place( + path, fast=False, mode=FileMode(), write_back=WriteBack.YES + ) + LOG.info("All done!") + else: + LOG.info("Runbook {} left unchanged.".format(path)) + + +def delete_runbook(runbook_names): + + client = get_api_client() + + for runbook_name in runbook_names: + runbook = get_runbook(client, runbook_name) + runbook_id = runbook["metadata"]["uuid"] + res, err = client.runbook.delete(runbook_id) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + LOG.info("Runbook {} deleted".format(runbook_name)) + + +def pause_runbook_execution(runlog_uuid): + + client = get_api_client() + res, err = client.runbook.pause(runlog_uuid) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + response = res.json() + state = response["status"]["state"] + if state in RUNLOG.TERMINAL_STATES: + LOG.warning("Runbook Execution is in terminal state.") + else: + LOG.info("Pause triggered for the given runbook execution.") + + ContextObj = get_context() + server_config = ContextObj.get_server_config() + pc_ip = server_config["pc_ip"] + pc_port = server_config["pc_port"] + link = "https://{}:{}/console/#page/explore/calm/runbooks/runlogs/{}".format( + pc_ip, pc_port, runlog_uuid + ) + stdout_dict = {"link": link, "state": state} + click.echo(json.dumps(stdout_dict, indent=4, separators=(",", ": "))) + + +def resume_runbook_execution(runlog_uuid): + + client = get_api_client() + res, err = client.runbook.play(runlog_uuid) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + response = res.json() + state = response["status"]["state"] + if state == RUNLOG.STATUS.PAUSED: + LOG.info("Resume triggered for the given paused runbook execution.") + else: + LOG.warning("Runbook execution is not in paused state.") + + ContextObj = get_context() + server_config = ContextObj.get_server_config() + pc_ip = server_config["pc_ip"] + pc_port = server_config["pc_port"] + link = "https://{}:{}/console/#page/explore/calm/runbooks/runlogs/{}".format( + pc_ip, pc_port, runlog_uuid + ) + stdout_dict = {"link": link, "state": state} + click.echo(json.dumps(stdout_dict, indent=4, separators=(",", ": "))) + + +def abort_runbook_execution(runlog_uuid): + + client = get_api_client() + res, err = client.runbook.poll_action_run(runlog_uuid) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + response = res.json() + state = response["status"]["state"] + if state in RUNLOG.TERMINAL_STATES: + LOG.warning("Runbook Execution is in terminal state: {}".format(state)) + sys.exit(0) + res, err = client.runbook.abort(runlog_uuid) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + response = res.json() + state = response["status"]["state"] + LOG.info("Abort triggered for the given runbook execution.") + + ContextObj = get_context() + server_config = ContextObj.get_server_config() + pc_ip = server_config["pc_ip"] + pc_port = server_config["pc_port"] + link = "https://{}:{}/console/#page/explore/calm/runbooks/runlogs/{}".format( + pc_ip, pc_port, runlog_uuid + ) + stdout_dict = {"link": link, "state": state} + click.echo(json.dumps(stdout_dict, indent=4, 
separators=(",", ": "))) + + +def poll_action(poll_func, completion_func, poll_interval=10, **kwargs): + # Poll every 10 seconds on the runlog status, for 10 mins + maxWait = 10 * 60 + count = 0 + while count < maxWait: + # call status api + res, err = poll_func() + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + response = res.json() + (completed, msg) = completion_func(response, **kwargs) + if completed: + # click.echo(msg) + if msg: + return False + break + count += poll_interval + time.sleep(poll_interval) + return True + + +class TaskNode(NodeMixin): + def __init__(self, name, task_type=None, target=None, parent=None): + self.name = name + self.type = task_type + self.target = target + self.parent = parent + + +def addTaskNodes(task_uuid, task_map, parent=None): + task = task_map[task_uuid] + task_name = task.get("name", "") + task_target = task.get("target_any_local_reference", {}).get("name", "") + task_type = task.get("type", "") + + if task_type == "DAG": + node = TaskNode("ROOT") + elif task_type != "META": + node = TaskNode( + task_name, task_type=task_type, target=task_target, parent=parent + ) + else: + node = parent + + if task_type == "DECISION": + success_node = TaskNode("SUCCESS", parent=node) + failure_node = TaskNode("FAILURE", parent=node) + success_task = ( + task.get("attrs", {}).get("success_child_reference", {}).get("uuid", "") + ) + if success_task: + addTaskNodes(success_task, task_map, success_node) + failure_task = ( + task.get("attrs", {}).get("failure_child_reference", {}).get("uuid", "") + ) + if failure_task: + addTaskNodes(failure_task, task_map, failure_node) + return node + + child_tasks = task.get("child_tasks_local_reference_list", []) + for child_task in child_tasks: + addTaskNodes(child_task.get("uuid"), task_map, node) + return node + + +def displayTaskNode(node, pre): + if node.type and node.target: + click.echo( + "\t{}{} (Type: {}, Target: {})".format( + pre, + highlight_text(node.name), + highlight_text(node.type), + highlight_text(node.target), + ) + ) + elif node.type: + click.echo( + "\t{}{} (Type: {})".format( + pre, highlight_text(node.name), highlight_text(node.type) + ) + ) + else: + click.echo("\t{}{}".format(pre, highlight_text(node.name))) diff --git a/framework/calm/dsl/cli/runlog.py b/framework/calm/dsl/cli/runlog.py new file mode 100644 index 0000000..d5e1467 --- /dev/null +++ b/framework/calm/dsl/cli/runlog.py @@ -0,0 +1,645 @@ +import os +import sys + +from asciimatics.widgets import ( + Frame, + Layout, + Divider, + Text, + Button, + DatePicker, + TimePicker, + Label, + DropdownList, +) +from asciimatics.scene import Scene +from asciimatics.screen import Screen +from asciimatics.exceptions import StopApplication +import time +from time import sleep +from datetime import timedelta +import itertools + +from anytree import NodeMixin, RenderTree +import datetime + +from .constants import RUNLOG, SINGLE_INPUT +from calm.dsl.api import get_api_client + + +def parse_machine_name(runlog_id, machine_name): + if not machine_name: + return None + machine_info = machine_name.split("-{} - ".format(runlog_id)) + return machine_info + + +class InputFrame(Frame): + def __init__(self, name, screen, inputs, data): + super(InputFrame, self).__init__( + screen, + int(len(inputs) * 2 + 8), + int(screen.width * 4 // 5), + has_shadow=True, + data=data, + name=name, + ) + layout = Layout([1, len(inputs), 1]) + self.add_layout(layout) + layout.add_widget( + Label("Inputs for the input task '{}'".format(name), height=2), 1 + ) + 
for singleinput in inputs: + if ( + singleinput.get("input_type", SINGLE_INPUT.TYPE.TEXT) + == SINGLE_INPUT.TYPE.TEXT + ): + layout.add_widget( + Text( + label=singleinput.get("name") + ":", + name=singleinput.get("name"), + on_change=self._on_change, + ), + 1, + ) + elif ( + singleinput.get("input_type", SINGLE_INPUT.TYPE.TEXT) + == SINGLE_INPUT.TYPE.DATE + ): + layout.add_widget( + DatePicker( + label=singleinput.get("name") + ":", + name=singleinput.get("name"), + year_range=range(1899, 2300), + on_change=self._on_change, + ), + 1, + ) + elif ( + singleinput.get("input_type", SINGLE_INPUT.TYPE.TEXT) + == SINGLE_INPUT.TYPE.TIME + ): + layout.add_widget( + TimePicker( + label=singleinput.get("name") + ":", + name=singleinput.get("name"), + seconds=True, + on_change=self._on_change, + ), + 1, + ) + elif singleinput.get("input_type", SINGLE_INPUT.TYPE.TEXT) in [ + SINGLE_INPUT.TYPE.SELECT, + SINGLE_INPUT.TYPE.SELECTMULTIPLE, + ]: + layout.add_widget( + DropdownList( + [(option, option) for option in singleinput.get("options")], + label=singleinput.get("name") + ":", + name=singleinput.get("name"), + on_change=self._on_change, + ), + 1, + ) + elif ( + singleinput.get("input_type", SINGLE_INPUT.TYPE.TEXT) + == SINGLE_INPUT.TYPE.PASSWORD + ): + layout.add_widget( + Text( + label=singleinput.get("name") + ":", + name=singleinput.get("name"), + hide_char="*", + on_change=self._on_change, + ), + 1, + ) + + layout.add_widget(Divider(height=3), 1) + layout2 = Layout([1, 1, 1]) + self.add_layout(layout2) + layout2.add_widget(Button("Submit", self._submit), 1) + self.fix() + + def _set_default(self): + self.set_theme("bright") + + def _on_change(self): + self.save() + + def _submit(self): + for key, value in self.data.items(): + input_payload[key]["value"] = str(value) + raise StopApplication("User requested exit") + + +class RerunFrame(Frame): + def __init__(self, status, screen): + super(RerunFrame, self).__init__( + screen, + int(8), + int(screen.width * 3 // 4), + has_shadow=True, + name="Rerun popup box", + ) + layout = Layout([1, 4, 1]) + self.add_layout(layout) + layout.add_widget( + Label("Runbook run is in FAILURE STATE '{}'".format(status), height=2), 1 + ) + layout2 = Layout([1, 2, 1]) + self.add_layout(layout2) + layout2.add_widget(Button("Re-run", self._rerun), 1) + layout2.add_widget(Button("Exit", self._exit), 2) + self.fix() + + def _rerun(self): + rerun.update({"rerun": True}) + self._exit() + + def _exit(self): + raise StopApplication("User requested exit") + + +class ConfirmFrame(Frame): + def __init__(self, name, screen): + super(ConfirmFrame, self).__init__( + screen, int(8), int(screen.width * 3 // 4), has_shadow=True, name=name + ) + layout = Layout([1, 4, 1]) + self.add_layout(layout) + layout.add_widget(Label("Confirmation for '{}' task".format(name), height=2), 1) + layout2 = Layout([1, 2, 1]) + self.add_layout(layout2) + layout2.add_widget(Button("Pass", self._pass), 1) + layout2.add_widget(Button("Fail", self._fail), 2) + self.fix() + + def _pass(self): + confirm_payload["confirm_answer"] = "SUCCESS" + raise StopApplication("User requested exit") + + def _fail(self): + confirm_payload["confirm_answer"] = "FAILURE" + raise StopApplication("User requested exit") + + +def displayRunLogTree(screen, root, completed_tasks, total_tasks, msg=None): + screen.clear() + if total_tasks: + progress = "{0:.2f}".format(completed_tasks / total_tasks * 100) + screen.print_at("Progress: {}%".format(progress), 0, 0) + + runlog_state = root.children[0].runlog["status"]["state"] + colour = 3 # 
yellow for pending state + if runlog_state == RUNLOG.STATUS.SUCCESS: + colour = 2 # green for success + elif runlog_state in RUNLOG.FAILURE_STATES: + colour = 1 # red for failure + elif runlog_state == RUNLOG.STATUS.RUNNING: + colour = 4 # blue for running state + elif runlog_state == RUNLOG.STATUS.INPUT: + colour = 6 # cyan for input state + + screen.print_at( + runlog_state, + screen.width - len(runlog_state) - 5 if hasattr(screen, "width") else 0, + 0, + colour=colour, + attr=Screen.A_UNDERLINE, + ) + line = 1 + for pre, fill, node in RenderTree(root): + line = displayRunLog(screen, node, pre, fill, line) + if msg: + screen.print_at(msg, 0, line, colour=6) + line = line + 1 + screen.refresh() + return line + + +class RunlogNode(NodeMixin): + def __init__( + self, + runlog, + machine=None, + parent=None, + children=None, + outputs=None, + reasons=None, + ): + self.runlog = runlog + self.parent = parent + self.outputs = outputs or [] + self.reasons = reasons or [] + self.machine = machine + if children: + self.children = children + + +def displayRunLog(screen, obj, pre, fill, line): + + if not isinstance(obj, RunlogNode): + return super().default(obj) + + metadata = obj.runlog["metadata"] + status = obj.runlog["status"] + state = status["state"] + output = "" + reason_list = "" + + idx = itertools.count(start=line, step=1).__next__ + + if status["type"] == "task_runlog": + name = status["task_reference"]["name"] + for out in obj.outputs: + output += "'{}'\n".format(out[:-1]) + for reason in obj.reasons: + reason_list += "'{}'\n".format(reason) + elif status["type"] == "runbook_runlog": + if "call_runbook_reference" in status: + name = status["call_runbook_reference"]["name"] + else: + name = status["runbook_reference"]["name"] + elif status["type"] == "action_runlog" and "action_reference" in status: + name = status["action_reference"]["name"] + elif status["type"] == "app": + screen.print_at("{}{}".format(pre, status["name"]), 0, idx()) + return idx() + else: + screen.print_at("{}root".format(pre), 0, idx()) + return idx() + + # TODO - Fix KeyError for action_runlog + + if obj.machine: + name = "{} ['{}']".format(name, obj.machine) + + creation_time = int(metadata["creation_time"]) // 1000000 + username = ( + status["userdata_reference"]["name"] if "userdata_reference" in status else None + ) + last_update_time = int(metadata["last_update_time"]) // 1000000 + + if state in RUNLOG.TERMINAL_STATES: + time_stats = "[Time Taken: {:0>8}]".format( + str(timedelta(seconds=last_update_time - creation_time)) + ) + else: + time_stats = "[Started: {}]".format(time.ctime(creation_time)) + + prefix = "{}{} (Status:".format(pre, name) + screen.print_at("{} {}) {}".format(prefix, state, time_stats), 0, line) + colour = 3 # yellow for pending state + if state == RUNLOG.STATUS.SUCCESS: + colour = 2 # green for success + elif state in RUNLOG.FAILURE_STATES: + colour = 1 # red for failure + elif state == RUNLOG.STATUS.RUNNING: + colour = 4 # blue for running state + elif state == RUNLOG.STATUS.INPUT: + colour = 6 # cyan for input state + if os.isatty(sys.stdout.fileno()): + screen.print_at("{}".format(state), len(prefix) + 1, idx(), colour=colour) + + if obj.children: + fill = fill + "\u2502" + + if status["type"] == "action_runlog": + screen.print_at("{}\t Runlog UUID: {}".format(fill, metadata["uuid"]), 0, idx()) + + if username: + screen.print_at("{}\t Run by: {}".format(fill, username), 0, idx()) + + if output: + screen.print_at("{}\t Output :".format(fill), 0, idx()) + output_lines = 
output.splitlines() + for line in output_lines: + y_coord = idx() + screen.print_at("{}\t {}".format(fill, line), 0, y_coord, colour=5, attr=1) + screen.print_at(fill, 0, y_coord) + + if reason_list: + screen.print_at("{}\t Reasons :".format(fill), 0, idx()) + reason_lines = reason_list.splitlines() + for line in reason_lines: + y_coord = idx() + screen.print_at("{}\t {}".format(fill, line), 0, y_coord, colour=1, attr=1) + screen.print_at(fill, 0, y_coord) + + if status["type"] == "task_runlog" and state == RUNLOG.STATUS.INPUT: + attrs = status.get("attrs", None) + if not isinstance(attrs, dict): + return idx() + + input_tasks.append( + {"name": name, "uuid": metadata["uuid"], "inputs": attrs.get("inputs", [])} + ) + + if status["type"] == "task_runlog" and state == RUNLOG.STATUS.CONFIRM: + confirm_tasks.append({"name": name, "uuid": metadata["uuid"]}) + return idx() + + +def get_completion_func(screen): + def is_action_complete( + response, + task_type_map=[], + top_level_tasks=[], + input_data={}, + runlog_uuid=None, + **kwargs, + ): + + client = get_api_client() + global input_tasks + global input_payload + global confirm_tasks + global confirm_payload + global rerun + input_tasks = [] + confirm_tasks = [] + entities = response["entities"] + if len(entities): + + # catching interrupt for pause and play + interrupt = None + if hasattr(screen, "get_event"): + interrupt = screen.get_event() + + # Sort entities based on creation time + sorted_entities = sorted( + entities, key=lambda x: int(x["metadata"]["creation_time"]) + ) + + # Create nodes of runlog tree and a map based on uuid + root = None + nodes = {} + runlog_map = {} + for runlog in sorted_entities: + # Create root node + # TODO - Get details of root node + if not root: + root_uuid = runlog["status"]["root_reference"]["uuid"] + root_runlog = { + "metadata": {"uuid": root_uuid}, + "status": {"type": "action_runlog", "state": ""}, + } + runlog_map[str(root_uuid)] = root_runlog + root = RunlogNode(root_runlog) + nodes[str(root_uuid)] = root + + uuid = runlog["metadata"]["uuid"] + runlog_map[str(uuid)] = runlog + reasons = runlog["status"].get("reason_list", []) + outputs = [] + machine_name = runlog["status"].get("machine_name", None) + machine = parse_machine_name(runlog_uuid, machine_name) + if machine and len(machine) == 1: + runlog["status"]["machine_name"] = "-" + continue # this runlog corresponds to endpoint loop + elif machine: + machine = "{} ({})".format(machine[1], machine[0]) + + if runlog["status"]["type"] == "task_runlog": + + task_id = runlog["status"]["task_reference"]["uuid"] + if task_type_map[task_id] == "META": + continue # don't add metatask's trl in runlogTree + + # Output is not valid for input, confirm and while_loop tasks + if task_type_map[task_id] not in ["INPUT", "CONFIRM", "WHILE_LOOP"]: + res, err = client.runbook.runlog_output(runlog_uuid, uuid) + if err: + raise Exception( + "\n[{}] - {}".format(err["code"], err["error"]) + ) + runlog_output = res.json() + output_list = runlog_output["status"]["output_list"] + if len(output_list) > 0: + outputs.append(output_list[0]["output"]) + + nodes[str(uuid)] = RunlogNode( + runlog, + parent=root, + outputs=outputs, + machine=machine, + reasons=reasons, + ) + + # Attach parent to nodes + for runlog in sorted_entities: + uuid = runlog["metadata"]["uuid"] + if nodes.get(str(uuid), None) is None: + continue + parent_uuid = runlog["status"]["parent_reference"]["uuid"] + parent_runlog = runlog_map[str(parent_uuid)] + parent_type = parent_runlog["status"]["type"] + 
while ( + parent_type == "task_runlog" + and task_type_map[parent_runlog["status"]["task_reference"]["uuid"]] + == "META" + ) or parent_runlog["status"].get("machine_name", None) == "-": + parent_uuid = parent_runlog["status"]["parent_reference"]["uuid"] + parent_runlog = runlog_map[str(parent_uuid)] + parent_type = parent_runlog["status"]["type"] + + node = nodes[str(uuid)] + node.parent = nodes[str(parent_uuid)] + + # Show Progress + # TODO - Draw progress bar + total_tasks = len(top_level_tasks) + task_status_map = {} + completed_tasks = 0 + for runlog in sorted_entities: + runlog_type = runlog["status"]["type"] + if runlog_type == "task_runlog": + task_id = runlog["status"]["task_reference"]["uuid"] + state = runlog["status"]["state"] + if ( + state in RUNLOG.TERMINAL_STATES + and task_id in top_level_tasks + and not task_status_map.get(task_id, None) + ): + task_status_map[task_id] = "COMPLETED" + elif ( + state not in RUNLOG.TERMINAL_STATES + and task_id in top_level_tasks + ): + task_status_map[task_id] = "RUNNING" + for key, val in task_status_map.items(): + if val == "COMPLETED": + completed_tasks += 1 + + line = displayRunLogTree(screen, root, completed_tasks, total_tasks) + + # Check if any tasks is in INPUT state + if len(input_tasks) > 0: + sleep(2) + for input_task in input_tasks: + name = input_task.get("name", "") + inputs = input_task.get("inputs", []) + task_uuid = input_task.get("uuid", "") + input_payload = {} + data = {} + inputs_required = [] + input_value = input_data.get(name, {}) + for singleinput in inputs: + input_type = singleinput.get( + "input_type", SINGLE_INPUT.TYPE.TEXT + ) + input_name = singleinput.get("name", "") + value = input_value.get(input_name, "") + if not value: + inputs_required.append(singleinput) + data.update({input_name: ""}) + input_payload.update( + {input_name: {"secret": False, "value": value}} + ) + if input_type == SINGLE_INPUT.TYPE.PASSWORD: + input_payload.update( + {input_name: {"secret": True, "value": value}} + ) + elif input_type == SINGLE_INPUT.TYPE.DATE: + data.update({input_name: datetime.datetime.now().date()}) + elif input_type == SINGLE_INPUT.TYPE.TIME: + data.update({input_name: datetime.datetime.now().time()}) + if len(inputs_required) > 0: + screen.play( + [ + Scene( + [InputFrame(name, screen, inputs_required, data)], + -1, + ) + ] + ) + if client is not None: + client.runbook.resume( + runlog_uuid, task_uuid, {"properties": input_payload} + ) + input_tasks = [] + msg = "Sending resume for input tasks with input values" + line = displayRunLogTree( + screen, root, completed_tasks, total_tasks, msg=msg + ) + + # Check if any tasks is in CONFIRM state + if len(confirm_tasks) > 0: + sleep(2) + for confirm_task in confirm_tasks: + name = confirm_task.get("name", "") + task_uuid = confirm_task.get("uuid", "") + confirm_payload = {} + screen.play([Scene([ConfirmFrame(name, screen)], -1)]) + if client is not None: + client.runbook.resume(runlog_uuid, task_uuid, confirm_payload) + confirm_tasks = [] + msg = "Sending resume for confirm tasks with confirmation" + line = displayRunLogTree( + screen, root, completed_tasks, total_tasks, msg=msg + ) + + if ( + interrupt + and hasattr(interrupt, "key_code") + and interrupt.key_code in (3, 4) + ): + # exit interrupt + screen.close() + sys.exit(-1) + elif ( + interrupt + and hasattr(interrupt, "key_code") + and interrupt.key_code == 32 + ): + # on space pause/play runbook based on current state + runlog_state = root.children[0].runlog["status"]["state"] + + if runlog_state in [ + 
RUNLOG.STATUS.RUNNING, + RUNLOG.STATUS.INPUT, + RUNLOG.STATUS.CONFIRM, + ]: + client.runbook.pause(runlog_uuid) + msg = "Triggered pause on the runnning Runbook Execution" + elif runlog_state in [RUNLOG.STATUS.PAUSED]: + client.runbook.play(runlog_uuid) + msg = "Triggered play on the paused Runbook Execution" + line = displayRunLogTree( + screen, root, completed_tasks, total_tasks, msg=msg + ) + + rerun = {} + for runlog in sorted_entities: + state = runlog["status"]["state"] + if state in RUNLOG.FAILURE_STATES: + sleep(2) + msg = "Action failed." + if os.isatty(sys.stdout.fileno()): + msg += " Exit screen?" + screen.play([Scene([RerunFrame(state, screen)], -1)]) + if rerun.get("rerun", False): + client.runbook.rerun(runlog_uuid) + msg = "Triggered rerun for the Runbook Runlog" + displayRunLogTree( + screen, root, completed_tasks, total_tasks, msg=msg + ) + return (False, "") + displayRunLogTree( + screen, root, completed_tasks, total_tasks, msg=msg + ) + return (True, msg) + else: + return (True, msg) + if state not in RUNLOG.TERMINAL_STATES: + return (False, "") + + msg = "Action ran successfully." + if os.isatty(sys.stdout.fileno()): + msg += " Exit screen?" + screen.print_at(msg, 0, line, colour=6) + screen.refresh() + + return (True, msg) + return (False, "") + + return is_action_complete + + +def get_runlog_status(screen): + def check_runlog_status(response, client=None, **kwargs): + + # catching interrupt for exit + interrupt = None + if hasattr(screen, "get_event"): + interrupt = screen.get_event() + + if ( + interrupt + and hasattr(interrupt, "key_code") + and interrupt.key_code in (3, 4) + ): + # exit interrupt + screen.close() + sys.exit(-1) + + if response["status"]["state"] == "PENDING": + msg = "Runlog run is in PENDING state" + screen.clear() + screen.print_at(msg, 0, 0) + screen.refresh() + elif response["status"]["state"] in RUNLOG.FAILURE_STATES: + msg = "Runlog run is in {} state.".format(response["status"]["state"]) + msg += " {}".format("\n".join(response["status"]["reason_list"])) + screen.clear() + screen.print_at(msg, 0, 0) + screen.refresh() + if response["status"]["reason_list"] == []: + return (True, "") + return (True, msg) + else: + return (True, "") + return (False, msg) + + return check_runlog_status diff --git a/framework/calm/dsl/cli/scheduler.py b/framework/calm/dsl/cli/scheduler.py new file mode 100644 index 0000000..c1d62b0 --- /dev/null +++ b/framework/calm/dsl/cli/scheduler.py @@ -0,0 +1,693 @@ +import sys +import json +import time +import calendar +import arrow +import click +from prettytable import PrettyTable +from datetime import datetime + +try: + from zoneinfo import ZoneInfo +except ImportError: + from backports.zoneinfo import ZoneInfo + + +from calm.dsl.api import get_api_client +from calm.dsl.builtins import Job +from calm.dsl.builtins.models import job +from calm.dsl.cli import runbooks +from calm.dsl.cli import apps +from calm.dsl.config import get_context +from calm.dsl.constants import CACHE +from calm.dsl.log import get_logging_handle +from calm.dsl.store import Cache +from calm.dsl.tools import get_module_from_file + +# from calm.dsl.builtins.models.metadata_payload import get_metadata_payload +from .utils import ( + Display, + get_name_query, + highlight_text, + get_states_filter, + import_var_from_file, +) +from .constants import JOBS, JOBINSTANCES, SYSTEM_ACTIONS + +LOG = get_logging_handle(__name__) + + +def create_job_command(job_file, name, description, force): + """Creates a job in scheduler""" + + # if 
job_file.endswith(".json"): + # res, err = create_job_from_json( + # client, job_file, name=name, description=description, force_create=force + # ) + if job_file.endswith(".py"): + res, err = create_job_from_dsl( + job_file, name=name, description=description, force_create=force + ) + else: + LOG.error("Unknown file format {}".format(job_file)) + return + + if err: + LOG.error(err["error"]) + return err["error"] + + job = res.json() + + job_uuid = job["metadata"]["uuid"] + job_name = job["metadata"]["name"] + job_state = job["resources"]["state"] + LOG.debug("Job {} has uuid: {}".format(job_name, job_uuid)) + + if job_state != "ACTIVE": + msg_list = job.get("resources").get("message_list", []) + if not msg_list: + LOG.error("Job {} created with errors.".format(job_name)) + LOG.debug(json.dumps(job)) + return job + + msgs = [] + for msg_dict in msg_list: + msgs.append(msg_dict.get("message", "")) + + LOG.error( + "Job {} created with {} error(s): {}".format(job_name, len(msg_list), msgs) + ) + return job + + LOG.info("Job {} created successfully.".format(job_name)) + return job + + +# +# def create_job_from_json( +# client, path_to_json, name=None, description=None, force_create=False +# ): +# +# runbook_payload = json.loads(open(path_to_json, "r").read()) +# return create_runbook( +# client, +# runbook_payload, +# name=name, +# description=description, +# force_create=force_create, +# ) + + +def get_job_module_from_file(job_file): + """Returns Job module given a user job dsl file (.py)""" + return get_module_from_file("calm.dsl.user_job", job_file) + + +def get_job_class_from_module(user_bp_module): + """Returns Job class given a module""" + + UserJob = None + for item in dir(user_bp_module): + obj = getattr(user_bp_module, item) + if isinstance(obj, (type(Job))): + if obj.__bases__[0] == Job: + UserJob = obj + + return UserJob + + +def create_job_from_dsl(job_file, name=None, description=None, force_create=False): + + job_payload = compile_job(job_file) + if job_payload is None: + err_msg = "User job not found in {}".format(job_file) + err = {"error": err_msg, "code": -1} + return None, err + + return create_job( + job_payload, + name=name, + description=description, + force_create=force_create, + ) + + +def create_job(job_payload, name=None, description=None, force_create=False): + + if name: + job_payload["resources"]["name"] = name + job_payload["metadata"]["name"] = name + + if description: + job_payload["resources"]["description"] = description + + client = get_api_client() + return client.job.create(job_payload) + + +def compile_job(job_file): + """returns compiled payload from dsl file""" + + # metadata_payload = get_metadata_payload(job_file) + + user_job_module = get_job_module_from_file(job_file) + UserJob = get_job_class_from_module(user_job_module) + if UserJob is None: + LOG.error("Job not found in {}".format(job_file)) + return + + # create job payload + job_payload = { + "resources": UserJob.get_dict(), + "metadata": { + "name": UserJob.get_dict()["name"], + "kind": "job", + }, + "api_version": "3.0", + } + + # if "project_reference" in metadata_payload: + # # Read metadata payload and set project reference + # job_payload["metadata"]["project_reference"] = metadata_payload[ + # "project_reference" + # ] + # else: + # Read project name and uuid from config and set in job payload + ContextObj = get_context() + project_config = ContextObj.get_project_config() + project_name = project_config["name"] + project_cache_data = Cache.get_entity_data( + 
entity_type=CACHE.ENTITY.PROJECT, name=project_name + ) + + if not project_cache_data: + LOG.error( + "Project {} not found. Please run: calm update cache".format(project_name) + ) + sys.exit("Project not found.") + + project_uuid = project_cache_data.get("uuid", "") + job_payload["metadata"]["project_reference"] = { + "kind": "project", + "uuid": project_uuid, + "name": project_name, + } + + executable_type = job_payload["resources"]["executable"].get("entity").get("type") + project_uuid = job_payload["metadata"]["project_reference"].get("uuid") + project_name = job_payload["metadata"]["project_reference"].get("name") + executable_uuid = job_payload["resources"]["executable"].get("entity").get("uuid") + + if executable_type == "app": + # Get app uuid from name + client = get_api_client() + res, err = client.application.read(executable_uuid) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(err["error"]) + + app = res.json() + + # Check if project uuid in config is same as project uuid of the app + if app["metadata"]["project_reference"]["uuid"] != project_uuid: + application_name = app["metadata"]["name"] + + LOG.error( + "Application {} does not belong to project {}.".format( + application_name, project_name + ) + ) + sys.exit( + "Application {} does not belong to project {}.".format( + application_name, project_name + ) + ) + elif executable_type == "runbook": + client = get_api_client() + res, err = client.runbook.read(executable_uuid) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(err["error"]) + + runbook = res.json() + + # Check if project uuid in config is same as project uuid of the runbook + if runbook["metadata"]["project_reference"]["uuid"] != project_uuid: + runbook_name = runbook["metadata"]["name"] + LOG.error( + "Runbook '{}' does not belong to project '{}'.".format( + runbook_name, project_name + ) + ) + sys.exit( + "Runbook '{}' does not belong to project '{}'.".format( + runbook_name, project_name + ) + ) + + return job_payload + + +def get_job(client, name, all=False): + + # find job + params = {"filter": "name=={}".format(name)} + if not all: + params["filter"] += ";deleted==FALSE" + + res, err = client.job.list(params=params) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(err["error"]) + + response = res.json() + entities = response.get("entities", None) + job_data = None + if entities: + if len(entities) != 1: + raise Exception("More than one job found - {}".format(entities)) + + LOG.info("Job {} found ".format(name)) + job_data = entities[0] + else: + raise Exception("No job found with name {} found".format(name)) + return job_data + + +def describe_job_command(job_name, out): + """Displays job data""" + client = get_api_client() + job_get_res = get_job(client, job_name, all=True) + + res, err = client.job.read(job_get_res["metadata"]["uuid"]) + + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(err["error"]) + + job_response = res.json() + + if out == "json": + job_response.pop("status", None) + click.echo(json.dumps(job_response, indent=4, separators=(",", ": "))) + return + + click.echo("\n----Job Summary----\n") + click.echo( + "Name: " + + highlight_text(job_response["resources"]["name"]) + + " (uuid: " + + highlight_text(job_response["metadata"]["uuid"]) + + " (project: " + + highlight_text(job_response["metadata"]["project_reference"]["name"]) + + ")" + ) + + description = job_response["resources"].get("description", "") + 
click.echo("Description: " + highlight_text(description)) + + schedule_type = job_response["resources"]["type"] + click.echo("Status: " + highlight_text(job_response["resources"]["state"])) + + owner = job_response["metadata"]["owner_reference"]["name"] + click.echo("Owner: " + highlight_text(owner)) + + created_on = int(job_response["metadata"]["creation_time"]) // 1000000 + past = arrow.get(created_on).humanize() + click.echo( + "Created: {} ({})".format( + highlight_text(time.ctime(created_on)), highlight_text(past) + ) + ) + last_updated = int(job_response["metadata"]["last_update_time"]) // 1000000 + past = arrow.get(last_updated).humanize() + click.echo( + "Last Updated: {} ({})\n".format( + highlight_text(time.ctime(last_updated)), highlight_text(past) + ) + ) + + message_list = job_response["resources"].get("message_list", "") + messages = [] + if len(message_list) != 0: + for message in message_list: + messages.append(message) + + if len(messages) > 0: + click.echo("----Errors----") + click.echo("Messages:") + for message in messages: + click.echo( + highlight_text(message.get("reason", "")) + + " for attribute " + + highlight_text(message.get("details").get("attribute_name")) + + " ." + + highlight_text(message.get("message", "")) + ) + click.echo("") + + executable_type = job_response["resources"]["executable"]["entity"]["type"] + + click.echo("--Schedule Info--") + + click.echo("Schedule Type: " + highlight_text(schedule_type)) + time_zone = job_response["resources"]["schedule_info"]["time_zone"] + click.echo("Time Zone: " + highlight_text(time_zone)) + + if schedule_type == "ONE-TIME": + start_time = int(job_response["resources"]["schedule_info"]["execution_time"]) + past = arrow.get(start_time).humanize() + click.echo( + "Starts On: {} ({})".format( + highlight_text(time.ctime(start_time)), highlight_text(past) + ) + ) + + elif schedule_type == "RECURRING": + start_time = int(job_response["resources"]["schedule_info"]["start_time"]) + past = arrow.get(start_time).humanize() + click.echo( + "Starts On: {} ({})".format( + highlight_text(time.ctime(start_time)), highlight_text(past) + ) + ) + + expiry_time = job_response["resources"]["schedule_info"].get("expiry_time", "") + if expiry_time == "": + click.echo("Ends: {}".format(highlight_text("Never"))) + else: + past = arrow.get(expiry_time).humanize() + click.echo( + "Ends On: {} ({})".format( + highlight_text(time.ctime(int(expiry_time))), highlight_text(past) + ) + ) + + schedule = job_response["resources"]["schedule_info"]["schedule"] + click.echo("Schedule: {}".format(highlight_text(schedule))) + + next_execution_time = int(job_response["resources"]["next_execution_time"]) + past = arrow.get(next_execution_time).humanize() + click.echo( + "Next Execution Time: {} ({})\n".format( + highlight_text(time.ctime(next_execution_time)), highlight_text(past) + ) + ) + if executable_type == "runbook": + runbook_uuid = job_response["resources"]["executable"]["entity"]["uuid"] + res, err = client.runbook.read(runbook_uuid) + runbook = res.json() + + msg_list = runbook.get("message_list", []) + msgs = [] + for msg_dict in msg_list: + msgs.append(msg_dict.get("message", "")) + + click.echo("--Executable Info--") + + click.echo("Type: " + highlight_text(executable_type)) + + # If runbook is not found + if len(msg_list) != 0 or len(msgs) != 0: + click.echo(msgs) + + else: + click.echo( + "Name: " + + highlight_text(runbook["metadata"]["name"]) + + " (uuid: " + + highlight_text(runbook["metadata"]["uuid"]) + + ")" + ) + + 
variable_list_string = job_response["resources"]["executable"]["action"][ + "spec" + ].get("payload", "") + endpoint_name = "" + endpoint_uuid = "" + if variable_list_string != "": + variable_list = json.loads(variable_list_string) + endpoint_target_reference = variable_list.get("spec").get( + "default_target_reference" + ) + if endpoint_target_reference is not None: + endpoint_name = endpoint_target_reference.get("name", "") + endpoint_uuid = endpoint_target_reference.get("uuid", "") + + variable_list = variable_list["spec"]["args"] + + click.echo("Runbook :") + + variable_types = [] + + for var in variable_list: + var_name = var.get("name") + var_value = var.get("value", "") + variable_types.append( + "Name: " + var_name + " | " + "Value: " + var_value + ) + + click.echo( + "\tVariables [{}]:".format(highlight_text(len(variable_types))) + ) + click.echo("\t\t{}\n".format(highlight_text(", ".join(variable_types)))) + + click.echo( + "Default Endpoint Target: " + + highlight_text(endpoint_name) + + " (uuid: " + + highlight_text(endpoint_uuid) + + ")" + ) + elif executable_type == "app": + click.echo("--Executable Info--") + + app_uuid = job_response["resources"]["executable"]["entity"]["uuid"] + res, err = client.application.read(app_uuid) + application = res.json() + + click.echo("Type: " + highlight_text(executable_type.upper())) + click.echo( + "Application Name: " + highlight_text(application["metadata"]["name"]) + ) + app_spec = application["spec"] + calm_action_uuid = job_response["resources"]["executable"]["action"]["spec"][ + "uuid" + ] + action_payload = next( + ( + action + for action in app_spec["resources"]["action_list"] + if action["uuid"] == calm_action_uuid + ), + None, + ) + click.echo( + "Application Action: " + highlight_text(action_payload.get("name", "")) + ) + action_args = apps.get_action_runtime_args( + app_uuid=app_uuid, + action_payload=action_payload, + patch_editables=False, + runtime_params_file=False, + ) + if not action_payload: + LOG.error("No action found") + sys.exit(-1) + + if len(action_args) > 0: + variable_types = [] + + for var in action_args: + var_name = var.get("name") + var_value = var.get("value", "") + variable_types.append( + "Name: " + var_name + " | " + "Value: " + var_value + ) + + click.echo("\tVariables [{}]:".format(highlight_text(len(variable_types)))) + click.echo("\t\t{}\n".format(highlight_text(", ".join(variable_types)))) + + +def get_job_list_command(name, filter_by, limit, offset, quiet, all_items): + """Get the runbooks, optionally filtered by a string""" + + client = get_api_client() + + params = {"length": limit, "offset": offset} + filter_query = "" + if name: + filter_query = get_name_query([name]) + if filter_by: + filter_query = filter_query + ";(" + filter_by + ")" + + if all_items: + filter_query += get_states_filter(JOBS.STATES) + if filter_query.startswith(";"): + filter_query = filter_query[1:] + + if filter_query: + params["filter"] = filter_query + + res, err = client.job.list(params=params) + + if err: + LOG.warning("Cannot fetch jobs.") + return + + json_rows = res.json().get("entities", "") + + if not json_rows: + click.echo(highlight_text("No jobs found !!!\n")) + return + + if quiet: + for _row in json_rows: + row = _row["resources"] + click.echo(highlight_text(row["name"])) + return + + table = PrettyTable() + table.field_names = [ + "NAME", + # "DESCRIPTION", + "PROJECT", + "STATE", + "TYPE", + # "EXECUTION HISTORY", + "CREATED BY", + # "LAST EXECUTED AT", + "LAST UPDATED", + "UUID", + ] + for _row in 
json_rows: + row = _row["resources"] + metadata = _row["metadata"] + + created_by = metadata.get("owner_reference", {}).get("name", "-") + description = row.get("description", "-") + # last_run = int(row.get("last_run_time", 0)) // 1000000 + last_update_time = int(metadata["last_update_time"]) // 1000000 + project = metadata.get("project_reference", {}).get("name", "") + # total_runs = int(row.get("run_count", 0)) + int(row.get("running_runs", 0)) + + table.add_row( + [ + highlight_text(row["name"]), + # highlight_text(description), + highlight_text(project), + highlight_text(row["state"]), + highlight_text(row["type"]), + # highlight_text(total_runs if total_runs else "-"), + highlight_text(created_by), + # "{}".format(arrow.get(last_run).humanize()) if last_run else "-", + "{}".format(arrow.get(last_update_time).humanize()), + highlight_text(metadata["uuid"]), + ] + ) + click.echo(table) + return table + + +def get_job_instances_command(job_name, out, filter_by, limit, offset, all_items): + """Displays job instance data""" + client = get_api_client() + job_get_res = get_job(client, job_name, all=True) + + params = {"length": limit, "offset": offset} + filter_query = "" + if filter_by: + filter_query = filter_query + ";(" + filter_by + ")" + + if all_items: + filter_query += get_states_filter(JOBINSTANCES.STATES) + + if filter_query.startswith(";"): + filter_query = filter_query[1:] + + if filter_query: + params["filter"] = filter_query + + res, err = client.job.instances(job_get_res["metadata"]["uuid"], params=params) + + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(err["error"]) + + json_rows = res.json().get("entities", "") + + if not json_rows: + click.echo(highlight_text("No job instances found !!!\n")) + LOG.debug("response:{}".format(res.json())) + return "[]" + + if out == "json": + click.echo(json.dumps(json_rows, indent=4, separators=(",", ": "))) + return json.dumps(json_rows, indent=4) + + click.echo("\nJob Name: {}\n".format(highlight_text(job_name))) + + click.echo("--Job Instances List--\n") + + table = PrettyTable() + table.field_names = [ + "STATE", + "SCHEDULED TIME", + "START TIME", + "END TIME", + "CREATED", + "REASON", + "UUID", + ] + for _row in json_rows: + row = _row["resources"] + metadata = _row["metadata"] + + start_timestamp = "" + past_start_time_output = "-" + start_time = int(row["start_time"]) + + reason = row.get("reason", "-") + + if start_time != 0: + start_timestamp = time.ctime(start_time) + past_start_time = arrow.get(start_time).humanize() + past_start_time_output = " ({})".format(past_start_time) + + scheduled_time = int(row["scheduled_time"]) + past_scheduled_time = arrow.get(scheduled_time).humanize() + + creation_time = int(metadata["creation_time"]) // 1000000 + + table.add_row( + [ + highlight_text(row["state"]), + highlight_text( + str(time.ctime(scheduled_time)) + + " ({})".format(past_scheduled_time) + ), + highlight_text(str(start_timestamp) + past_start_time_output), + highlight_text("-"), + "{}".format(arrow.get(creation_time).humanize()), + highlight_text(reason), + highlight_text(metadata["uuid"]), + ] + ) + click.echo(table) + + +def delete_job(job_names): + """Delete jobs""" + client = get_api_client() + + for job_name in job_names: + + job_get_res = get_job(client, job_name, all=True) + + job_uuid = job_get_res["metadata"]["uuid"] + + _, err = client.job.delete(job_uuid) + if err: + LOG.error("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + LOG.info("Job {} deleted".format(job_name)) 
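A minimal usage sketch of the scheduler helpers added above, for reviewers; it is illustrative only and not part of the patch. It assumes the "framework" directory is on PYTHONPATH (so the module resolves as calm.dsl.cli.scheduler), that a hypothetical Job DSL file "daily_job.py" and job name "daily-backup" exist, and that the active DSL config points at a reachable Prism Central project.

# Illustrative only: drive the scheduler helpers directly from Python,
# outside of the click wiring added in scheduler_commands.py below.
from calm.dsl.cli.scheduler import (
    create_job_command,
    get_job_list_command,
    get_job_instances_command,
    delete_job,
)

# On success create_job_command returns the created job dict; on failure it
# returns an error string (or None for an unknown file format), so guard first.
job = create_job_command("daily_job.py", name="daily-backup", description=None, force=False)

if isinstance(job, dict) and job.get("resources", {}).get("state") == "ACTIVE":
    # List jobs matching the name, then show execution instances for this job.
    get_job_list_command("daily-backup", None, 20, 0, False, False)
    get_job_instances_command("daily-backup", "text", None, 20, 0, False)
    # Clean up once the schedule is no longer needed.
    # delete_job(["daily-backup"])

These are the same helpers that the new `calm create job`, `calm get jobs`, `calm get job_instances`, and `calm delete job` commands delegate to in scheduler_commands.py.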
diff --git a/framework/calm/dsl/cli/scheduler_commands.py b/framework/calm/dsl/cli/scheduler_commands.py new file mode 100644 index 0000000..923210f --- /dev/null +++ b/framework/calm/dsl/cli/scheduler_commands.py @@ -0,0 +1,112 @@ +import click + +from calm.dsl.log import get_logging_handle + +from .main import ( + create, + describe, + get, + delete, +) +from .scheduler import ( + create_job_command, + describe_job_command, + get_job_list_command, + get_job_instances_command, + delete_job, +) + +LOG = get_logging_handle(__name__) + + +@create.command("job", feature_min_version="3.0.0", experimental=True) +@click.option( + "--file", + "-f", + "job_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + required=True, + help="Path of Job file to upload", +) +@click.option("--name", "-n", default=None, help="Job name (Optional)") +@click.option("--description", default=None, help="Job description (Optional)") +@click.option( + "--force", + "-fc", + is_flag=True, + default=False, + help="", +) +def _create_job_command(job_file, name, description, force): + """Creates a job in scheduler""" + + create_job_command(job_file, name, description, force) + + +@describe.command("job", feature_min_version="3.0.0", experimental=True) +@click.argument("job_name") +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format [text|json].", +) +def _describe_job(job_name, out): + """Describe a job""" + + describe_job_command(job_name, out) + + +@get.command("jobs", feature_min_version="3.0.0", experimental=True) +@click.option("--name", "-n", default=None, help="Search for job by name") +@click.option( + "--filter", "filter_by", "-f", default=None, help="Filter jobs by this string" +) +@click.option("--limit", "-l", default=20, help="Number of results to return") +@click.option( + "--offset", "-o", default=0, help="Offset results by the specified amount" +) +@click.option("--quiet", "-q", is_flag=True, default=False, help="Show only job names.") +@click.option( + "--all-items", "-a", is_flag=True, help="Get all items, including deleted ones" +) +def _get_job_list(name, filter_by, limit, offset, quiet, all_items): + """Get the jobs, optionally filtered by a string""" + + get_job_list_command(name, filter_by, limit, offset, quiet, all_items) + + +@get.command("job_instances", feature_min_version="3.0.0", experimental=True) +@click.argument("job_name") +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format [text|json].", +) +@click.option( + "--filter", "filter_by", "-f", default=None, help="Filter jobs by this string" +) +@click.option("--limit", "-l", default=20, help="Number of results to return") +@click.option( + "--offset", "-o", default=0, help="Offset results by the specified amount" +) +@click.option( + "--all-items", "-a", is_flag=True, help="Get all items, including deleted ones" +) +def _get_job_instances(job_name, out, filter_by, limit, offset, all_items): + """Describe a job""" + + get_job_instances_command(job_name, out, filter_by, limit, offset, all_items) + + +@delete.command("job") +@click.argument("job_names", nargs=-1) +def _delete_job(job_names): + """Deletes a job""" + + delete_job(job_names) diff --git a/framework/calm/dsl/cli/secret_commands.py b/framework/calm/dsl/cli/secret_commands.py new file mode 100644 index 0000000..6f4280b --- /dev/null +++ b/framework/calm/dsl/cli/secret_commands.py @@ -0,0 +1,64 @@ +import click + 
+from .main import get, create, update, delete, clear +from .secrets import ( + create_secret, + get_secrets, + delete_secret, + update_secret, + clear_secrets, +) + + +# TODO Apply --type = local/server parameter +@create.command("secret") +@click.argument("name", nargs=1) +@click.option( + "--value", + "-v", + prompt=True, + hide_input=True, + confirmation_prompt=True, + help="Value for secret", +) +def _create_secret(name, value): + """Creates a secret""" + + create_secret(name, value) + + +@get.command("secrets") +@click.option( + "--quiet", "-q", is_flag=True, default=False, help="Show only sceret names." +) +def _get_secrets(quiet): + """Get secrets""" + + get_secrets(quiet) + + +@delete.command("secret") +@click.argument("name", nargs=1) +def _delete_secret(name): + """Deletes a secret""" + + delete_secret(name) + + +@update.command("secret") +@click.argument("name", nargs=1) +@click.option("--value", "-v", prompt=True, hide_input=True, confirmation_prompt=True) +def _update_secret(name, value): + """Updates a secret + + NAME is the alias for your secret + """ + + update_secret(name, value) + + +@clear.command("secrets") +def _clear_secrets(): + """Delete all the secrets stored in the local db""" + + clear_secrets() diff --git a/framework/calm/dsl/cli/secrets.py b/framework/calm/dsl/cli/secrets.py new file mode 100644 index 0000000..8ae1d86 --- /dev/null +++ b/framework/calm/dsl/cli/secrets.py @@ -0,0 +1,107 @@ +import click +import arrow +import datetime +from prettytable import PrettyTable + +from .utils import highlight_text + +from calm.dsl.store import Secret +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +def create_secret(name, value): + """Creates the secret""" + + secrets = get_secrets_names() + if name in secrets: + LOG.error("Secret {} already present !!!".format(name)) + return + + LOG.debug("Creating secret {}".format(name)) + Secret.create(name, value) + LOG.info(highlight_text("Secret {} created".format(name))) + + +def get_secrets(quiet): + """List the secrets""" + + avl_secrets = Secret.list() + + if not avl_secrets: + click.echo(highlight_text("No secret found !!!\n")) + return + + if quiet: + for secret in avl_secrets: + click.echo(highlight_text(secret["name"])) + return + + table = PrettyTable() + table.field_names = ["NAME", "CREATED ON", "LAST UPDATED", "UUID"] + + for secret in avl_secrets: + creation_time = (secret["creation_time"]).strftime("%A, %d. 
%B %Y %I:%M%p") + last_update_time = arrow.get( + secret["last_update_time"].astimezone(datetime.timezone.utc) + ).humanize() + table.add_row( + [ + highlight_text(secret["name"]), + highlight_text(creation_time), + highlight_text(last_update_time), + highlight_text(secret["uuid"]), + ] + ) + + click.echo(table) + + +def delete_secret(name): + """Deletes the secret""" + + secrets = get_secrets_names() + if name not in secrets: + LOG.error("Secret {} not present !!!".format(name)) + return + + LOG.info("Deleting secret {}".format(name)) + Secret.delete(name) + + +def update_secret(name, value): + """Updates the secret""" + + secrets = get_secrets_names() + if name not in secrets: + LOG.error("Secret {} not present !!!".format(name)) + return + + LOG.info("Updating secret {}".format(name)) + Secret.update(name, value) + + +def find_secret(name, pass_phrase=""): + """Gives you the value stored correponding to secret""" + + secret_val = Secret.find(name, pass_phrase) + return secret_val + + +def get_secrets_names(): + """To find the names stored in db""" + + secrets = Secret.list() + secret_names = [] + for secret in secrets: + secret_names.append(secret["name"]) + + return secret_names + + +def clear_secrets(): + """Delete all the secrets""" + + LOG.info("Clearing the secrets") + Secret.clear() diff --git a/framework/calm/dsl/cli/task_commands.py b/framework/calm/dsl/cli/task_commands.py new file mode 100644 index 0000000..4beb2dc --- /dev/null +++ b/framework/calm/dsl/cli/task_commands.py @@ -0,0 +1,58 @@ +import click +import time + +from .main import watch +from .constants import ERGON_TASK +from calm.dsl.api import get_api_client, get_resource_api +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +def watch_task(task_uuid, poll_interval=2): + + client = get_api_client() + Obj = get_resource_api("tasks", client.connection) + + cnt = 0 + while True: + LOG.info("Fetching status of task") + res, err = Obj.read(task_uuid) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + res = res.json() + status = res["status"] + LOG.info(status) + + if status in ERGON_TASK.TERMINAL_STATES: + error_detail = res.get("error_detail", "") + if error_detail: + LOG.error(error_detail) + return status + + time.sleep(poll_interval) + cnt += 1 + if cnt == 10: + break + + LOG.info( + "Task couldn't reached to terminal state in {} seconds. 
Exiting...".format( + poll_interval * 10 + ) + ) + + +@watch.command("task") +@click.argument("task_uuid") +@click.option( + "--poll_interval", + "-p", + type=int, + default=2, + show_default=True, + help="Give polling interval", +) +def _watch_task(task_uuid, poll_interval): + """Watch a task""" + + watch_task(task_uuid=task_uuid, poll_interval=poll_interval) diff --git a/framework/calm/dsl/cli/user_commands.py b/framework/calm/dsl/cli/user_commands.py new file mode 100644 index 0000000..149a460 --- /dev/null +++ b/framework/calm/dsl/cli/user_commands.py @@ -0,0 +1,51 @@ +import click + +from .users import get_users, create_user, delete_user +from .main import get, create, delete + + +@get.command("users") +@click.option("--name", "-n", default=None, help="Search for users by name") +@click.option( + "--filter", "filter_by", "-f", default=None, help="Filter users by this string" +) +@click.option("--limit", "-l", default=20, help="Number of results to return") +@click.option( + "--offset", "-s", default=0, help="Offset results by the specified amount" +) +@click.option("--quiet", "-q", is_flag=True, default=False, help="Show only user names") +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format", +) +def _get_users(name, filter_by, limit, offset, quiet, out): + """Get users, optionally filtered by a string""" + + get_users(name, filter_by, limit, offset, quiet, out) + + +@create.command("user") +@click.option("--name", "-n", required=True, help="Principal Name of user") +@click.option( + "--directory", + "-d", + "directory_service", + required=True, + help="Directory Service of user", +) +def _create_user(name, directory_service): + """Creates a user""" + + create_user(name, directory_service) + + +@delete.command("user") +@click.argument("user_names", nargs=-1) +def _delete_user(user_names): + """Deletes a user""" + + delete_user(user_names) diff --git a/framework/calm/dsl/cli/users.py b/framework/calm/dsl/cli/users.py new file mode 100644 index 0000000..d60e813 --- /dev/null +++ b/framework/calm/dsl/cli/users.py @@ -0,0 +1,178 @@ +import click +import json +import sys +from prettytable import PrettyTable + +from calm.dsl.api import get_api_client +from calm.dsl.config import get_context +from calm.dsl.builtins import Ref +from calm.dsl.store import Cache +from calm.dsl.constants import CACHE +from calm.dsl.log import get_logging_handle + +from .utils import get_name_query, highlight_text +from .task_commands import watch_task +from .constants import ERGON_TASK + + +LOG = get_logging_handle(__name__) + + +def get_users(name, filter_by, limit, offset, quiet, out): + """Get the users, optionally filtered by a string""" + + client = get_api_client() + + params = {"length": limit, "offset": offset} + filter_query = "" + if name: + filter_query = get_name_query([name]) + if filter_by: + filter_query = filter_query + ";(" + filter_by + ")" + if filter_query.startswith(";"): + filter_query = filter_query[1:] + + if filter_query: + params["filter"] = filter_query + + res, err = client.user.list(params=params) + + if err: + ContextObj = get_context() + server_config = ContextObj.get_server_config() + pc_ip = server_config["pc_ip"] + + LOG.warning("Cannot fetch users from {}".format(pc_ip)) + return + + res = res.json() + total_matches = res["metadata"]["total_matches"] + if total_matches > limit: + LOG.warning( + "Displaying {} out of {} entities. 
Please use --limit and --offset option for more results.".format( + limit, total_matches + ) + ) + + if out == "json": + click.echo(json.dumps(res, indent=4, separators=(",", ": "))) + return + + json_rows = res["entities"] + if not json_rows: + click.echo(highlight_text("No user found !!!\n")) + return + + if quiet: + for _row in json_rows: + row = _row["status"] + click.echo(highlight_text(row["name"])) + return + + table = PrettyTable() + table.field_names = ["NAME", "DISPLAY NAME", "TYPE", "STATE", "UUID"] + + for _row in json_rows: + row = _row["status"] + metadata = _row["metadata"] + + table.add_row( + [ + highlight_text(row["name"]), + highlight_text(row["resources"].get("display_name", "")), + highlight_text(row["resources"]["user_type"]), + highlight_text(row["state"]), + highlight_text(metadata["uuid"]), + ] + ) + + click.echo(table) + + +def create_user(name, directory_service): + + client = get_api_client() + + params = {"length": 1000} + user_name_uuid_map = client.user.get_name_uuid_map(params) + + if user_name_uuid_map.get("name"): + LOG.error("User with name {} already exists".format(name)) + sys.exit(-1) + + user_payload = { + "spec": { + "resources": { + "directory_service_user": { + "user_principal_name": name, + "directory_service_reference": Ref.DirectoryService( + directory_service + ), + } + } + }, + "metadata": {"kind": "user", "spec_version": 0}, + } + + res, err = client.user.create(user_payload) + if err: + LOG.error(err) + sys.exit(-1) + + res = res.json() + stdout_dict = { + "name": name, + "uuid": res["metadata"]["uuid"], + "execution_context": res["status"]["execution_context"], + } + click.echo(json.dumps(stdout_dict, indent=4, separators=(",", ": "))) + + user_uuid = res["metadata"]["uuid"] + LOG.info("Polling on user creation task") + task_state = watch_task( + res["status"]["execution_context"]["task_uuid"], poll_interval=5 + ) + if task_state in ERGON_TASK.FAILURE_STATES: + LOG.exception("User creation task went to {} state".format(task_state)) + sys.exit(-1) + + # Update users in cache + LOG.info("Updating users cache ...") + Cache.add_one(entity_type=CACHE.ENTITY.USER, uuid=user_uuid) + LOG.info("[Done]") + + +def delete_user(user_names): + + client = get_api_client() + params = {"length": 1000} + user_name_uuid_map = client.user.get_name_uuid_map(params) + + deleted_user_uuids = [] + for name in user_names: + user_uuid = user_name_uuid_map.get(name, "") + if not user_uuid: + LOG.error("User {} doesn't exists".format(name)) + sys.exit(-1) + + res, err = client.user.delete(user_uuid) + if err: + LOG.exception("[{}] - {}".format(err["code"], err["error"])) + sys.exit(-1) + + deleted_user_uuids.append(user_uuid) + LOG.info("Polling on user deletion task") + res = res.json() + task_state = watch_task( + res["status"]["execution_context"]["task_uuid"], poll_interval=5 + ) + if task_state in ERGON_TASK.FAILURE_STATES: + LOG.exception("User deletion task went to {} state".format(task_state)) + sys.exit(-1) + + # Update users in cache + if deleted_user_uuids: + LOG.info("Updating users cache ...") + for _user_uuid in deleted_user_uuids: + Cache.delete_one(entity_type=CACHE.ENTITY.USER, uuid=_user_uuid) + LOG.info("[Done]") diff --git a/framework/calm/dsl/cli/utils.py b/framework/calm/dsl/cli/utils.py new file mode 100644 index 0000000..5633daf --- /dev/null +++ b/framework/calm/dsl/cli/utils.py @@ -0,0 +1,233 @@ +import click +import sys +import os +from functools import reduce +from asciimatics.screen import Screen +from click_didyoumean import DYMMixin 
+from distutils.version import LooseVersion as LV + +from calm.dsl.tools import get_module_from_file +from calm.dsl.api import get_api_client +from calm.dsl.constants import PROVIDER_ACCOUNT_TYPE_MAP +from calm.dsl.store import Version +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +def get_states_filter(STATES_CLASS=None, state_key="state", states=[]): + + if not states: + for field in vars(STATES_CLASS): + if not field.startswith("__"): + states.append(getattr(STATES_CLASS, field)) + state_prefix = ",{}==".format(state_key) + return ";({}=={})".format(state_key, state_prefix.join(states)) + + +def get_name_query(names): + if names: + search_strings = [ + "name==.*" + + reduce( + lambda acc, c: "{}[{}|{}]".format(acc, c.lower(), c.upper()), name, "" + ) + + ".*" + for name in names + ] + return "({})".format(",".join(search_strings)) + return "" + + +def highlight_text(text, **kwargs): + """Highlight text in our standard format""" + return click.style("{}".format(text), fg="blue", bold=False, **kwargs) + + +def import_var_from_file(file, var, default_value=None): + try: + module = get_module_from_file(var, file) + return getattr(module, var) + except: # NoQA + return default_value + + +class Display: + @classmethod + def wrapper(cls, func, watch=False): + if watch and os.isatty(sys.stdout.fileno()): + Screen.wrapper(func, height=1000) + else: + func(display) + + def clear(self): + pass + + def refresh(self): + pass + + def wait_for_input(self, *args): + pass + + def print_at(self, text, x, *args, **kwargs): + click.echo("{}{}".format((" " * x), text)) + + +display = Display() + + +class FeatureFlagMixin: + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.feature_version_map = dict() + self.experimental_cmd_map = dict() + + def command(self, *args, **kwargs): + """Behaves the same as `click.Group.command()` except added an + `feature_min_version` flag which can be used to warn users if command + is not supported setup calm version. + """ + + feature_min_version = kwargs.pop("feature_min_version", None) + if feature_min_version and args: + self.feature_version_map[args[0]] = feature_min_version + + is_experimental = kwargs.pop("experimental", False) + if args: + self.experimental_cmd_map[args[0]] = is_experimental + + return super().command(*args, **kwargs) + + def invoke(self, ctx): + + if not ctx.protected_args: + return super(FeatureFlagMixin, self).invoke(ctx) + + cmd_name = ctx.protected_args[0] + + feature_min_version = self.feature_version_map.get(cmd_name, "") + if feature_min_version: + calm_version = Version.get_version("Calm") + if not calm_version: + LOG.error("Calm version not found. 
Please update cache") + sys.exit(-1) + + if LV(calm_version) >= LV(feature_min_version): + return super().invoke(ctx) + + else: + LOG.warning( + "Please update Calm (v{} -> >=v{}) to use this command.".format( + calm_version, feature_min_version + ) + ) + return None + + else: + return super().invoke(ctx) + + +class FeatureFlagGroup(FeatureFlagMixin, DYMMixin, click.Group): + """click Group that have *did-you-mean* functionality and adds *feature_min_version* paramter to each subcommand + which can be used to set minimum calm version for command""" + + pass + + +class FeatureDslOption(click.ParamType): + + name = "feature-dsl-option" + + def __init__(self, feature_min_version=""): + self.feature_min_version = feature_min_version + + def convert(self, value, param, ctx): + + if self.feature_min_version: + calm_version = Version.get_version("Calm") + if not calm_version: + LOG.error("Calm version not found. Please update cache") + sys.exit(-1) + + # TODO add the pc version to warning also + if LV(calm_version) < LV(self.feature_min_version): + LOG.error( + "Calm {} does not support '{}' option. Please upgrade server to Calm {}".format( + calm_version, param.name, self.feature_min_version + ) + ) + sys.exit(-1) + + # Add validation for file types etc. + return value + + +def get_account_details( + project_name, account_name, provider_type="AHV_VM", pe_account_needed=False +): + """returns object containing project and account details""" + + client = get_api_client() + + # Getting the account uuid map + account_type = PROVIDER_ACCOUNT_TYPE_MAP[provider_type] + params = {"length": 250, "filter": "state!=DELETED;type=={}".format(account_type)} + if account_name: + params["filter"] += ";name=={}".format(account_name) + + account_uuid_name_map = client.account.get_uuid_name_map(params) + provider_account_uuids = list(account_uuid_name_map.keys()) + + LOG.info("Fetching project '{}' details".format(project_name)) + params = {"length": 250, "filter": "name=={}".format(project_name)} + res, err = client.project.list(params) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + if res["metadata"]["total_matches"] == 0: + LOG.error("Project {} not found".format(project_name)) + sys.exit(-1) + + pj_data = res["entities"][0] + whitelisted_accounts = [ + account["uuid"] + for account in pj_data["status"]["resources"].get("account_reference_list", []) + ] + + project_uuid = pj_data["metadata"]["uuid"] + account_uuid = "" + for _account_uuid in whitelisted_accounts: + if _account_uuid in provider_account_uuids: + account_uuid = _account_uuid + break + + if not account_uuid: + LOG.error("No account with given details found in project") + sys.exit(-1) + + account_name = account_uuid_name_map[account_uuid] + + if pe_account_needed and provider_type == "AHV_VM": + LOG.info("Fetching account '{}' details".format(account_name)) + res, err = client.account.read(account_uuid) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + clusters = res["status"]["resources"]["data"].get( + "cluster_account_reference_list", [] + ) + if not clusters: + LOG.error( + "No cluster found in ahv account (uuid='{}')".format(account_uuid) + ) + sys.exit(-1) + + # Use cluster uuid for AHV account + account_uuid = clusters[0]["uuid"] + + return { + "project": {"name": project_name, "uuid": project_uuid}, + "account": {"name": account_name, "uuid": account_uuid}, + } diff --git a/framework/calm/dsl/cli/version_validator.py 
b/framework/calm/dsl/cli/version_validator.py new file mode 100644 index 0000000..39649ed --- /dev/null +++ b/framework/calm/dsl/cli/version_validator.py @@ -0,0 +1,22 @@ +from distutils.version import LooseVersion as LV + +from calm.dsl.store import Version +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) +LATEST_VERIFIED_VERSION = "2.9.7" + + +def validate_version(): + + # At dsl initialization, version might not be found in cache + calm_version = Version.get_version("Calm") + if calm_version: + if LV(calm_version) < LV(LATEST_VERIFIED_VERSION): + LOG.warning( + "Calm server version ({}) is less than the latest verified version ({}).".format( + calm_version, LATEST_VERIFIED_VERSION + ) + ) + else: + LOG.warning("Version not found. Please update cache.") diff --git a/framework/calm/dsl/cli/vm_recovery_point_commands.py b/framework/calm/dsl/cli/vm_recovery_point_commands.py new file mode 100644 index 0000000..2cb52ec --- /dev/null +++ b/framework/calm/dsl/cli/vm_recovery_point_commands.py @@ -0,0 +1,116 @@ +import click +import json + +from .main import get +from prettytable import PrettyTable +from .utils import get_account_details, highlight_text + +from calm.dsl.api import get_api_client +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +@get.command("vm-recovery-points") +@click.option( + "--name", "-n", default=None, help="Search for vm recovery points by name" +) +@click.option("--limit", "-l", default=20, help="Number of results to return") +@click.option( + "--offset", "-s", default=0, help="Offset results by the specified amount" +) +@click.option( + "--quiet", "-q", is_flag=True, default=False, help="Show only recovery point names." +) +@click.option( + "--out", + "-o", + "out", + type=click.Choice(["text", "json"]), + default="text", + help="output format", +) +@click.option("--project", "-p", help="Project name", required=True) +@click.option("--account", "-a", help="Account name", default=None) +def _get_vm_list(name, limit, offset, quiet, out, project, account): + """ + Get vm recovery points + + \b + >: If there are multiple accounts per provider_type in the project, the user needs to supply the account name + in addition to the provider type (added in 3.2.0) + >: Available for the ahv provider only + + """ + + get_vm_recovery_points(name, limit, offset, quiet, out, project, account) + + +def get_vm_recovery_points(name, limit, offset, quiet, out, project_name, account_name): + """Displays vm recovery points for an account""" + + client = get_api_client() + account_detail = get_account_details( + project_name=project_name, account_name=account_name, provider_type="AHV_VM" + ) + + account_name = account_detail["account"]["name"] + account_uuid = account_detail["account"]["uuid"] + + LOG.info("Using account '{}' for listing vm recovery points".format(account_name)) + + payload = { + "filter": "account_uuid=={}".format(account_uuid), + "length": limit, + "offset": offset, + } + if name: + payload["filter"] += ";name=={}".format(name) + + res, err = client.vm_recovery_point.list(payload) + if err: + LOG.warning( + "Cannot fetch vm recovery points from account {}".format(account_name) + ) + return + + res = res.json() + total_matches = res["metadata"]["total_matches"] + if total_matches > limit: + LOG.warning( + "Displaying {} out of {} entities. 
Please use --limit and --offset option for more results.".format( + limit, total_matches + ) + ) + + if out == "json": + click.echo(json.dumps(res, indent=4, separators=(",", ": "))) + return + + json_rows = res["entities"] + if not json_rows: + click.echo(highlight_text("No vm recovery point found !!!\n")) + return + + if quiet: + for _row in json_rows: + row = _row["status"] + click.echo(highlight_text(row["name"])) + return + + table = PrettyTable() + table.field_names = ["NAME", "UUID", "STATE", "TYPE", "PARENT_VM_REF"] + for _row in json_rows: + row = _row["status"] + metadata = _row["metadata"] + table.add_row( + [ + highlight_text(row["name"]), + highlight_text(metadata["uuid"]), + highlight_text(row["state"]), + highlight_text(row["resources"]["recovery_point_type"]), + highlight_text(row["resources"]["parent_vm_reference"]["uuid"]), + ] + ) + click.echo(table) diff --git a/framework/calm/dsl/config/__init__.py b/framework/calm/dsl/config/__init__.py new file mode 100644 index 0000000..e64e031 --- /dev/null +++ b/framework/calm/dsl/config/__init__.py @@ -0,0 +1,19 @@ +from .config import get_config_handle, set_dsl_config +from .context import get_context, init_context, get_default_connection_config +from .init_config import ( + get_default_config_file, + get_default_db_file, + get_default_local_dir, +) + + +__all__ = [ + "get_config_handle", + "set_dsl_config", + "get_context", + "init_context", + "get_default_config_file", + "get_default_db_file", + "get_default_local_dir", + "get_default_connection_config", +] diff --git a/framework/calm/dsl/config/config.ini.jinja2 b/framework/calm/dsl/config/config.ini.jinja2 new file mode 100644 index 0000000..86e4daf --- /dev/null +++ b/framework/calm/dsl/config/config.ini.jinja2 @@ -0,0 +1,24 @@ +{% macro ConfigTemplate(ip, port, username, password, project_name, db_location, log_level, retries_enabled, connection_timeout, read_timeout) -%} + +[SERVER] +pc_ip = {{ip}} +pc_port = {{port}} +pc_username = {{username}} +pc_password = {{password}} + +[PROJECT] +name = {{project_name}} + +[LOG] +level = {{log_level}} + +[CONNECTION] +retries_enabled = {{retries_enabled}} +connection_timeout = {{connection_timeout}} +read_timeout = {{read_timeout}} + +[CATEGORIES] +{%- endmacro %} + + +{{ConfigTemplate(ip, port, username, password, project_name, db_location, log_level, retries_enabled, connection_timeout, read_timeout)}} diff --git a/framework/calm/dsl/config/config.py b/framework/calm/dsl/config/config.py new file mode 100644 index 0000000..4063fac --- /dev/null +++ b/framework/calm/dsl/config/config.py @@ -0,0 +1,247 @@ +import os +import configparser +from jinja2 import Environment, PackageLoader + +from .schema import validate_config +from .init_config import get_init_config_handle +from calm.dsl.tools import make_file_dir +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +class ConfigFileParser: + def __init__(self, config_file): + + config = configparser.RawConfigParser() + config.optionxform = str # Maintaining case sensitivity for field names + config.read(config_file) + + validate_config(config) + + config_obj = {} + for section in config.sections(): + config_obj[section] = {} + for k, v in config.items(section): + config_obj[section][k] = v + + self._CONFIG = config_obj + self._CONFIG_PARSER_OBJECT = config + + def get_server_config(self): + """returns server config""" + + if "SERVER" in self._CONFIG: + return self._CONFIG["SERVER"] + + else: + return {} + + def get_project_config(self): + """returns 
project config""" + + if "PROJECT" in self._CONFIG: + return self._CONFIG["PROJECT"] + + else: + return {} + + def get_log_config(self): + """returns log config""" + + if "LOG" in self._CONFIG: + return self._CONFIG["LOG"] + + else: + return {} + + def get_categories_config(self): + """returns categories config""" + + if "CATEGORIES" in self._CONFIG: + return self._CONFIG["CATEGORIES"] + + else: + return {} + + def get_connection_config(self): + """returns connection config""" + + connection_config = {} + if "CONNECTION" in self._CONFIG_PARSER_OBJECT: + for k, v in self._CONFIG_PARSER_OBJECT.items("CONNECTION"): + if k == "retries_enabled": + connection_config[k] = self._CONFIG_PARSER_OBJECT[ + "CONNECTION" + ].getboolean(k) + elif k in ["connection_timeout", "read_timeout"]: + connection_config[k] = self._CONFIG_PARSER_OBJECT[ + "CONNECTION" + ].getint(k) + else: + connection_config[k] = v + + return connection_config + + +class ConfigHandle: + def __init__(self, config_file=None): + + if not config_file: + init_config_handle = get_init_config_handle() + init_obj = init_config_handle.get_init_data() + config_file = init_obj["CONFIG"]["location"] + + config_obj = ConfigFileParser(config_file) + + self.server_config = config_obj.get_server_config() + self.project_config = config_obj.get_project_config() + self.log_config = config_obj.get_log_config() + self.categories_config = config_obj.get_categories_config() + self.connection_config = config_obj.get_connection_config() + + def get_server_config(self): + """returns server configuration""" + + return self.server_config + + def get_project_config(self): + """returns project configuration""" + + return self.project_config + + def get_log_config(self): + """returns logging configuration""" + + return self.log_config + + def get_categories_config(self): + """returns config categories""" + + return self.categories_config + + def get_connection_config(self): + """returns connection config""" + + return self.connection_config + + @classmethod + def get_init_config(cls): + + init_config_handle = get_init_config_handle() + return init_config_handle.get_init_data() + + @classmethod + def _render_config_template( + cls, + ip, + port, + username, + password, + project_name, + log_level, + retries_enabled, + connection_timeout, + read_timeout, + schema_file="config.ini.jinja2", + ): + """renders the config template""" + + loader = PackageLoader(__name__, "") + env = Environment(loader=loader) + template = env.get_template(schema_file) + text = template.render( + ip=ip, + port=port, + username=username, + password=password, + project_name=project_name, + log_level=log_level, + retries_enabled=retries_enabled, + connection_timeout=connection_timeout, + read_timeout=read_timeout, + ) + return text.strip() + os.linesep + + @classmethod + def update_config_file( + cls, + config_file, + host, + port, + username, + password, + project_name, + log_level, + retries_enabled, + connection_timeout, + read_timeout, + ): + """Updates the config file data""" + + LOG.debug("Rendering configuration template") + make_file_dir(config_file) + text = cls._render_config_template( + host, + port, + username, + password, + project_name, + log_level, + retries_enabled, + connection_timeout, + read_timeout, + ) + + LOG.debug("Writing configuration to '{}'".format(config_file)) + with open(config_file, "w") as fd: + fd.write(text) + + +def get_config_handle(config_file=None): + """returns ConfigHandle object""" + + return ConfigHandle(config_file) + + +def set_dsl_config( + 
host, + port, + username, + password, + project_name, + log_level, + db_location, + local_dir, + config_file, + retries_enabled, + connection_timeout, + read_timeout, +): + + """ + overrides the existing server/dsl configuration + Note: This helper assumes that valid configuration is present. It is invoked just to update the existing configuration. + + if config_file is given, it will update config file location in `init.ini` and update the server details in that file + + Note: Context will not be changed according to it. + """ + + init_config_handle = get_init_config_handle() + init_config_handle.update_init_config( + config_file=config_file, db_file=db_location, local_dir=local_dir + ) + + ConfigHandle.update_config_file( + config_file=config_file, + host=host, + port=port, + username=username, + password=password, + project_name=project_name, + log_level=log_level, + retries_enabled=retries_enabled, + connection_timeout=connection_timeout, + read_timeout=read_timeout, + ) diff --git a/framework/calm/dsl/config/context.py b/framework/calm/dsl/config/context.py new file mode 100644 index 0000000..adf7244 --- /dev/null +++ b/framework/calm/dsl/config/context.py @@ -0,0 +1,214 @@ +import os +import sys + +from .env_config import EnvConfig +from .config import get_config_handle +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + +DEFAULT_RETRIES_ENABLED = True +DEFAILT_CONNECTION_TIMEOUT = 5 +DEFAULT_READ_TIMEOUT = 3000 + + +class Context: + def __init__(self): + + self.initialize_configuration() + + def initialize_configuration(self): + """initializes the confguration for context + Priority (Decreases from 1 -> 3): + 1.) Config file passed as param + 2.) Environment Variables + 3.) Config file stored in init.ini + """ + + config_handle = get_config_handle() + self.server_config = config_handle.get_server_config() + self.project_config = config_handle.get_project_config() + self.log_config = config_handle.get_log_config() + self.categories_config = config_handle.get_categories_config() + self.connection_config = config_handle.get_connection_config() + # Override with env data + self.server_config.update(EnvConfig.get_server_config()) + self.project_config.update(EnvConfig.get_project_config()) + self.log_config.update(EnvConfig.get_log_config()) + + init_config = config_handle.get_init_config() + self._CONFIG_FILE = init_config["CONFIG"]["location"] + self._PROJECT = self.project_config.get("name", "") + + def reset_configuration(self): + """Resets the configuration""" + + LOG.debug("Resetting configuration in dsl context") + self.initialize_configuration() + + def validate_init_config(self): + """validates the init config""" + + config_handle = get_config_handle() + init_config = config_handle.get_init_config() + + if self._CONFIG_FILE == init_config["CONFIG"]["location"]: + if not os.path.exists(self._CONFIG_FILE): + LOG.error("Invalid config file location '{}'".format(self._CONFIG_FILE)) + sys.exit(-1) + + def get_server_config(self): + """returns server configuration""" + + config = self.server_config + try: # if all server variables are present either in env or some other way, not required to validate config file + if not config.get("pc_ip"): + LOG.error( + "Host IP not found. Please provide it in config file or set environment variable 'CALM_DSL_PC_IP'" + ) + sys.exit(-1) + + if not config.get("pc_port"): + LOG.error( + "Host Port not found. 
Please provide it in config file or set environment variable 'CALM_DSL_PC_PORT'" + ) + sys.exit(-1) + + if not config.get("pc_username"): + LOG.error( + "Host username not found. Please provide it in config file or set environment variable 'CALM_DSL_PC_USERNAME'" + ) + sys.exit(-1) + + if not config.get("pc_password"): + LOG.error( + "Host password not found. Please provide it in config file or set environment variable 'CALM_DSL_PC_PASSWORD'" + ) + sys.exit(-1) + + except: # validate init_config file, if it's contents are valid + self.validate_init_config() + raise + + return config + + def get_project_config(self): + """returns project configuration""" + + config = self.project_config + if not config.get("name"): + LOG.warning( + "Default project not found in config file or environment('CALM_DSL_DEFAULT_PROJECT' variable). Setting it to 'default' project" + ) + config["name"] = "default" + + return config + + def get_connection_config(self): + """returns connection configuration""" + + config = self.connection_config + if "retries_enabled" not in config: + config[ + "retries_enabled" + ] = DEFAULT_RETRIES_ENABLED # TODO check boolean is supported by ini + if "connection_timeout" not in config: + config["connection_timeout"] = DEFAILT_CONNECTION_TIMEOUT + if "read_timeout" not in config: + config["read_timeout"] = DEFAULT_READ_TIMEOUT + + return config + + def get_log_config(self): + """returns logging configuration""" + + config = self.log_config + if not config.get("level"): + LOG.warning( + "Default log-level not found in config file or environment('CALM_DSL_LOG_LEVEL'). Setting it to 'INFO' level" + ) + config["level"] = "INFO" + + return config + + def get_categories_config(self): + """returns config categories""" + + return self.categories_config + + def get_init_config(self): + """returns init configuration""" + + config_handle = get_config_handle() + return config_handle.get_init_config() + + def update_project_context(self, project_name): + """Overrides the existing project configuration""" + + self._PROJECT = project_name + LOG.debug("Updating project in dsl context to {}".format(project_name)) + self.project_config["name"] = project_name + + def update_config_file_context(self, config_file): + """Overrides the existing configuration with passed file configuration""" + + LOG.debug("Updating config file in dsl context to {}".format(config_file)) + self._CONFIG_FILE = config_file + cxt_config_handle = get_config_handle(self._CONFIG_FILE) + self.server_config.update(cxt_config_handle.get_server_config()) + self.project_config.update(cxt_config_handle.get_project_config()) + self.log_config.update(cxt_config_handle.get_log_config()) + self.connection_config.update(cxt_config_handle.get_connection_config()) + + if cxt_config_handle.get_categories_config(): + self.categories_config = cxt_config_handle.get_categories_config() + + def print_config(self): + """prints the configuration""" + + server_config = self.get_server_config() + project_config = self.get_project_config() + log_config = self.get_log_config() + connection_config = self.get_connection_config() + + ConfigHandle = get_config_handle() + config_str = ConfigHandle._render_config_template( + ip=server_config["pc_ip"], + port=server_config["pc_port"], + username=server_config["pc_username"], + password="xxxxxxxx", # Do not render password + project_name=project_config["name"], + log_level=log_config["level"], + retries_enabled=connection_config["retries_enabled"], + connection_timeout=connection_config["connection_timeout"], + 
read_timeout=connection_config["read_timeout"], + ) + + print(config_str) + + +_ContextHandle = None + + +def init_context(): + + global _ContextHandle + _ContextHandle = Context() + + +def get_context(): + global _ContextHandle + if not _ContextHandle: + init_context() + + return _ContextHandle + + +def get_default_connection_config(): + """Returns default connection config""" + + return { + "connection_timeout": DEFAILT_CONNECTION_TIMEOUT, + "read_timeout": DEFAULT_READ_TIMEOUT, + "retries_enabled": DEFAULT_RETRIES_ENABLED, + } diff --git a/framework/calm/dsl/config/env_config.py b/framework/calm/dsl/config/env_config.py new file mode 100644 index 0000000..a17097f --- /dev/null +++ b/framework/calm/dsl/config/env_config.py @@ -0,0 +1,65 @@ +import os + + +class EnvConfig: + pc_ip = os.environ.get("CALM_DSL_PC_IP") or "" + pc_port = os.environ.get("CALM_DSL_PC_PORT") or "" + pc_username = os.environ.get("CALM_DSL_PC_USERNAME") or "" + pc_password = os.environ.get("CALM_DSL_PC_PASSWORD") or "" + default_project = os.environ.get("CALM_DSL_DEFAULT_PROJECT") or "" + log_level = os.environ.get("CALM_DSL_LOG_LEVEL") or "" + + config_file_location = os.environ.get("CALM_DSL_CONFIG_FILE_LOCATION") or "" + local_dir_location = os.environ.get("CALM_DSL_LOCAL_DIR_LOCATION") or "" + db_location = os.environ.get("CALM_DSL_DB_LOCATION") + + @classmethod + def get_server_config(cls): + + config = {} + if cls.pc_ip: + config["pc_ip"] = cls.pc_ip + + if cls.pc_port: + config["pc_port"] = cls.pc_port + + if cls.pc_username: + config["pc_username"] = cls.pc_username + + if cls.pc_password: + config["pc_password"] = cls.pc_password + + return config + + @classmethod + def get_project_config(cls): + + config = {} + if cls.default_project: + config["name"] = cls.default_project + + return config + + @classmethod + def get_log_config(cls): + + config = {} + if cls.log_level: + config["level"] = cls.log_level + + return config + + @classmethod + def get_init_config(cls): + + config = {} + if cls.config_file_location: + config["config_file_location"] = cls.config_file_location + + if cls.local_dir_location: + config["local_dir_location"] = cls.local_dir_location + + if cls.db_location: + config["db_location"] = cls.db_location + + return config diff --git a/framework/calm/dsl/config/init.ini.jinja2 b/framework/calm/dsl/config/init.ini.jinja2 new file mode 100644 index 0000000..017379d --- /dev/null +++ b/framework/calm/dsl/config/init.ini.jinja2 @@ -0,0 +1,13 @@ +{% macro InitTemplate(config_file, db_file, local_dir) -%} + +[CONFIG] +location = {{config_file}} + +[DB] +location = {{db_file}} + +[LOCAL_DIR] +location = {{local_dir}} +{%- endmacro %} + +{{InitTemplate(config_file, db_file, local_dir)}} diff --git a/framework/calm/dsl/config/init_config.py b/framework/calm/dsl/config/init_config.py new file mode 100644 index 0000000..eac8b71 --- /dev/null +++ b/framework/calm/dsl/config/init_config.py @@ -0,0 +1,143 @@ +import os +import configparser +from jinja2 import Environment, PackageLoader + +from .schema import validate_init_config +from .env_config import EnvConfig +from calm.dsl.tools import make_file_dir +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + +INIT_FILE_LOCATION = os.path.join(os.path.expanduser("~"), ".calm", "init.ini") +DEFAULT_CONFIG_FILE = os.path.join(os.path.expanduser("~"), ".calm", "config.ini") +DEFAULT_DB_LOCATION = os.path.join(os.path.expanduser("~"), ".calm", "dsl.db") +DEFAULT_LOCAL_DIR_LOCATION = os.path.join(os.path.expanduser("~"), ".calm", 
".local") + + +class InitConfigHandle: + def __init__(self): + + self.initialize_configuration() + + def initialize_configuration(self): + """initializes the confguration for context + Priority (Decreases from 1 -> 2): + 1.) Environment Variables + 2.) Config file stored in init.ini + """ + + init_file = INIT_FILE_LOCATION + init_config = configparser.RawConfigParser() + init_config.optionxform = str + init_config.read(init_file) + + # Validate init config + if not validate_init_config(init_config): + raise ValueError( + "Invalid init config file: {}. Please run: calm init dsl".format( + init_file + ) + ) + + config_obj = {} + for section in init_config.sections(): + config_obj[section] = {} + for k, v in init_config.items(section): + config_obj[section][k] = v + + env_init_config = EnvConfig.get_init_config() + + if not config_obj.get("CONFIG", {}).get("location"): + make_file_dir(DEFAULT_CONFIG_FILE) + config_obj["CONFIG"] = {"location": DEFAULT_CONFIG_FILE} + + if env_init_config.get("config_file_location"): + config_obj["CONFIG"]["location"] = env_init_config["config_file_location"] + + if not config_obj.get("DB", {}).get("location"): + make_file_dir(DEFAULT_DB_LOCATION) + config_obj["DB"] = {"location": DEFAULT_DB_LOCATION} + + if env_init_config.get("db_location"): + config_obj["DB"]["location"] = env_init_config["db_location"] + + if not config_obj.get("LOCAL_DIR", {}).get("location"): + make_file_dir(DEFAULT_LOCAL_DIR_LOCATION) + config_obj["LOCAL_DIR"] = {"location": DEFAULT_LOCAL_DIR_LOCATION} + + if env_init_config.get("local_dir_location"): + config_obj["LOCAL_DIR"]["location"] = env_init_config["local_dir_location"] + + self._CONFIG = config_obj + + def get_init_data(self): + + return self._CONFIG + + def update_init_config(self, config_file, db_file, local_dir): + """updates the init file data""" + + # create required directories + make_file_dir(config_file) + make_file_dir(db_file) + make_file_dir(local_dir, is_dir=True) + + # Note: No need to validate init data as it is rendered by template + init_file = INIT_FILE_LOCATION + make_file_dir(init_file) + + LOG.debug("Rendering init template") + text = self._render_init_template(config_file, db_file, local_dir) + + # Write init configuration + LOG.debug("Writing configuration to '{}'".format(init_file)) + with open(init_file, "w") as fd: + fd.write(text) + + # reinitialize latest configuration + self.initialize_configuration() + + @staticmethod + def _render_init_template( + config_file, db_file, local_dir, schema_file="init.ini.jinja2" + ): + """renders the init template""" + + loader = PackageLoader(__name__, "") + env = Environment(loader=loader) + template = env.get_template(schema_file) + text = template.render( + config_file=config_file, db_file=db_file, local_dir=local_dir + ) + return text.strip() + os.linesep + + +_INIT_CONFIG_HANDLE = None + + +def get_init_config_handle(): + + global _INIT_CONFIG_HANDLE + if not _INIT_CONFIG_HANDLE: + _INIT_CONFIG_HANDLE = InitConfigHandle() + + return _INIT_CONFIG_HANDLE + + +def get_default_config_file(): + """Returns default location of config file""" + + return DEFAULT_CONFIG_FILE + + +def get_default_db_file(): + """Returns default location of db file""" + + return DEFAULT_DB_LOCATION + + +def get_default_local_dir(): + """Returns the default location for local dir""" + + return DEFAULT_LOCAL_DIR_LOCATION diff --git a/framework/calm/dsl/config/schema.py b/framework/calm/dsl/config/schema.py new file mode 100644 index 0000000..31e5b13 --- /dev/null +++ 
b/framework/calm/dsl/config/schema.py @@ -0,0 +1,45 @@ +from schema import Schema, And, Use, SchemaError, Optional + + +config_schema_dict = { + Optional("SERVER"): { + Optional("pc_ip"): And(Use(str)), + Optional("pc_port"): And(Use(str)), + Optional("pc_username"): And(Use(str)), + Optional("pc_password"): And(Use(str)), + }, + Optional("PROJECT"): {Optional("name"): And(Use(str))}, + Optional("LOG"): {Optional("level"): And(Use(str))}, + Optional("CATEGORIES"): {}, +} + + +init_schema_dict = { + Optional("DB"): {Optional("location"): And(Use(str))}, # NoQA + Optional("LOCAL_DIR"): {Optional("location"): And(Use(str))}, # NoQA + Optional("CONFIG"): {Optional("location"): And(Use(str))}, # NoQA +} + + +def validate_config(config): + """validates the config schema""" + + return validate_schema(config, config_schema_dict) + + +def validate_init_config(config): + """valdiates the init schema""" + + return validate_schema(config, init_schema_dict) + + +def validate_schema(config, schema_dict): + """validates the config with the schema dict""" + + config_schema = Schema(schema_dict) + config_dict = {s: dict(config.items(s)) for s in config.sections()} + try: + config_schema.validate(config_dict) + return True + except SchemaError: + return False diff --git a/framework/calm/dsl/constants.py b/framework/calm/dsl/constants.py new file mode 100644 index 0000000..da99860 --- /dev/null +++ b/framework/calm/dsl/constants.py @@ -0,0 +1,69 @@ +""" + Calm-DSL constants +""" + + +class CACHE: + """Cache constants""" + + class ENTITY: + AHV_CLUSTER = "ahv_cluster" + AHV_VPC = "ahv_vpc" + AHV_SUBNET = "ahv_subnet" + AHV_DISK_IMAGE = "ahv_disk_image" + ACCOUNT = "account" + RESOURCE_TYPE = "resource_type" + PROJECT = "project" + USER = "user" + ROLE = "role" + DIRECTORY_SERVICE = "directory_service" + USER_GROUP = "user_group" + AHV_NETWORK_FUNCTION_CHAIN = "ahv_network_function_chain" + ENVIRONMENT = "environment" + + +PROVIDER_ACCOUNT_TYPE_MAP = { + "AWS_VM": "aws", + "VMWARE_VM": "vmware", + "AHV_VM": "nutanix_pc", + "AZURE_VM": "azure", + "GCP_VM": "gcp", +} + + +class PROJECT_TASK: + class STATUS: + PENDING = "pending" + RUNNING = "running" + ABORTED = "aborted" + SUCCESS = "success" + SUSPENDED = "waiting" + FAILURE = "failure" + + TERMINAL_STATES = [ + STATUS.ABORTED, + STATUS.SUCCESS, + STATUS.FAILURE, + ] + + NON_TERMINAL_STATES = [ + STATUS.RUNNING, + STATUS.PENDING, + ] + + FAILURE_STATES = [STATUS.ABORTED, STATUS.SUSPENDED, STATUS.FAILURE] + + +class NETWORK_GROUP_TUNNEL_TASK: + class STATUS: + SUCCESS = "Succeeded" + FAILURE = "Failed" + ABORTED = "Aborted" + QUEUED = "Queued" + + TERMINAL_STATES = [ + STATUS.ABORTED, + STATUS.SUCCESS, + STATUS.FAILURE, + ] + FAILURE_STATES = [STATUS.ABORTED, STATUS.FAILURE] diff --git a/framework/calm/dsl/crypto/__init__.py b/framework/calm/dsl/crypto/__init__.py new file mode 100644 index 0000000..4cd8240 --- /dev/null +++ b/framework/calm/dsl/crypto/__init__.py @@ -0,0 +1,3 @@ +from .crypto import Crypto + +__all__ = ["Crypto"] diff --git a/framework/calm/dsl/crypto/crypto.py b/framework/calm/dsl/crypto/crypto.py new file mode 100644 index 0000000..535a51f --- /dev/null +++ b/framework/calm/dsl/crypto/crypto.py @@ -0,0 +1,49 @@ +from Crypto.Cipher import AES +import scrypt +import os + + +# Crypto class for encryption/decryption + + +class Crypto: + @staticmethod + def encrypt_AES_GCM(msg, password, kdf_salt=None, nonce=None): + """Used for encryption of msg""" + + kdf_salt = kdf_salt or os.urandom(16) + nonce = nonce or os.urandom(16) + + # Encoding of 
message + msg = msg.encode() + secret_key = Crypto.generate_key(kdf_salt, password) + aes_cipher = AES.new(secret_key, AES.MODE_GCM, nonce=nonce) + ciphertext, auth_tag = aes_cipher.encrypt_and_digest(msg) + + return (kdf_salt, ciphertext, nonce, auth_tag) + + @staticmethod + def decrypt_AES_GCM(encryptedMsg, password, kdf_salt=None, nonce=None): + """Used for decryption of msg""" + + (stored_kdf_salt, ciphertext, stored_nonce, auth_tag) = encryptedMsg + kdf_salt = kdf_salt or stored_kdf_salt + nonce = nonce or stored_nonce + + secret_key = Crypto.generate_key(kdf_salt, password) + aes_cipher = AES.new(secret_key, AES.MODE_GCM, nonce=nonce) + plaintext = aes_cipher.decrypt_and_verify(ciphertext, auth_tag) + + # decoding byte data to normal string data + plaintext = plaintext.decode("utf8") + + return plaintext + + @staticmethod + def generate_key(kdf_salt, password, iterations=16384, r=8, p=1, buflen=32): + """Generates the key that is used for encryption/decryption""" + + secret_key = scrypt.hash( + password, kdf_salt, N=iterations, r=r, p=p, buflen=buflen + ) + return secret_key diff --git a/framework/calm/dsl/db/__init__.py b/framework/calm/dsl/db/__init__.py new file mode 100644 index 0000000..65465ea --- /dev/null +++ b/framework/calm/dsl/db/__init__.py @@ -0,0 +1,3 @@ +from .handler import get_db_handle, init_db_handle + +__all__ = ["get_db_handle", "init_db_handle"] diff --git a/framework/calm/dsl/db/handler.py b/framework/calm/dsl/db/handler.py new file mode 100644 index 0000000..dc27d52 --- /dev/null +++ b/framework/calm/dsl/db/handler.py @@ -0,0 +1,113 @@ +import atexit +import os + +from calm.dsl.config import get_context +from .table_config import dsl_database, SecretTable, DataTable, VersionTable +from .table_config import CacheTableBase +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +class Database: + """DSL database connection""" + + db = None + registered_tables = [] + + @classmethod + def update_db(cls, db_instance): + cls.db = db_instance + + @staticmethod + def instantiate_db(): + ContextObj = get_context() + init_obj = ContextObj.get_init_config() + db_location = init_obj["DB"]["location"] + dsl_database.init(db_location) + return dsl_database + + def __init__(self): + self.update_db(self.instantiate_db()) + self.connect() + self.secret_table = self.set_and_verify(SecretTable) + self.data_table = self.set_and_verify(DataTable) + self.version_table = self.set_and_verify(VersionTable) + + for table_type, table in CacheTableBase.tables.items(): + setattr(self, table_type, self.set_and_verify(table)) + + def set_and_verify(self, table_cls): + """Verify whether this class exists in db + If not, then creates one + """ + + if not self.db.table_exists((table_cls.__name__).lower()): + self.db.create_tables([table_cls]) + + # Register table to class + if table_cls not in self.registered_tables: + self.registered_tables.append(table_cls) + + return table_cls + + def is_closed(self): + """return True if db connection is closed else False""" + + if self.db: + return self.db.is_closed() + + # If db not found, return true + return True + + def connect(self): + + LOG.debug("Connecting to local DB") + self.db.connect() + atexit.register(self.close) + + def close(self): + + LOG.debug("Closing connection to local DB") + self.db.close() + + +_Database = None + + +def get_db_handle(): + """Returns the db handle""" + + global _Database + if not _Database: + _Database = Database() + + return _Database + + +def init_db_handle(): + """Initializes database 
module and replaces the existing one""" + + global _Database + + try: + # Closing existing connection if exists + if not _Database.is_closed(): + # Unregister close() method from atexit handler + atexit.unregister(_Database.close) + + # Close the connection + _Database.close() + + except: # noqa + pass + + # Removing existing db at init location if exists + ContextObj = get_context() + init_obj = ContextObj.get_init_config() + db_location = init_obj["DB"]["location"] + if os.path.exists(db_location): + os.remove(db_location) + + # Initialize new database object + _Database = Database() diff --git a/framework/calm/dsl/db/table_config.py b/framework/calm/dsl/db/table_config.py new file mode 100644 index 0000000..f435a84 --- /dev/null +++ b/framework/calm/dsl/db/table_config.py @@ -0,0 +1,2653 @@ +from peewee import ( + SqliteDatabase, + Model, + CharField, + BlobField, + DateTimeField, + ForeignKeyField, + BooleanField, + CompositeKey, + DoesNotExist, + IntegerField, +) +import datetime +import click +import arrow +import json +import sys +from prettytable import PrettyTable + +from calm.dsl.api import get_resource_api, get_api_client +from calm.dsl.config import get_context +from calm.dsl.log import get_logging_handle +from calm.dsl.constants import CACHE + +LOG = get_logging_handle(__name__) +# Proxy database +dsl_database = SqliteDatabase(None) + + +class BaseModel(Model): + class Meta: + database = dsl_database + + +class SecretTable(BaseModel): + name = CharField(primary_key=True) + uuid = CharField() + creation_time = DateTimeField(default=datetime.datetime.now()) + last_update_time = DateTimeField(default=datetime.datetime.now()) + + def get_detail_dict(self): + return { + "name": self.name, + "uuid": self.uuid, + "creation_time": self.creation_time, + "last_update_time": self.last_update_time, + } + + +class DataTable(BaseModel): + secret_ref = ForeignKeyField(SecretTable, backref="data") + kdf_salt = BlobField() + ciphertext = BlobField() + iv = BlobField() + auth_tag = BlobField() + pass_phrase = BlobField() + + def generate_enc_msg(self): + return (self.kdf_salt, self.ciphertext, self.iv, self.auth_tag) + + +class CacheTableBase(BaseModel): + tables = {} + + def __init_subclass__(cls, **kwargs): + super().__init_subclass__(**kwargs) + + cache_type = cls.get_cache_type() + if not cache_type: + raise TypeError("Base table does not have a cache type attribute") + + cls.tables[cache_type] = cls + + def get_detail_dict(self): + raise NotImplementedError( + "get_detail_dict helper not implemented for {} table".format( + self.get_cache_type() + ) + ) + + @classmethod + def get_provider_plugin(self, provider_type="AHV_VM"): + """returns the provider plugin""" + + # Not a top-level import because of : https://github.com/ideadevice/calm-dsl/issues/33 + from calm.dsl.providers import get_provider + + return get_provider(provider_type) + + @classmethod + def get_cache_tables(cls): + return cls.tables + + @classmethod + def get_cache_type(cls): + """return cache type for the table""" + + return getattr(cls, "__cache_type__", None) + + @classmethod + def clear(cls): + """removes entire data from table""" + raise NotImplementedError( + "clear helper not implemented for {} table".format(cls.get_cache_type()) + ) + + @classmethod + def show_data(cls): + raise NotImplementedError( + "show_data helper not implemented for {} table".format(cls.get_cache_type()) + ) + + @classmethod + def sync(cls): + raise NotImplementedError( + "sync helper not implemented for {} table".format(cls.get_cache_type()) + 
) + + @classmethod + def create_entry(cls, name, uuid, **kwargs): + raise NotImplementedError( + "create_entry helper not implemented for {} table".format( + cls.get_cache_type() + ) + ) + + @classmethod + def get_entity_data(cls, name, **kwargs): + raise NotImplementedError( + "get_entity_data helper not implemented for {} table".format( + cls.get_cache_type() + ) + ) + + @classmethod + def get_entity_data_using_uuid(cls, uuid, **kwargs): + raise NotImplementedError( + "get_entity_data_using_uuid helper not implemented for {} table".format( + cls.get_cache_type() + ) + ) + + @classmethod + def fetch_one(cls, uuid): + raise NotImplementedError( + "fetch one helper not implemented for {} table".format(cls.get_cache_type()) + ) + + @classmethod + def add_one(cls, uuid, **kwargs): + raise NotImplementedError( + "add_one helper not implemented for {} table".format(cls.get_cache_type()) + ) + + @classmethod + def delete_one(cls, uuid, **kwargs): + raise NotImplementedError( + "delete_one helper not implemented for {} table".format( + cls.get_cache_type() + ) + ) + + @classmethod + def update_one(cls, uuid, **kwargs): + raise NotImplementedError( + "update_one helper not implemented for {} table".format( + cls.get_cache_type() + ) + ) + + +class AccountCache(CacheTableBase): + __cache_type__ = CACHE.ENTITY.ACCOUNT + feature_min_version = "2.7.0" + is_policy_required = False + name = CharField() + uuid = CharField() + provider_type = CharField() + state = CharField() + is_host = BooleanField(default=False) # Used for Ntnx accounts only + data = CharField() + last_update_time = DateTimeField(default=datetime.datetime.now()) + + def get_detail_dict(self, *args, **kwargs): + return { + "name": self.name, + "uuid": self.uuid, + "provider_type": self.provider_type, + "state": self.state, + "is_host": self.is_host, + "data": json.loads(self.data), + "last_update_time": self.last_update_time, + } + + @classmethod + def clear(cls): + """removes entire data from table""" + for db_entity in cls.select(): + db_entity.delete_instance() + + @classmethod + def show_data(cls): + """display stored data in table""" + + if not len(cls.select()): + click.echo(highlight_text("No entry found !!!")) + return + + table = PrettyTable() + table.field_names = ["NAME", "PROVIDER_TYPE", "UUID", "STATE", "LAST UPDATED"] + for entity in cls.select(): + entity_data = entity.get_detail_dict() + last_update_time = arrow.get( + entity_data["last_update_time"].astimezone(datetime.timezone.utc) + ).humanize() + table.add_row( + [ + highlight_text(entity_data["name"]), + highlight_text(entity_data["provider_type"]), + highlight_text(entity_data["uuid"]), + highlight_text(entity_data["state"]), + highlight_text(last_update_time), + ] + ) + click.echo(table) + + @classmethod + def create_entry(cls, name, uuid, **kwargs): + provider_type = kwargs.get("provider_type", "") + if not provider_type: + LOG.error("Provider type not supplied for fetching user {}".format(name)) + sys.exit(-1) + + is_host = kwargs.get("is_host", False) + data = kwargs.get("data", "{}") + state = kwargs.get("state", "") + + super().create( + name=name, + uuid=uuid, + provider_type=provider_type, + is_host=is_host, + data=data, + state=state, + ) + + @classmethod + def sync(cls): + """sync the table from server""" + + # clear old data + cls.clear() + + client = get_api_client() + payload = { + "length": 250, + "filter": "(state==ACTIVE,state==VERIFIED)", + } + res, err = client.account.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], 
err["error"])) + + res = res.json() + for entity in res.get("entities", []): + provider_type = entity["status"]["resources"]["type"] + data = {} + query_obj = { + "name": entity["status"]["name"], + "uuid": entity["metadata"]["uuid"], + "provider_type": entity["status"]["resources"]["type"], + "state": entity["status"]["resources"]["state"], + } + + if provider_type == "nutanix_pc": + query_obj["is_host"] = entity["status"]["resources"]["data"]["host_pc"] + + # store cluster accounts for PC account (Note it will store cluster name not account name) + for pe_acc in ( + entity["status"]["resources"] + .get("data", {}) + .get("cluster_account_reference_list", []) + ): + group = data.setdefault("clusters", {}) + group[pe_acc["uuid"]] = ( + pe_acc.get("resources", {}) + .get("data", {}) + .get("cluster_name", "") + ) + + elif provider_type == "nutanix": + data["pc_account_uuid"] = entity["status"]["resources"]["data"][ + "pc_account_uuid" + ] + + query_obj["data"] = json.dumps(data) + cls.create_entry(**query_obj) + + @classmethod + def get_entity_data(cls, name, **kwargs): + query_obj = {"name": name} + + provider_type = kwargs.get("provider_type", "") + if provider_type: + query_obj["provider_type"] = provider_type + + try: + # The get() method is shorthand for selecting with a limit of 1 + # If more than one row is found, the first row returned by the database cursor + entity = super().get(**query_obj) + return entity.get_detail_dict() + except DoesNotExist: + return dict() + + @classmethod + def get_entity_data_using_uuid(cls, uuid, **kwargs): + try: + entity = super().get(cls.uuid == uuid) + return entity.get_detail_dict() + + except DoesNotExist: + return dict() + + class Meta: + database = dsl_database + primary_key = CompositeKey("name", "uuid") + + +class AhvClustersCache(CacheTableBase): + __cache_type__ = CACHE.ENTITY.AHV_CLUSTER + feature_min_version = "3.5.0" + is_policy_required = False + name = CharField() + uuid = CharField() + pe_account_uuid = CharField(default="") + account_uuid = CharField(default="") + last_update_time = DateTimeField(default=datetime.datetime.now()) + + def get_detail_dict(self): + return { + "name": self.name, + "uuid": self.uuid, + "pe_account_uuid": self.pe_account_uuid, + "account_uuid": self.account_uuid, + "last_update_time": self.last_update_time, + } + + @classmethod + def clear(cls): + """removes entire data from table""" + for db_entity in cls.select(): + db_entity.delete_instance() + + @classmethod + def show_data(cls): + """display stored data in table""" + + if not len(cls.select()): + click.echo(highlight_text("No entry found !!!")) + return + + table = PrettyTable() + table.field_names = [ + "NAME", + "UUID", + "PE_ACCOUNT_UUID", + "ACCOUNT_UUID", + "LAST UPDATED", + ] + for entity in cls.select(): + entity_data = entity.get_detail_dict() + last_update_time = arrow.get( + entity_data["last_update_time"].astimezone(datetime.timezone.utc) + ).humanize() + table.add_row( + [ + highlight_text(entity_data["name"]), + highlight_text(entity_data["uuid"]), + highlight_text(entity_data["pe_account_uuid"]), + highlight_text(entity_data["account_uuid"]), + highlight_text(last_update_time), + ] + ) + click.echo(table) + + @classmethod + def sync(cls): + """sync the table from server""" + + # clear old data + cls.clear() + + client = get_api_client() + payload = {"length": 250, "filter": "state==VERIFIED;type==nutanix_pc"} + account_name_uuid_map = client.account.get_name_uuid_map(payload) + + AhvVmProvider = cls.get_provider_plugin("AHV_VM") + AhvObj = 
AhvVmProvider.get_api_obj() + + for pc_acc_name, pc_acc_uuid in account_name_uuid_map.items(): + try: + res = AhvObj.clusters(account_uuid=pc_acc_uuid) + except Exception: + LOG.warning( + "Unable to fetch clusters for Nutanix_PC Account(uuid={})".format( + pc_acc_name + ) + ) + continue + + # TODO the order of cache sync is how their model is defined in table_config.py file + account = AccountCache.get(uuid=pc_acc_uuid) + account_clusters_data = json.loads(account.data).get("clusters", {}) + account_clusters_data_rev = {v: k for k, v in account_clusters_data.items()} + + for entity in res.get("entities", []): + name = entity["status"]["name"] + uuid = entity["metadata"]["uuid"] + cluster_resources = entity["status"]["resources"] + service_list = cluster_resources.get("config", {}).get( + "service_list", [] + ) + + # Here, AHV denotes the 'PE' functionality of a cluster + if "AOS" not in service_list: + LOG.debug( + "Cluster '{}' with UUID '{}' having function {} is not an AHV PE cluster".format( + name, uuid, service_list + ) + ) + continue + + # For esxi clusters, there will not be any pe account + if not account_clusters_data_rev.get(name, ""): + LOG.debug( + "Ignoring cluster '{}' with uuid '{}', as it doesn't have any pc account".format( + name, uuid + ) + ) + continue + + cls.create_entry( + name=name, + uuid=uuid, + pe_account_uuid=account_clusters_data_rev.get(name, ""), + account_uuid=pc_acc_uuid, + ) + + @classmethod + def create_entry(cls, name, uuid, **kwargs): + """ + Creates an entry for an AHV PE Cluster. + + Args: + name: Name of the AHV PE cluster. + uuid: UUID of the AHV PE cluster. + """ + account_uuid = kwargs.get("account_uuid", "") + if not account_uuid: + LOG.error("Account UUID not supplied for AHV PE Cluster {}".format(name)) + sys.exit(-1) + + pe_account_uuid = kwargs.get("pe_account_uuid", "") + if not pe_account_uuid: + LOG.error("PE Cluster UUID not supplied for AHV PE Cluster {}".format(name)) + sys.exit(-1) + + # store data in table + super().create( + name=name, + uuid=uuid, + pe_account_uuid=pe_account_uuid, + account_uuid=account_uuid, + ) + + @classmethod + def get_entity_data(cls, name, **kwargs): + query_obj = {"name": name} + pe_account_uuid = kwargs.get("pe_account_uuid", "") + if pe_account_uuid: + query_obj["pe_account_uuid"] = pe_account_uuid + + account_uuid = kwargs.get("account_uuid", "") + if account_uuid: + query_obj["account_uuid"] = account_uuid + + try: + # The get() method is shorthand for selecting with a limit of 1 + # If more than one row is found, the first row returned by the database cursor + entity = super().get(**query_obj) + return entity.get_detail_dict() + + except DoesNotExist: + return dict() + + @classmethod + def get_entity_data_using_uuid(cls, uuid, **kwargs): + pe_account_uuid = kwargs.get("pe_account", "") + + try: + if pe_account_uuid: + entity = super().get( + cls.uuid == uuid, cls.pe_account == pe_account_uuid + ) + else: + entity = super().get(cls.uuid == uuid) + return entity.get_detail_dict() + + except DoesNotExist: + return dict() + + class Meta: + database = dsl_database + primary_key = CompositeKey("name", "uuid", "account_uuid") + + +class AhvVpcsCache(CacheTableBase): + __cache_type__ = CACHE.ENTITY.AHV_VPC + feature_min_version = "3.5.0" + is_policy_required = False + name = CharField() + uuid = CharField() + account_uuid = CharField(default="") + tunnel_name = CharField(default="") + tunnel_uuid = CharField(default="") + last_update_time = DateTimeField(default=datetime.datetime.now()) + + def 
get_detail_dict(self): + return { + "name": self.name, + "uuid": self.uuid, + "account_uuid": self.account_uuid, + "tunnel_name": self.tunnel_name, + "tunnel_uuid": self.tunnel_uuid, + "last_update_time": self.last_update_time, + } + + @classmethod + def clear(cls): + """removes entire data from table""" + for db_entity in cls.select(): + db_entity.delete_instance() + + @classmethod + def show_data(cls): + """display stored data in table""" + + if not len(cls.select()): + click.echo(highlight_text("No entry found !!!")) + return + + table = PrettyTable() + table.field_names = [ + "NAME", + "UUID", + "TUNNEL_NAME", + "TUNNEL_UUID", + "ACCOUNT_UUID", + "LAST UPDATED", + ] + for entity in cls.select(): + entity_data = entity.get_detail_dict() + last_update_time = arrow.get( + entity_data["last_update_time"].astimezone(datetime.timezone.utc) + ).humanize() + table.add_row( + [ + highlight_text(entity_data["name"]), + highlight_text(entity_data["uuid"]), + highlight_text( + entity_data["tunnel_name"] + if entity_data["tunnel_name"] != "" + else "-" + ), + highlight_text( + entity_data["tunnel_uuid"] + if entity_data["tunnel_uuid"] != "" + else "-" + ), + highlight_text(entity_data["account_uuid"]), + highlight_text(last_update_time), + ] + ) + click.echo(table) + + @classmethod + def sync(cls): + """sync the table from server""" + + # clear old data + cls.clear() + + client = get_api_client() + payload = {"length": 250, "filter": "state==VERIFIED;type==nutanix_pc"} + account_name_uuid_map = client.account.get_name_uuid_map(payload) + AhvVmProvider = cls.get_provider_plugin("AHV_VM") + AhvObj = AhvVmProvider.get_api_obj() + + # Get all Calm vpcs and Tunnels + calm_vpc_entities = client.network_group.list_all() + for pc_acc_name, pc_acc_uuid in account_name_uuid_map.items(): + try: + res = AhvObj.vpcs(account_uuid=pc_acc_uuid) + except Exception: + LOG.warning( + "Unable to fetch vpcs for Nutanix_PC Account(uuid={})".format( + pc_acc_name + ) + ) + continue + + for entity in res.get("entities", []): + name = entity["status"]["name"] + uuid = entity["metadata"]["uuid"] + + # TODO improve this, it shouldn't iterate over these entities every time + tunnel_reference = next( + ( + calm_vpc["status"]["resources"].get("tunnel_reference", {}) + for calm_vpc in calm_vpc_entities + if uuid + in calm_vpc["status"]["resources"].get( + "platform_vpc_uuid_list", [] + ) + ), + {}, + ) + + cls.create_entry( + name=name, + uuid=uuid, + account_uuid=pc_acc_uuid, + tunnel_reference=tunnel_reference, + ) + + @classmethod + def create_entry(cls, name, uuid, **kwargs): + """ + Creates an entry for an AHV PE Cluster. + + Args: + name: Name of the AHV PE cluster. + uuid: UUID of the AHV PE cluster. 
+ """ + account_uuid = kwargs.get("account_uuid", "") + if not account_uuid: + LOG.error("Account UUID not supplied for VPC {}".format(name)) + sys.exit(-1) + tunnel_reference = kwargs.get("tunnel_reference", {}) + kwargs = {"name": name, "uuid": uuid, "account_uuid": account_uuid} + if tunnel_reference: + kwargs["tunnel_name"] = tunnel_reference.get("name", "") + kwargs["tunnel_uuid"] = tunnel_reference.get("uuid", "") + + # store data in table + super().create(**kwargs) + + @classmethod + def get_entity_data(cls, name, **kwargs): + query_obj = {} + if name: + query_obj = {"name": name} + + account_uuid = kwargs.get("account_uuid", "") + if account_uuid: + query_obj["account_uuid"] = account_uuid + + tunnel_name = kwargs.get("tunnel_name", "") + if tunnel_name: + query_obj["tunnel_name"] = tunnel_name + + try: + # The get() method is shorthand for selecting with a limit of 1 + # If more than one row is found, the first row returned by the database cursor + entity = super().get(**query_obj) + return entity.get_detail_dict() + + except DoesNotExist: + return dict() + + @classmethod + def get_entity_data_using_uuid(cls, uuid, **kwargs): + + try: + entity = super().get(cls.uuid == uuid) + return entity.get_detail_dict() + + except DoesNotExist: + return dict() + + class Meta: + database = dsl_database + primary_key = CompositeKey("name", "uuid", "account_uuid") + + +class AhvSubnetsCache(CacheTableBase): + __cache_type__ = CACHE.ENTITY.AHV_SUBNET + feature_min_version = "2.7.0" + name = CharField() + uuid = CharField() + account_uuid = CharField(default="") + last_update_time = DateTimeField(default=datetime.datetime.now()) + subnet_type = CharField() + cluster = ForeignKeyField(AhvClustersCache, to_field="uuid", null=True) + vpc = ForeignKeyField(AhvVpcsCache, to_field="uuid", null=True) + + def get_detail_dict(self, *args, **kwargs): + details = { + "name": self.name, + "uuid": self.uuid, + "subnet_type": self.subnet_type, + "account_uuid": self.account_uuid, + "last_update_time": self.last_update_time, + } + if self.cluster: + details["cluster_name"] = self.cluster.name + details["cluster_uuid"] = self.cluster.uuid + elif self.vpc: + details["vpc_name"] = self.vpc.name + details["vpc_uuid"] = self.vpc.uuid + return details + + @classmethod + def clear(cls): + """removes entire data from table""" + for db_entity in cls.select(): + db_entity.delete_instance() + + @classmethod + def show_data(cls): + """display stored data in table""" + + if not len(cls.select()): + click.echo(highlight_text("No entry found !!!")) + return + + table = PrettyTable() + table.field_names = [ + "NAME", + "UUID", + "TYPE", + "CLUSTER_NAME", + "CLUSTER_UUID", + "VPC_NAME", + "VPC_UUID", + "ACCOUNT_UUID", + "LAST UPDATED", + ] + for entity in cls.select(): + entity_data = entity.get_detail_dict() + last_update_time = arrow.get( + entity_data["last_update_time"].astimezone(datetime.timezone.utc) + ).humanize() + table.add_row( + [ + highlight_text(entity_data["name"]), + highlight_text(entity_data["uuid"]), + highlight_text(entity_data["subnet_type"]), + highlight_text(entity_data.get("cluster_name", "-")), + highlight_text(entity_data.get("cluster_uuid", "-")), + highlight_text(entity_data.get("vpc_name", "-")), + highlight_text(entity_data.get("vpc_uuid", "-")), + highlight_text(entity_data["account_uuid"]), + highlight_text(last_update_time), + ] + ) + click.echo(table) + + @classmethod + def sync(cls): + """sync the table from server""" + + # clear old data + cls.clear() + + client = get_api_client() + payload = 
{"length": 250, "filter": "state==VERIFIED;type==nutanix_pc"} + account_name_uuid_map = client.account.get_name_uuid_map(payload) + + AhvVmProvider = cls.get_provider_plugin("AHV_VM") + AhvObj = AhvVmProvider.get_api_obj() + + for _, e_uuid in account_name_uuid_map.items(): + try: + res = AhvObj.subnets(account_uuid=e_uuid) + except Exception: + LOG.warning( + "Unable to fetch subnets for Nutanix_PC Account(uuid={})".format( + e_uuid + ) + ) + continue + + for entity in res.get("entities", []): + name = entity["status"]["name"] + uuid = entity["metadata"]["uuid"] + subnet_type = ( + entity["status"].get("resources", {}).get("subnet_type", "-") + ) + cluster_uuid = ( + entity["status"].get("cluster_reference", {}).get("uuid", "") + ) + vpc_uuid = ( + entity["status"] + .get("resources") + .get("vpc_reference", {}) + .get("uuid", "") + ) + LOG.debug( + "Cluster: {}, VPC: {} for account: {}, subnet: {}".format( + cluster_uuid, vpc_uuid, e_uuid, uuid + ) + ) + cls.create_entry( + name=name, + uuid=uuid, + subnet_type=subnet_type, + account_uuid=e_uuid, + cluster_uuid=cluster_uuid, + vpc_uuid=vpc_uuid, + ) + + # For older version < 2.9.0 + # Add working for older versions too + + @classmethod + def create_entry(cls, name, uuid, **kwargs): + account_uuid = kwargs.get("account_uuid", "") + if not account_uuid: + LOG.error("Account UUID not supplied for subnet {}".format(name)) + sys.exit(-1) + + cluster_uuid = kwargs.get("cluster_uuid", "") + vpc_uuid = kwargs.get("vpc_uuid", "") + subnet_type = kwargs.get("subnet_type", "-") + kwargs = { + "name": name, + "uuid": uuid, + "account_uuid": account_uuid, + "subnet_type": subnet_type, + } + if cluster_uuid: + kwargs["cluster"] = cluster_uuid + elif vpc_uuid: + kwargs["vpc"] = vpc_uuid + + # store data in table + super().create(**kwargs) + + @classmethod + def get_entity_data(cls, name, **kwargs): + + query_obj = {"name": name} + account_uuid = kwargs.get("account_uuid", "") + if account_uuid: + query_obj["account_uuid"] = account_uuid + + cluster_name = kwargs.get("cluster", "") + vpc_name = kwargs.get("vpc", "") + if cluster_name: + cluster_query_obj = {"name": cluster_name} + if account_uuid: + cluster_query_obj["account_uuid"] = account_uuid + cluster = AhvClustersCache.get(**cluster_query_obj) + query_obj["cluster"] = cluster.uuid + elif vpc_name: + vpc_query_obj = {"name": vpc_name} + if account_uuid: + vpc_query_obj["account_uuid"] = account_uuid + vpc = AhvVpcsCache.get(**vpc_query_obj) + query_obj["vpc"] = vpc.uuid + try: + # The get() method is shorthand for selecting with a limit of 1 + # If more than one row is found, the first row returned by the database cursor + entity = super().get(**query_obj) + return entity.get_detail_dict() + + except DoesNotExist: + return dict() + + @classmethod + def get_entity_data_using_uuid(cls, uuid, **kwargs): + account_uuid = kwargs.get("account_uuid", "") + + try: + if account_uuid: + entity = super().get(cls.uuid == uuid, cls.account_uuid == account_uuid) + else: + entity = super().get(cls.uuid == uuid) + return entity.get_detail_dict() + + except DoesNotExist: + return dict() + + class Meta: + database = dsl_database + primary_key = CompositeKey("name", "uuid", "account_uuid") + + +class AhvImagesCache(CacheTableBase): + __cache_type__ = CACHE.ENTITY.AHV_DISK_IMAGE + feature_min_version = "2.7.0" + name = CharField() + image_type = CharField() + uuid = CharField() + account_uuid = CharField() + last_update_time = DateTimeField(default=datetime.datetime.now()) + + def get_detail_dict(self): + return { + 
"name": self.name, + "uuid": self.uuid, + "image_type": self.image_type, + "account_uuid": self.account_uuid, + "last_update_time": self.last_update_time, + } + + @classmethod + def clear(cls): + """removes entire data from table""" + for db_entity in cls.select(): + db_entity.delete_instance() + + @classmethod + def show_data(cls): + """display stored data in table""" + + if not len(cls.select()): + click.echo(highlight_text("No entry found !!!")) + return + + table = PrettyTable() + table.field_names = [ + "NAME", + "UUID", + "IMAGE_TYPE", + "ACCOUNT_UUID", + "LAST UPDATED", + ] + for entity in cls.select().order_by(cls.image_type): + entity_data = entity.get_detail_dict() + last_update_time = arrow.get( + entity_data["last_update_time"].astimezone(datetime.timezone.utc) + ).humanize() + table.add_row( + [ + highlight_text(entity_data["name"]), + highlight_text(entity_data["uuid"]), + highlight_text(entity_data["image_type"]), + highlight_text(entity_data["account_uuid"]), + highlight_text(last_update_time), + ] + ) + click.echo(table) + + @classmethod + def sync(cls): + """sync the table data from server""" + # clear old data + cls.clear() + + client = get_api_client() + payload = {"length": 250, "filter": "state==VERIFIED;type==nutanix_pc"} + account_name_uuid_map = client.account.get_name_uuid_map(payload) + + AhvVmProvider = cls.get_provider_plugin("AHV_VM") + AhvObj = AhvVmProvider.get_api_obj() + + for _, e_uuid in account_name_uuid_map.items(): + try: + res = AhvObj.images(account_uuid=e_uuid) + except Exception: + LOG.warning( + "Unable to fetch images for Nutanix_PC Account(uuid={})".format( + e_uuid + ) + ) + continue + + for entity in res.get("entities", []): + name = entity["status"]["name"] + uuid = entity["metadata"]["uuid"] + # TODO add proper validation for karbon images + image_type = entity["status"]["resources"].get("image_type", "") + cls.create_entry( + name=name, uuid=uuid, image_type=image_type, account_uuid=e_uuid + ) + + @classmethod + def create_entry(cls, name, uuid, **kwargs): + account_uuid = kwargs.get("account_uuid", "") + if not account_uuid: + LOG.error("Account UUID not supplied for image {}".format(name)) + sys.exit(-1) + + image_type = kwargs.get("image_type", "") + # store data in table + super().create( + name=name, uuid=uuid, image_type=image_type, account_uuid=account_uuid + ) + + @classmethod + def get_entity_data(cls, name, **kwargs): + account_uuid = kwargs.get("account_uuid", "") + if not account_uuid: + LOG.error("Account UUID not supplied for fetching image {}".format(name)) + sys.exit(-1) + + image_type = kwargs.get("image_type", None) + if not image_type: + LOG.error("image_type not provided for image {}".format(name)) + sys.exit(-1) + + query_obj = { + "name": name, + "image_type": image_type, + "account_uuid": account_uuid, + } + + try: + entity = super().get(**query_obj) + return entity.get_detail_dict() + + except DoesNotExist: + return dict() + + @classmethod + def get_entity_data_using_uuid(cls, uuid, **kwargs): + account_uuid = kwargs.get("account_uuid", "") + + try: + if account_uuid: + entity = super().get(cls.uuid == uuid, cls.account_uuid == account_uuid) + else: + entity = super().get(cls.uuid == uuid) + return entity.get_detail_dict() + + except DoesNotExist: + return dict() + + class Meta: + database = dsl_database + primary_key = CompositeKey("name", "uuid", "account_uuid") + + +class ProjectCache(CacheTableBase): + __cache_type__ = CACHE.ENTITY.PROJECT + feature_min_version = "2.7.0" + name = CharField() + uuid = CharField() 
+ accounts_data = CharField() + whitelisted_subnets = CharField() + whitelisted_clusters = CharField() + whitelisted_vpcs = CharField() + last_update_time = DateTimeField(default=datetime.datetime.now()) + + def get_detail_dict(self, *args, **kwargs): + return { + "name": self.name, + "uuid": self.uuid, + "accounts_data": json.loads(self.accounts_data), + "whitelisted_subnets": json.loads(self.whitelisted_subnets), + "whitelisted_clusters": json.loads(self.whitelisted_clusters), + "whitelisted_vpcs": json.loads(self.whitelisted_vpcs), + "last_update_time": self.last_update_time, + } + + @classmethod + def clear(cls): + """removes entire data from table""" + for db_entity in cls.select(): + db_entity.delete_instance() + + @classmethod + def show_data(cls): + """display stored data in table""" + if not len(cls.select()): + click.echo(highlight_text("No entry found !!!")) + return + + table = PrettyTable() + table.field_names = ["NAME", "UUID", "LAST UPDATED"] + for entity in cls.select(): + entity_data = entity.get_detail_dict() + last_update_time = arrow.get( + entity_data["last_update_time"].astimezone(datetime.timezone.utc) + ).humanize() + table.add_row( + [ + highlight_text(entity_data["name"]), + highlight_text(entity_data["uuid"]), + highlight_text(last_update_time), + ] + ) + click.echo(table) + + @classmethod + def sync(cls): + """sync the table data from server""" + # clear old data + cls.clear() + + # update by latest data + client = get_api_client() + + payload = {"length": 200, "offset": 0, "filter": "state!=DELETED;type!=nutanix"} + account_uuid_type_map = client.account.get_uuid_type_map(payload) + + # store subnets for nutanix_pc accounts in some map, else we had to subnets api + # for each project (Speed very low in case of ~1000 projects) + ntnx_pc_account_subnet_map = dict() + ntnx_pc_account_vpc_map = dict() + ntnx_pc_account_cluster_map = dict() + ntnx_pc_subnet_cluster_map = dict() + ntnx_pc_subnet_vpc_map = dict() + for _acct_uuid in account_uuid_type_map.keys(): + if account_uuid_type_map[_acct_uuid] == "nutanix_pc": + ntnx_pc_account_subnet_map[_acct_uuid] = list() + ntnx_pc_account_vpc_map[_acct_uuid] = list() + ntnx_pc_account_cluster_map[_acct_uuid] = list() + + # Get the subnets for each nutanix_pc account + AhvVmProvider = cls.get_provider_plugin("AHV_VM") + AhvObj = AhvVmProvider.get_api_obj() + for acct_uuid in ntnx_pc_account_subnet_map.keys(): + LOG.debug( + "Fetching subnets for nutanix_pc account_uuid {}".format(acct_uuid) + ) + res = {} + try: + res = AhvObj.subnets(account_uuid=acct_uuid) + except Exception as exp: + LOG.exception(exp) + LOG.warning( + "Unable to fetch subnets for Nutanix_PC Account(uuid={})".format( + acct_uuid + ) + ) + continue + + for row in res.get("entities", []): + _sub_uuid = row["metadata"]["uuid"] + ntnx_pc_account_subnet_map[acct_uuid].append(_sub_uuid) + if row["status"]["resources"]["subnet_type"] == "VLAN": + ntnx_pc_subnet_cluster_map[_sub_uuid] = row["status"][ + "cluster_reference" + ]["uuid"] + elif row["status"]["resources"]["subnet_type"] == "OVERLAY": + ntnx_pc_subnet_vpc_map[_sub_uuid] = row["status"]["resources"][ + "vpc_reference" + ]["uuid"] + + LOG.debug( + "Fetching clusters for nutanix_pc account_uuid {}".format(acct_uuid) + ) + res = {} + try: + res = AhvObj.clusters(account_uuid=acct_uuid) + except Exception as exp: + LOG.exception(exp) + LOG.warning( + "Unable to fetch clusters for Nutanix_PC Account(uuid={})".format( + acct_uuid + ) + ) + for row in res.get("entities", []): + 
ntnx_pc_account_cluster_map[acct_uuid].append(row["metadata"]["uuid"]) + + LOG.debug("Fetching VPCs for nutanix_pc account_uuid {}".format(acct_uuid)) + res = {} + try: + res = AhvObj.vpcs(account_uuid=acct_uuid) + except Exception as exp: + LOG.exception(exp) + LOG.warning( + "Unable to fetch VPCs for Nutanix_PC Account(uuid={})".format( + acct_uuid + ) + ) + for row in res.get("entities", []): + ntnx_pc_account_vpc_map[acct_uuid].append(row["metadata"]["uuid"]) + + # Getting projects data + res_entities, err = client.project.list_all(ignore_error=True) + if err: + LOG.exception(err) + + for entity in res_entities: + # populating a map to lookup the account to which a subnet belongs + whitelisted_subnets = dict() + whitelisted_clusters = dict() + whitelisted_vpcs = dict() + + name = entity["status"]["name"] + uuid = entity["metadata"]["uuid"] + + account_list = entity["status"]["resources"].get( + "account_reference_list", [] + ) + + cluster_uuids = [ + cluster["uuid"] + for cluster in entity["status"]["resources"].get( + "cluster_reference_list", [] + ) + ] + vpc_uuids = [ + vpc["uuid"] + for vpc in entity["status"]["resources"].get("vpc_reference_list", []) + ] + + project_subnets_ref_list = entity["spec"].get("resources", {}).get( + "external_network_list", [] + ) + entity["spec"].get("resources", {}).get("subnet_reference_list", []) + project_subnet_uuids = [item["uuid"] for item in project_subnets_ref_list] + + account_map = {} + for account in account_list: + account_uuid = account["uuid"] + # As projects may have deleted accounts registered + if account_uuid not in account_uuid_type_map: + continue + + account_type = account_uuid_type_map[account_uuid] + + if not account_map.get(account_type): + account_map[account_type] = [] + + account_map[account_type].append(account_uuid) + + # for PC accounts add subnets to subnet_to_account_map. 
Will use it to populate whitelisted_subnets + if account_type == "nutanix_pc": + whitelisted_subnets[account_uuid] = list( + set(project_subnet_uuids) + & set(ntnx_pc_account_subnet_map[account_uuid]) + ) + + for _subnet_uuid in whitelisted_subnets[account_uuid]: + _subnet_cluster_uuid = ntnx_pc_subnet_cluster_map.get( + _subnet_uuid + ) + if ( + _subnet_cluster_uuid + and _subnet_cluster_uuid not in cluster_uuids + ): + cluster_uuids.append(_subnet_cluster_uuid) + _subnet_vpc_uuid = ntnx_pc_subnet_vpc_map.get(_subnet_uuid) + if _subnet_vpc_uuid and _subnet_vpc_uuid not in vpc_uuids: + vpc_uuids.append(_subnet_vpc_uuid) + + whitelisted_vpcs[account_uuid] = list( + set(vpc_uuids) & set(ntnx_pc_account_vpc_map[account_uuid]) + ) + + whitelisted_clusters[account_uuid] = list( + set(cluster_uuids) + & set(ntnx_pc_account_cluster_map[account_uuid]) + ) + + accounts_data = json.dumps(account_map) + + whitelisted_subnets = json.dumps(whitelisted_subnets) + whitelisted_clusters = json.dumps(whitelisted_clusters) + whitelisted_vpcs = json.dumps(whitelisted_vpcs) + cls.create_entry( + name=name, + uuid=uuid, + accounts_data=accounts_data, + whitelisted_subnets=whitelisted_subnets, + whitelisted_clusters=whitelisted_clusters, + whitelisted_vpcs=whitelisted_vpcs, + ) + + @classmethod + def create_entry(cls, name, uuid, **kwargs): + accounts_data = kwargs.get("accounts_data", "{}") + whitelisted_subnets = kwargs.get("whitelisted_subnets", "[]") + whitelisted_clusters = kwargs.get("whitelisted_clusters", "[]") + whitelisted_vpcs = kwargs.get("whitelisted_vpcs", "[]") + super().create( + name=name, + uuid=uuid, + accounts_data=accounts_data, + whitelisted_subnets=whitelisted_subnets, + whitelisted_clusters=whitelisted_clusters, + whitelisted_vpcs=whitelisted_vpcs, + ) + + @classmethod + def get_entity_data(cls, name, **kwargs): + query_obj = {"name": name} + try: + entity = super().get(**query_obj) + return entity.get_detail_dict() + + except DoesNotExist: + return dict() + + @classmethod + def get_entity_data_using_uuid(cls, uuid, **kwargs): + try: + entity = super().get(cls.uuid == uuid) + return entity.get_detail_dict() + + except DoesNotExist: + return dict() + + @classmethod + def fetch_one(cls, uuid): + """returns project data for project uuid""" + + # update by latest data + client = get_api_client() + + payload = {"length": 200, "offset": 0, "filter": "state!=DELETED;type!=nutanix"} + account_uuid_type_map = client.account.get_uuid_type_map(payload) + + res, err = client.project.read(uuid) + if err: + LOG.exception("[{}] - {}".format(err["code"], err["error"])) + return {} + + project_data = res.json() + project_name = project_data["spec"]["name"] + account_list = project_data["spec"]["resources"].get( + "account_reference_list", [] + ) + project_subnets_ref_list = project_data["spec"].get("resources", {}).get( + "external_network_list", [] + ) + project_data["spec"].get("resources", {}).get("subnet_reference_list", []) + project_subnet_uuids = [item["uuid"] for item in project_subnets_ref_list] + + project_cluster_uuids = [ + cluster["uuid"] + for cluster in project_data["status"]["resources"].get( + "cluster_reference_list", [] + ) + ] + project_vpc_uuids = [ + vpc["uuid"] + for vpc in project_data["status"]["resources"].get("vpc_reference_list", []) + ] + + # populating a map to lookup the account to which a subnet belongs + whitelisted_subnets = dict() + whitelisted_clusters = dict() + whitelisted_vpcs = dict() + account_map = dict() + for _acc in account_list: + account_uuid = _acc["uuid"] + 
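# The lookups below restrict each list call to the project's referenced entities + # via a piped-uuid filter, e.g. filter_query="_entity_id_==<uuid_1>|<uuid_2>".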
+ # As projects may have deleted accounts registered + if account_uuid not in account_uuid_type_map: + continue + account_type = account_uuid_type_map[account_uuid] + if account_type not in account_map: + account_map[account_type] = [account_uuid] + else: + account_map[account_type].append(account_uuid) + + if account_type == "nutanix_pc": + AhvVmProvider = cls.get_provider_plugin("AHV_VM") + AhvObj = AhvVmProvider.get_api_obj() + + filter_query = "_entity_id_=={}".format("|".join(project_subnet_uuids)) + LOG.debug( + "fetching following subnets {} for nutanix_pc account_uuid {}".format( + project_subnet_uuids, account_uuid + ) + ) + try: + res = AhvObj.subnets( + account_uuid=account_uuid, filter_query=filter_query + ) + except Exception: + LOG.warning( + "Unable to fetch subnets for Nutanix_PC Account(uuid={})".format( + account_uuid + ) + ) + continue + + whitelisted_subnets[account_uuid] = [ + row["metadata"]["uuid"] for row in res["entities"] + ] + for row in res.get("entities", []): + _cluster_uuid = (row["status"].get("cluster_reference") or {}).get( + "uuid", "" + ) + _vpc_uuid = ( + row["status"]["resources"].get("vpc_reference") or {} + ).get("uuid", "") + whitelisted_subnets[account_uuid].append(row["metadata"]["uuid"]) + if ( + row["status"]["resources"]["subnet_type"] == "VLAN" + and _cluster_uuid not in project_cluster_uuids + ): + project_cluster_uuids.append(_cluster_uuid) + elif ( + row["status"]["resources"]["subnet_type"] == "OVERLAY" + and _vpc_uuid not in project_vpc_uuids + ): + project_vpc_uuids.append(_vpc_uuid) + + # fetch clusters + if project_cluster_uuids: + filter_query = "_entity_id_=={}".format( + "|".join(project_cluster_uuids) + ) + LOG.debug( + "fetching following cluster {} for nutanix_pc account_uuid {}".format( + project_cluster_uuids, account_uuid + ) + ) + try: + res = AhvObj.clusters( + account_uuid=account_uuid, filter_query=filter_query + ) + except Exception: + LOG.warning( + "Unable to fetch clusters for Nutanix_PC Account(uuid={})".format( + account_uuid + ) + ) + continue + + whitelisted_clusters[account_uuid] = [ + row["metadata"]["uuid"] for row in res["entities"] + ] + + # fetch vpcs + if project_vpc_uuids: + filter_query = "_entity_id_=={}".format("|".join(project_vpc_uuids)) + LOG.debug( + "fetching following vpcs {} for nutanix_pc account_uuid {}".format( + project_vpc_uuids, account_uuid + ) + ) + try: + res = AhvObj.vpcs( + account_uuid=account_uuid, filter_query=filter_query + ) + except Exception: + LOG.warning( + "Unable to fetch vpcs for Nutanix_PC Account(uuid={})".format( + account_uuid + ) + ) + continue + + whitelisted_vpcs[account_uuid] = [ + row["metadata"]["uuid"] for row in res["entities"] + ] + + accounts_data = json.dumps(account_map) + whitelisted_subnets = json.dumps(whitelisted_subnets) + whitelisted_clusters = json.dumps(whitelisted_clusters) + whitelisted_vpcs = json.dumps(whitelisted_vpcs) + return { + "name": project_name, + "uuid": uuid, + "accounts_data": accounts_data, + "whitelisted_subnets": whitelisted_subnets, + "whitelisted_clusters": whitelisted_clusters, + "whitelisted_vpcs": whitelisted_vpcs, + } + + @classmethod + def add_one(cls, uuid, **kwargs): + """adds one entry to project table""" + + db_data = cls.fetch_one(uuid, **kwargs) + cls.create_entry(**db_data) + + @classmethod + def delete_one(cls, uuid, **kwargs): + """deletes one entity from project""" + + obj = cls.get(cls.uuid == uuid) + obj.delete_instance() + + @classmethod + def update_one(cls, uuid, **kwargs): + """updates single entry to project 
table""" + + db_data = cls.fetch_one(uuid, **kwargs) + q = cls.update( + { + cls.accounts_data: db_data["accounts_data"], + cls.whitelisted_subnets: db_data["whitelisted_subnets"], + cls.whitelisted_vpcs: db_data["whitelisted_vpcs"], + cls.whitelisted_clusters: db_data["whitelisted_clusters"], + } + ).where(cls.uuid == uuid) + q.execute() + + class Meta: + database = dsl_database + primary_key = CompositeKey("name", "uuid") + + +class EnvironmentCache(CacheTableBase): + __cache_type__ = "environment" + feature_min_version = "2.7.0" + name = CharField() + uuid = CharField() + project_uuid = CharField() + accounts_data = CharField() + last_update_time = DateTimeField(default=datetime.datetime.now()) + + def get_detail_dict(self, *args, **kwargs): + return { + "name": self.name, + "uuid": self.uuid, + "project_uuid": self.project_uuid, + "accounts_data": json.loads(self.accounts_data), + "last_update_time": self.last_update_time, + } + + @classmethod + def clear(cls): + """removes entire data from table""" + for db_entity in cls.select(): + db_entity.delete_instance() + + @classmethod + def show_data(cls): + """display stored data in table""" + if not len(cls.select()): + click.echo(highlight_text("No entry found !!!")) + return + + table = PrettyTable() + table.field_names = ["NAME", "UUID", "PROJECT_UUID", "LAST UPDATED"] + for entity in cls.select(): + entity_data = entity.get_detail_dict() + last_update_time = arrow.get( + entity_data["last_update_time"].astimezone(datetime.timezone.utc) + ).humanize() + table.add_row( + [ + highlight_text(entity_data["name"]), + highlight_text(entity_data["uuid"]), + highlight_text(entity_data.get("project_uuid", "")), + highlight_text(last_update_time), + ] + ) + click.echo(table) + + @classmethod + def sync(cls): + """sync the table data from server""" + # clear old data + cls.clear() + + # update by latest data + client = get_api_client() + + env_list = client.environment.list_all() + for entity in env_list: + name = entity["status"]["name"] + uuid = entity["metadata"]["uuid"] + project_uuid = ( + entity["metadata"].get("project_reference", {}).get("uuid", "") + ) + + # ignore environments that are not associated to a project + if not project_uuid: + continue + + infra_inclusion_list = entity["status"]["resources"].get( + "infra_inclusion_list", [] + ) + account_map = {} + for infra in infra_inclusion_list: + account_type = infra["type"] + account_uuid = infra["account_reference"]["uuid"] + account_data = dict( + uuid=account_uuid, + name=infra["account_reference"]["name"], + ) + + if account_type == "nutanix_pc": + AhvVmProvider = cls.get_provider_plugin("AHV_VM") + AhvObj = AhvVmProvider.get_api_obj() + subnet_refs = infra.get("subnet_references", []) + account_data["subnet_uuids"] = [row["uuid"] for row in subnet_refs] + cluster_refs = infra.get("cluster_references", []) + account_data["cluster_uuids"] = [ + row["uuid"] for row in cluster_refs + ] + vpc_refs = infra.get("vpc_references", []) + account_data["vpc_uuids"] = [row["uuid"] for row in vpc_refs] + + # It may happen, that cluster reference is not present in migrated environment + res = {} + filter_query = "_entity_id_=={}".format( + "|".join(account_data["subnet_uuids"]) + ) + try: + res = AhvObj.subnets( + account_uuid=account_uuid, filter_query=filter_query + ) + except Exception as exp: + LOG.exception(exp) + LOG.warning( + "Unable to fetch subnets for Nutanix_PC Account(uuid={})".format( + account_uuid + ) + ) + continue + for row in res.get("entities", []): + _subnet_type = 
row["status"]["resources"]["subnet_type"] + if ( + _subnet_type == "VLAN" + and row["status"]["cluster_reference"]["uuid"] + not in account_data["cluster_uuids"] + ): + account_data["cluster_uuids"].append( + row["status"]["cluster_reference"]["uuid"] + ) + elif ( + _subnet_type == "OVERLAY" + and row["status"]["resources"]["vpc_reference"]["uuid"] + not in account_data["vpc_uuids"] + ): + account_data["vpc_uuids"].append( + row["status"]["resources"]["vpc_reference"]["uuid"] + ) + + if not account_map.get(account_type): + account_map[account_type] = [] + + account_map[account_type].append(account_data) + + accounts_data = json.dumps(account_map) + cls.create_entry( + name=name, + uuid=uuid, + accounts_data=accounts_data, + project_uuid=project_uuid, + ) + + @classmethod + def create_entry(cls, name, uuid, **kwargs): + super().create( + name=name, + uuid=uuid, + accounts_data=kwargs.get("accounts_data", "{}"), + project_uuid=kwargs.get("project_uuid", ""), + ) + + @classmethod + def get_entity_data(cls, name, **kwargs): + query_obj = {"name": name} + project_uuid = kwargs.get("project_uuid", "") + if project_uuid: + query_obj["project_uuid"] = project_uuid + + try: + entity = super().get(**query_obj) + return entity.get_detail_dict() + + except DoesNotExist: + return dict() + + @classmethod + def get_entity_data_using_uuid(cls, uuid, **kwargs): + try: + entity = super().get(cls.uuid == uuid) + return entity.get_detail_dict() + + except DoesNotExist: + return dict() + + @classmethod + def fetch_one(cls, uuid): + """fetches one entity data""" + + client = get_api_client() + res, err = client.environment.read(uuid) + if err: + LOG.exception("[{}] - {}".format(err["code"], err["error"])) + return {} + + entity = res.json() + env_name = entity["status"]["name"] + project_uuid = entity["metadata"].get("project_reference", {}).get("uuid", "") + infra_inclusion_list = entity["status"]["resources"].get( + "infra_inclusion_list", [] + ) + account_map = {} + for infra in infra_inclusion_list: + _account_type = infra["type"] + _account_uuid = infra["account_reference"]["uuid"] + _account_data = dict( + uuid=infra["account_reference"]["uuid"], + name=infra["account_reference"].get("name", ""), + ) + + if _account_type == "nutanix_pc": + AhvVmProvider = cls.get_provider_plugin("AHV_VM") + AhvObj = AhvVmProvider.get_api_obj() + subnet_refs = infra.get("subnet_references", []) + _account_data["subnet_uuids"] = [row["uuid"] for row in subnet_refs] + cluster_refs = infra.get("cluster_references", []) + _account_data["cluster_uuids"] = [row["uuid"] for row in cluster_refs] + vpc_refs = infra.get("vpc_references", []) + _account_data["vpc_uuids"] = [row["uuid"] for row in vpc_refs] + + # It may happen, that cluster reference is not present in migrated environment + res = {} + filter_query = "_entity_id_=={}".format( + "|".join(_account_data["subnet_uuids"]) + ) + try: + res = AhvObj.subnets( + account_uuid=_account_uuid, filter_query=filter_query + ) + except Exception as exp: + LOG.exception(exp) + LOG.warning( + "Unable to fetch subnets for Nutanix_PC Account(uuid={})".format( + _account_uuid + ) + ) + continue + for row in res.get("entities", []): + _subnet_type = row["status"]["resources"]["subnet_type"] + if ( + _subnet_type == "VLAN" + and row["status"]["cluster_reference"]["uuid"] + not in _account_data["cluster_uuids"] + ): + _account_data["cluster_uuids"].append( + row["status"]["cluster_reference"]["uuid"] + ) + elif ( + _subnet_type == "OVERLAY" + and 
row["status"]["resources"]["vpc_reference"]["uuid"] + not in _account_data["vpc_uuids"] + ): + _account_data["vpc_uuids"].append( + row["status"]["resources"]["vpc_reference"]["uuid"] + ) + + if not account_map.get(_account_type): + account_map[_account_type] = [] + + account_map[_account_type].append(_account_data) + + accounts_data = json.dumps(account_map) + return { + "name": env_name, + "uuid": uuid, + "accounts_data": accounts_data, + "project_uuid": project_uuid, + } + + @classmethod + def add_one(cls, uuid, **kwargs): + """adds one entry to env table""" + + db_data = cls.fetch_one(uuid, **kwargs) + cls.create_entry(**db_data) + + @classmethod + def delete_one(cls, uuid, **kwargs): + """deletes one entity from env table""" + + obj = cls.get(cls.uuid == uuid) + obj.delete_instance() + + @classmethod + def update_one(cls, uuid, **kwargs): + """updates single entry to env table""" + + db_data = cls.fetch_one(uuid, **kwargs) + q = cls.update( + { + cls.accounts_data: db_data["accounts_data"], + cls.project_uuid: db_data["project_uuid"], + } + ).where(cls.uuid == uuid) + q.execute() + + class Meta: + database = dsl_database + primary_key = CompositeKey("name", "uuid") + + +class UsersCache(CacheTableBase): + __cache_type__ = CACHE.ENTITY.USER + feature_min_version = "2.7.0" + name = CharField() + uuid = CharField() + display_name = CharField() + directory = CharField() + last_update_time = DateTimeField(default=datetime.datetime.now()) + + def get_detail_dict(self, *args, **kwargs): + return { + "name": self.name, + "uuid": self.uuid, + "display_name": self.display_name, + "directory": self.directory, + "last_update_time": self.last_update_time, + } + + @classmethod + def clear(cls): + """removes entire data from table""" + for db_entity in cls.select(): + db_entity.delete_instance() + + @classmethod + def show_data(cls): + """display stored data in table""" + + if not len(cls.select()): + click.echo(highlight_text("No entry found !!!")) + return + + table = PrettyTable() + table.field_names = [ + "NAME", + "DISPLAY_NAME", + "UUID", + "DIRECTORY", + "LAST UPDATED", + ] + for entity in cls.select(): + entity_data = entity.get_detail_dict() + last_update_time = arrow.get( + entity_data["last_update_time"].astimezone(datetime.timezone.utc) + ).humanize() + table.add_row( + [ + highlight_text(entity_data["name"]), + highlight_text(entity_data["display_name"]), + highlight_text(entity_data["uuid"]), + highlight_text(entity_data["directory"]), + highlight_text(last_update_time), + ] + ) + click.echo(table) + + @classmethod + def create_entry(cls, name, uuid, **kwargs): + directory = kwargs.get("directory", "") + if not directory: + LOG.error( + "Directory_service not supplied for creating user {}".format(name) + ) + sys.exit(-1) + + display_name = kwargs.get("display_name") or "" + super().create( + name=name, uuid=uuid, directory=directory, display_name=display_name + ) + + @classmethod + def sync(cls): + """sync the table from server""" + + # clear old data + cls.clear() + + client = get_api_client() + entities = client.user.list_all(api_limit=500) + + for entity in entities: + + name = entity["status"]["name"] + uuid = entity["metadata"]["uuid"] + display_name = entity["status"]["resources"].get("display_name") or "" + directory_service_user = ( + entity["status"]["resources"].get("directory_service_user") or dict() + ) + directory_service_ref = ( + directory_service_user.get("directory_service_reference") or dict() + ) + directory_service_name = directory_service_ref.get("name", "LOCAL") + 
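+ # Local users carry no directory_service_user block, so the lookup above falls + # back to "LOCAL" and the check below always passes for them.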
+ if directory_service_name: + cls.create_entry( + name=name, + uuid=uuid, + display_name=display_name, + directory=directory_service_name, + ) + + @classmethod + def get_entity_data(cls, name, **kwargs): + + query_obj = {"name": name} + + display_name = kwargs.get("display_name", "") + if display_name: + query_obj["display_name"] = display_name + + directory = kwargs.get("directory", "") + if directory: + query_obj["directory"] = directory + + try: + entity = super().get(**query_obj) + return entity.get_detail_dict() + + except DoesNotExist: + return dict() + + @classmethod + def get_entity_data_using_uuid(cls, uuid, **kwargs): + try: + entity = super().get(cls.uuid == uuid) + return entity.get_detail_dict() + + except DoesNotExist: + return dict() + + @classmethod + def fetch_one(cls, uuid): + """fetches one entity data""" + + client = get_api_client() + res, err = client.user.read(uuid) + if err: + LOG.exception("[{}] - {}".format(err["code"], err["error"])) + return {} + + entity = res.json() + name = entity["status"]["name"] + display_name = entity["status"]["resources"].get("display_name") or "" + directory_service_user = ( + entity["status"]["resources"].get("directory_service_user") or dict() + ) + directory_service_ref = ( + directory_service_user.get("directory_service_reference") or dict() + ) + directory_service_name = directory_service_ref.get("name", "LOCAL") + + return { + "name": name, + "uuid": uuid, + "display_name": display_name, + "directory": directory_service_name, + } + + @classmethod + def add_one(cls, uuid, **kwargs): + """adds one entry to env table""" + + db_data = cls.fetch_one(uuid, **kwargs) + cls.create_entry(**db_data) + + @classmethod + def delete_one(cls, uuid, **kwargs): + """deletes one entity from env table""" + + obj = cls.get(cls.uuid == uuid) + obj.delete_instance() + + @classmethod + def update_one(cls, uuid, **kwargs): + """updates single entry to env table""" + + db_data = cls.fetch_one(uuid, **kwargs) + q = cls.update( + { + cls.name: db_data["name"], + cls.display_name: db_data["display_name"], + cls.directory: db_data["directory"], + } + ).where(cls.uuid == uuid) + q.execute() + + class Meta: + database = dsl_database + primary_key = CompositeKey("name", "uuid") + + +class RolesCache(CacheTableBase): + __cache_type__ = CACHE.ENTITY.ROLE + feature_min_version = "2.7.0" + name = CharField() + uuid = CharField() + last_update_time = DateTimeField(default=datetime.datetime.now()) + + def get_detail_dict(self, *args, **kwargs): + return { + "name": self.name, + "uuid": self.uuid, + "last_update_time": self.last_update_time, + } + + @classmethod + def clear(cls): + """removes entire data from table""" + for db_entity in cls.select(): + db_entity.delete_instance() + + @classmethod + def show_data(cls): + """display stored data in table""" + + if not len(cls.select()): + click.echo(highlight_text("No entry found !!!")) + return + + table = PrettyTable() + table.field_names = ["NAME", "UUID", "LAST UPDATED"] + for entity in cls.select(): + entity_data = entity.get_detail_dict() + last_update_time = arrow.get( + entity_data["last_update_time"].astimezone(datetime.timezone.utc) + ).humanize() + table.add_row( + [ + highlight_text(entity_data["name"]), + highlight_text(entity_data["uuid"]), + highlight_text(last_update_time), + ] + ) + click.echo(table) + + @classmethod + def create_entry(cls, name, uuid, **kwargs): + super().create(name=name, uuid=uuid) + + @classmethod + def sync(cls): + """sync the table from server""" + + # clear old data + cls.clear() 
+ + client = get_api_client() + Obj = get_resource_api("roles", client.connection) + res, err = Obj.list({"length": 1000}) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + for entity in res["entities"]: + name = entity["status"]["name"] + uuid = entity["metadata"]["uuid"] + cls.create_entry(name=name, uuid=uuid) + + @classmethod + def get_entity_data(cls, name, **kwargs): + + query_obj = {"name": name} + try: + entity = super().get(**query_obj) + return entity.get_detail_dict() + + except DoesNotExist: + return dict() + + @classmethod + def get_entity_data_using_uuid(cls, uuid, **kwargs): + try: + entity = super().get(cls.uuid == uuid) + return entity.get_detail_dict() + + except DoesNotExist: + return dict() + + @classmethod + def fetch_one(cls, uuid): + """fetches one entity data""" + + client = get_api_client() + res, err = client.role.read(uuid) + if err: + LOG.exception("[{}] - {}".format(err["code"], err["error"])) + return {} + + entity = res.json() + name = entity["status"]["name"] + + return {"name": name, "uuid": uuid} + + @classmethod + def add_one(cls, uuid, **kwargs): + """adds one entry to env table""" + + db_data = cls.fetch_one(uuid, **kwargs) + cls.create_entry(**db_data) + + @classmethod + def delete_one(cls, uuid, **kwargs): + """deletes one entity from env table""" + + obj = cls.get(cls.uuid == uuid) + obj.delete_instance() + + class Meta: + database = dsl_database + primary_key = CompositeKey("name", "uuid") + + +class DirectoryServiceCache(CacheTableBase): + __cache_type__ = CACHE.ENTITY.DIRECTORY_SERVICE + feature_min_version = "2.7.0" + name = CharField() + uuid = CharField() + last_update_time = DateTimeField(default=datetime.datetime.now()) + + def get_detail_dict(self, *args, **kwargs): + return { + "name": self.name, + "uuid": self.uuid, + "last_update_time": self.last_update_time, + } + + @classmethod + def clear(cls): + """removes entire data from table""" + for db_entity in cls.select(): + db_entity.delete_instance() + + @classmethod + def show_data(cls): + """display stored data in table""" + + if not len(cls.select()): + click.echo(highlight_text("No entry found !!!")) + return + + table = PrettyTable() + table.field_names = ["NAME", "UUID", "LAST UPDATED"] + for entity in cls.select(): + entity_data = entity.get_detail_dict() + last_update_time = arrow.get( + entity_data["last_update_time"].astimezone(datetime.timezone.utc) + ).humanize() + table.add_row( + [ + highlight_text(entity_data["name"]), + highlight_text(entity_data["uuid"]), + highlight_text(last_update_time), + ] + ) + click.echo(table) + + @classmethod + def create_entry(cls, name, uuid, **kwargs): + super().create(name=name, uuid=uuid) + + @classmethod + def sync(cls): + """sync the table from server""" + + # clear old data + cls.clear() + + client = get_api_client() + Obj = get_resource_api("directory_services", client.connection) + res, err = Obj.list({"length": 1000}) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + for entity in res["entities"]: + name = entity["status"]["name"] + uuid = entity["metadata"]["uuid"] + cls.create_entry(name=name, uuid=uuid) + + @classmethod + def get_entity_data(cls, name, **kwargs): + + query_obj = {"name": name} + try: + entity = super().get(**query_obj) + return entity.get_detail_dict() + + except DoesNotExist: + return dict() + + @classmethod + def get_entity_data_using_uuid(cls, uuid, **kwargs): + try: + entity = super().get(cls.uuid == uuid) + return 
entity.get_detail_dict() + + except DoesNotExist: + return dict() + + class Meta: + database = dsl_database + primary_key = CompositeKey("name", "uuid") + + +class UserGroupCache(CacheTableBase): + __cache_type__ = CACHE.ENTITY.USER_GROUP + feature_min_version = "2.7.0" + name = CharField() + uuid = CharField() + display_name = CharField() + directory = CharField() + last_update_time = DateTimeField(default=datetime.datetime.now()) + + def get_detail_dict(self, *args, **kwargs): + return { + "name": self.name, + "uuid": self.uuid, + "display_name": self.display_name, + "directory": self.directory, + "last_update_time": self.last_update_time, + } + + @classmethod + def clear(cls): + """removes entire data from table""" + for db_entity in cls.select(): + db_entity.delete_instance() + + @classmethod + def show_data(cls): + """display stored data in table""" + + if not len(cls.select()): + click.echo(highlight_text("No entry found !!!")) + return + + table = PrettyTable() + table.field_names = [ + "NAME", + "DISPLAY_NAME", + "UUID", + "DIRECTORY", + "LAST UPDATED", + ] + for entity in cls.select(): + entity_data = entity.get_detail_dict() + last_update_time = arrow.get( + entity_data["last_update_time"].astimezone(datetime.timezone.utc) + ).humanize() + table.add_row( + [ + highlight_text(entity_data["name"]), + highlight_text(entity_data["display_name"]), + highlight_text(entity_data["uuid"]), + highlight_text(entity_data["directory"]), + highlight_text(last_update_time), + ] + ) + click.echo(table) + + @classmethod + def create_entry(cls, name, uuid, **kwargs): + directory = kwargs.get("directory", "") + if not directory: + LOG.error( + "Directory_service not supplied for creating user {}".format(name) + ) + sys.exit(-1) + + display_name = kwargs.get("display_name") or "" + super().create( + name=name, uuid=uuid, directory=directory, display_name=display_name + ) + + @classmethod + def sync(cls): + """sync the table from server""" + + # clear old data + cls.clear() + + client = get_api_client() + Obj = get_resource_api("user_groups", client.connection) + res, err = Obj.list({"length": 1000}) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + for entity in res["entities"]: + state = entity["status"]["state"] + if state != "COMPLETE": + continue + + e_resources = entity["status"]["resources"] + + directory_service_user_group = ( + e_resources.get("directory_service_user_group") or dict() + ) + distinguished_name = directory_service_user_group.get("distinguished_name") + + directory_service_ref = ( + directory_service_user_group.get("directory_service_reference") + or dict() + ) + directory_service_name = directory_service_ref.get("name", "") + + display_name = e_resources.get("display_name", "") + uuid = entity["metadata"]["uuid"] + + if directory_service_name and distinguished_name: + cls.create_entry( + name=distinguished_name, + uuid=uuid, + display_name=display_name, + directory=directory_service_name, + ) + + @classmethod + def get_entity_data(cls, name, **kwargs): + + query_obj = {"name": name} + + display_name = kwargs.get("display_name", "") + if display_name: + query_obj["display_name"] = display_name + + directory = kwargs.get("directory", "") + if directory: + query_obj["directory"] = directory + + try: + entity = super().get(**query_obj) + return entity.get_detail_dict() + + except DoesNotExist: + return dict() + + @classmethod + def get_entity_data_using_uuid(cls, uuid, **kwargs): + try: + entity = super().get(cls.uuid == uuid) + 
return entity.get_detail_dict() + + except DoesNotExist: + return dict() + + @classmethod + def fetch_one(cls, uuid): + """fetches one entity data""" + + client = get_api_client() + res, err = client.group.read(uuid) + if err: + LOG.exception("[{}] - {}".format(err["code"], err["error"])) + return {} + + entity = res.json() + e_resources = entity["status"]["resources"] + + directory_service_user_group = ( + e_resources.get("directory_service_user_group") or dict() + ) + distinguished_name = directory_service_user_group.get("distinguished_name") + + directory_service_ref = ( + directory_service_user_group.get("directory_service_reference") or dict() + ) + directory_service_name = directory_service_ref.get("name", "") + + display_name = e_resources.get("display_name", "") + uuid = entity["metadata"]["uuid"] + + return { + "name": distinguished_name, + "uuid": uuid, + "display_name": display_name, + "directory": directory_service_name, + } + + @classmethod + def add_one(cls, uuid, **kwargs): + """adds one entry to env table""" + + db_data = cls.fetch_one(uuid, **kwargs) + cls.create_entry(**db_data) + + @classmethod + def delete_one(cls, uuid, **kwargs): + """deletes one entity from env table""" + + obj = cls.get(cls.uuid == uuid) + obj.delete_instance() + + @classmethod + def update_one(cls, uuid, **kwargs): + """updates single entry to env table""" + + db_data = cls.fetch_one(uuid, **kwargs) + q = cls.update( + { + cls.name: db_data["name"], + cls.display_name: db_data["display_name"], + cls.directory: db_data["directory"], + } + ).where(cls.uuid == uuid) + q.execute() + + class Meta: + database = dsl_database + primary_key = CompositeKey("name", "uuid") + + +class AhvNetworkFunctionChain(CacheTableBase): + __cache_type__ = CACHE.ENTITY.AHV_NETWORK_FUNCTION_CHAIN + feature_min_version = "2.7.0" + name = CharField() + uuid = CharField() + last_update_time = DateTimeField(default=datetime.datetime.now()) + + def get_detail_dict(self, *args, **kwargs): + return { + "name": self.name, + "uuid": self.uuid, + "last_update_time": self.last_update_time, + } + + @classmethod + def clear(cls): + """removes entire data from table""" + for db_entity in cls.select(): + db_entity.delete_instance() + + @classmethod + def show_data(cls): + """display stored data in table""" + + if not len(cls.select()): + click.echo(highlight_text("No entry found !!!")) + return + + table = PrettyTable() + table.field_names = ["NAME", "UUID", "LAST UPDATED"] + for entity in cls.select(): + entity_data = entity.get_detail_dict() + last_update_time = arrow.get( + entity_data["last_update_time"].astimezone(datetime.timezone.utc) + ).humanize() + table.add_row( + [ + highlight_text(entity_data["name"]), + highlight_text(entity_data["uuid"]), + highlight_text(last_update_time), + ] + ) + click.echo(table) + + @classmethod + def sync(cls): + # clear old data + cls.clear() + + # update by latest data + client = get_api_client() + Obj = get_resource_api("network_function_chains", client.connection) + res, err = Obj.list({"length": 1000}) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + for entity in res["entities"]: + name = entity["status"]["name"] + uuid = entity["metadata"]["uuid"] + cls.create_entry(name=name, uuid=uuid) + + @classmethod + def create_entry(cls, name, uuid, **kwargs): + super().create(name=name, uuid=uuid) + + @classmethod + def get_entity_data(cls, name, **kwargs): + try: + entity = super().get(cls.name == name) + return entity.get_detail_dict() + + except 
DoesNotExist: + return dict() + + class Meta: + database = dsl_database + primary_key = CompositeKey("name", "uuid") + + +class AppProtectionPolicyCache(CacheTableBase): + __cache_type__ = "app_protection_policy" + feature_min_version = "3.3.0" + name = CharField() + uuid = CharField() + rule_name = CharField() + rule_uuid = CharField() + rule_expiry = IntegerField() + rule_type = CharField() + project_name = CharField() + last_update_time = DateTimeField(default=datetime.datetime.now()) + + def get_detail_dict(self, *args, **kwargs): + return { + "name": self.name, + "uuid": self.uuid, + "rule_name": self.rule_name, + "rule_uuid": self.rule_uuid, + "rule_expiry": self.rule_expiry, + "rule_type": self.rule_type, + "project_name": self.project_name, + "last_update_time": self.last_update_time, + } + + @classmethod + def clear(cls): + """removes entire data from table""" + for db_entity in cls.select(): + db_entity.delete_instance() + + @classmethod + def show_data(cls): + """display stored data in table""" + + if not len(cls.select()): + click.echo(highlight_text("No entry found !!!")) + return + + table = PrettyTable() + table.field_names = [ + "NAME", + "UUID", + "RULE NAME", + "RULE TYPE", + "EXPIRY (DAYS)", + "PROJECT", + "LAST UPDATED", + ] + for entity in cls.select(): + entity_data = entity.get_detail_dict() + if not entity_data["rule_expiry"]: + entity_data["rule_expiry"] = "-" + last_update_time = arrow.get( + entity_data["last_update_time"].astimezone(datetime.timezone.utc) + ).humanize() + table.add_row( + [ + highlight_text(entity_data["name"]), + highlight_text(entity_data["uuid"]), + highlight_text(entity_data["rule_name"]), + highlight_text(entity_data["rule_type"]), + highlight_text(entity_data["rule_expiry"]), + highlight_text(entity_data["project_name"]), + highlight_text(last_update_time), + ] + ) + click.echo(table) + + @classmethod + def sync(cls): + # clear old data + cls.clear() + + # update by latest data + client = get_api_client() + Obj = get_resource_api( + "app_protection_policies", client.connection, calm_api=True + ) + entities = Obj.list_all() + + for entity in entities: + name = entity["status"]["name"] + uuid = entity["metadata"]["uuid"] + project_reference = entity["metadata"].get("project_reference", {}) + for rule in entity["status"]["resources"]["app_protection_rule_list"]: + expiry = 0 + rule_type = "" + if rule.get("remote_snapshot_retention_policy", {}): + rule_type = "Remote" + expiry = ( + rule["remote_snapshot_retention_policy"] + .get("snapshot_expiry_policy", {}) + .get("multiple", 0) + ) + elif rule.get("local_snapshot_retention_policy", {}): + rule_type = "Local" + expiry = ( + rule["local_snapshot_retention_policy"] + .get("snapshot_expiry_policy", {}) + .get("multiple", 0) + ) + rule_name = rule["name"] + rule_uuid = rule["uuid"] + cls.create_entry( + name=name, + uuid=uuid, + rule_name=rule_name, + rule_uuid=rule_uuid, + project_name=project_reference.get("name", ""), + rule_expiry=expiry, + rule_type=rule_type, + ) + + @classmethod + def create_entry(cls, name, uuid, **kwargs): + rule_name = kwargs.get("rule_name", "") + rule_uuid = kwargs.get("rule_uuid", "") + rule_expiry = kwargs.get("rule_expiry", 0) + rule_type = kwargs.get("rule_type", "") + project_name = kwargs.get("project_name", "") + if not rule_uuid: + LOG.error( + "Protection Rule UUID not supplied for Protection Policy {}".format( + name + ) + ) + sys.exit("Missing rule_uuid for protection policy") + super().create( + name=name, + uuid=uuid, + rule_name=rule_name, + 
rule_uuid=rule_uuid, + rule_expiry=rule_expiry, + rule_type=rule_type, + project_name=project_name, + ) + + @classmethod + def get_entity_data(cls, name, **kwargs): + rule_uuid = kwargs.get("rule_uuid", "") + rule_name = kwargs.get("rule_name", "") + query_obj = {"name": name, "project_name": kwargs.get("project_name", "")} + if rule_name: + query_obj["rule_name"] = rule_name + elif rule_uuid: + query_obj["rule_uuid"] = rule_uuid + + try: + entity = super().get(**query_obj) + return entity.get_detail_dict() + + except DoesNotExist: + return None + + class Meta: + database = dsl_database + primary_key = CompositeKey("name", "uuid", "rule_uuid") + + +class VersionTable(BaseModel): + name = CharField() + version = CharField() + last_update_time = DateTimeField(default=datetime.datetime.now()) + + def get_detail_dict(self): + return {"name": self.name, "version": self.version} + + +def highlight_text(text, **kwargs): + """Highlight text in our standard format""" + return click.style("{}".format(text), fg="blue", bold=False, **kwargs) diff --git a/framework/calm/dsl/decompile/__init__.py b/framework/calm/dsl/decompile/__init__.py new file mode 100644 index 0000000..3c21656 --- /dev/null +++ b/framework/calm/dsl/decompile/__init__.py @@ -0,0 +1,4 @@ +from .main import init_decompile_context + + +__all__ = ["init_decompile_context"] diff --git a/framework/calm/dsl/decompile/action.py b/framework/calm/dsl/decompile/action.py new file mode 100644 index 0000000..683983a --- /dev/null +++ b/framework/calm/dsl/decompile/action.py @@ -0,0 +1,158 @@ +from calm.dsl.decompile.render import render_template +from calm.dsl.decompile.task import render_task_template +from calm.dsl.decompile.parallel_task import render_parallel_task_template +from calm.dsl.decompile.variable import render_variable_template +from calm.dsl.builtins import action, ActionType +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) +RUNBOOK_ACTION_MAP = {} + + +def render_action_template(cls, entity_context="", CONFIG_SPEC_MAP={}): + + global RUNBOOK_ACTION_MAP + LOG.debug("Rendering {} action template".format(cls.__name__)) + if not isinstance(cls, ActionType): + raise TypeError("{} is not of type {}".format(cls, action)) + + # Update entity context + # TODO for now, not adding runbook to context as current mapping is 1:1 + entity_context = entity_context + "_Action_" + cls.__name__ + + runbook = cls.runbook + runbook_name = getattr(runbook, "name", "") or runbook.__name__ + # Note cls.__name__ should be used for call_runbook tasks + RUNBOOK_ACTION_MAP[runbook_name] = cls.__name__ + + # NOTE Not using main_task_local_reference for now, + # because type of main task is "DAG" + levelled_tasks = get_task_order(runbook.tasks) + tasks = [] + for task_list in levelled_tasks: + if len(task_list) != 1: + tasks.append( + render_parallel_task_template( + task_list, entity_context, RUNBOOK_ACTION_MAP, CONFIG_SPEC_MAP + ) + ) + else: + tasks.append( + render_task_template( + task_list[0], entity_context, RUNBOOK_ACTION_MAP, CONFIG_SPEC_MAP + ) + ) + + variables = [] + for variable in runbook.variables: + variables.append(render_variable_template(variable, entity_context)) + + if not (variables or tasks): + return "" + + user_attrs = { + "name": cls.__name__, + "description": cls.__doc__ or "", + "tasks": tasks, + "variables": variables, + } + + gui_display_name = getattr(cls, "name", "") or cls.__name__ + if gui_display_name != cls.__name__: + user_attrs["gui_display_name"] = gui_display_name + + text =
render_template(schema_file="action.py.jinja2", obj=user_attrs) + return text.strip() + + +def get_task_order(task_list): + """Returns the list where each index represents a list of tasks that execute in parallel""" + + dag_task = None + for ind, task in enumerate(task_list): + if task.type == "DAG": + dag_task = task + task_list.pop(ind) + break + + if not dag_task: + raise ValueError("DAG task not found") + + # Edges between tasks + edges = dag_task.attrs["edges"] + + # Final resultant task list with level as index + res_task_list = [] + + # map to store the edges from given task + task_edges_map = {} + + # map to store indegree of every task + task_indegree_count_map = {} + + # create task map with name + task_name_data_map = {} + for task in task_list: + task_name = task.name + task_name_data_map[task_name] = task + task_indegree_count_map[task_name] = 0 + task_edges_map[task_name] = [] + + # store indegree of every task + for edge in edges: + from_task = edge["from_task_reference"] + to_task = edge["to_task_reference"] + task_indegree_count_map[to_task.name] += 1 + task_edges_map[from_task.name].append(to_task.name) + + # Queue to store elements having indegree 0 + queue = [] + + # Push elements having indegree = 0 + for task_name, indegree in task_indegree_count_map.items(): + if indegree == 0: + queue.append(task_name) + + # Topological sort + while queue: + + # length of queue + ql = len(queue) + + # Inserting task with current indegree = 0 + task_data_list = [] + for task in queue: + task_data_list.append(task_name_data_map[task]) + + if task_data_list: + res_task_list.append(task_data_list) + + while ql: + # Popping the element at start + cur_task = queue.pop(0) + + # Iterate its edges and decrease the indegree of each to_task by 1 + for to_task in task_edges_map[cur_task]: + task_indegree_count_map[to_task] -= 1 + + # If indegree is 0, push to queue + if task_indegree_count_map[to_task] == 0: + queue.append(to_task) + + # decrement the counter for queue length + ql -= 1 + + return res_task_list + + +def init_action_globals(): + + global RUNBOOK_ACTION_MAP + RUNBOOK_ACTION_MAP = {} + + +# Used to register a service action runbook before its template is parsed +def update_runbook_action_map(runbook_name, action_name): + + global RUNBOOK_ACTION_MAP + RUNBOOK_ACTION_MAP[runbook_name] = action_name diff --git a/framework/calm/dsl/decompile/ahv_vm.py b/framework/calm/dsl/decompile/ahv_vm.py new file mode 100644 index 0000000..e3ddd00 --- /dev/null +++ b/framework/calm/dsl/decompile/ahv_vm.py @@ -0,0 +1,36 @@ +from calm.dsl.builtins import AhvVmType + +from calm.dsl.decompile.render import render_template +from calm.dsl.decompile.ahv_vm_resources import render_ahv_vm_resources +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +def render_ahv_vm(cls, boot_config): + + LOG.debug("Rendering {} ahv_vm template".format(cls.__name__)) + if not isinstance(cls, AhvVmType): + raise TypeError("{} is not of type {}".format(cls, AhvVmType)) + + user_attrs = cls.get_user_attrs() + + vm_name = cls.__name__ + user_attrs["name"] = vm_name + if cls.cluster: + user_attrs["cluster_name"] = str(cls.cluster) + + # Update service name map and gui name + gui_display_name = getattr(cls, "name", "") or vm_name + if gui_display_name != vm_name: + user_attrs["gui_display_name"] = gui_display_name + + # render resources template + user_attrs["resources_cls_name"] = "{}Resources".format(vm_name) + cls.resources.__name__ = user_attrs["resources_cls_name"] +
user_attrs["resources"] = render_ahv_vm_resources( + cls.resources, boot_config=boot_config, vm_name_prefix=vm_name + ) + + text = render_template(schema_file="ahv_vm.py.jinja2", obj=user_attrs) + return text.strip() diff --git a/framework/calm/dsl/decompile/ahv_vm_disk.py b/framework/calm/dsl/decompile/ahv_vm_disk.py new file mode 100644 index 0000000..7c4fc9d --- /dev/null +++ b/framework/calm/dsl/decompile/ahv_vm_disk.py @@ -0,0 +1,150 @@ +import sys + +from calm.dsl.constants import CACHE +from calm.dsl.decompile.render import render_template +from calm.dsl.store import Cache +from calm.dsl.log import get_logging_handle +from calm.dsl.decompile.ref_dependency import get_package_name + +LOG = get_logging_handle(__name__) + + +def render_ahv_vm_disk(cls, boot_config): + + data_source_ref = cls.data_source_reference or {} + if data_source_ref: + data_source_ref = data_source_ref.get_dict() + + device_properties = cls.device_properties.get_dict() + + disk_size_mib = cls.disk_size_mib + + # find device type + device_type = device_properties["device_type"] + adapter_type = device_properties["disk_address"]["adapter_type"] + adapter_index = device_properties["disk_address"]["device_index"] + + schema_file = "" + user_attrs = {} + + # Atleast one disk should be bootable + if boot_config: + if ( + adapter_type == boot_config["boot_device"]["disk_address"]["adapter_type"] + and adapter_index + == boot_config["boot_device"]["disk_address"]["device_index"] + ): + user_attrs["bootable"] = True + + # find operation_type + if data_source_ref: + if data_source_ref["kind"] == "app_package": + user_attrs["name"] = data_source_ref.get("name") + user_attrs["name"] = ( + get_package_name(user_attrs["name"]) or user_attrs["name"] + ) + + operation_type = "cloneFromVMDiskPackage" + + elif data_source_ref["kind"] == "image": + operation_type = "cloneFromImageService" + img_uuid = data_source_ref.get("uuid") + disk_cache_data = ( + Cache.get_entity_data_using_uuid( + entity_type=CACHE.ENTITY.AHV_DISK_IMAGE, uuid=img_uuid + ) + or {} + ) + if not disk_cache_data: + # Windows images may not be present + LOG.warning("Image with uuid '{}' not found".format(img_uuid)) + user_attrs["name"] = disk_cache_data.get("name", "") + + else: + LOG.error( + "Unknown kind `{}` for data source reference in image".format( + data_source_ref["kind"] + ) + ) + + else: + if device_type == "DISK": + user_attrs["size"] = disk_size_mib // 1024 + operation_type = "allocateOnStorageContainer" + + elif device_type == "CDROM": + operation_type = "emptyCdRom" + + else: + LOG.error("Unknown device type") + sys.exit(-1) + + # TODO add whitelisting from project via attached accounts + if device_type == "DISK": + if adapter_type == "SCSI": + if operation_type == "cloneFromImageService": + schema_file = "ahv_vm_disk_scsi_clone_from_image.py.jinja2" + + elif operation_type == "cloneFromVMDiskPackage": + schema_file = "ahv_vm_disk_scsi_clone_from_pkg.py.jinja2" + + elif operation_type == "allocateOnStorageContainer": + schema_file = "ahv_vm_disk_scsi_allocate_container.py.jinja2" + + else: + LOG.error("Unknown operation type {}".format(operation_type)) + sys.exit(-1) + + elif adapter_type == "PCI": + if operation_type == "cloneFromImageService": + schema_file = "ahv_vm_disk_pci_clone_from_image.py.jinja2" + + elif operation_type == "cloneFromVMDiskPackage": + schema_file = "ahv_vm_disk_pci_clone_from_pkg.py.jinja2" + + elif operation_type == "allocateOnStorageContainer": + schema_file = "ahv_vm_disk_pci_allocate_container.py.jinja2" + + else: + 
LOG.error("Unknown operation type {}".format(operation_type)) + sys.exit(-1) + + else: + LOG.error("Unknown adapter type {}".format(adapter_type)) + sys.exit(-1) + + else: # CD-ROM + if adapter_type == "SATA": + if operation_type == "cloneFromImageService": + schema_file = "ahv_vm_cdrom_sata_clone_from_image.py.jinja2" + + elif operation_type == "cloneFromVMDiskPackage": + schema_file = "ahv_vm_cdrom_sata_clone_from_pkg.py.jinja2" + + elif operation_type == "emptyCdRom": + schema_file = "ahv_vm_cdrom_sata_empty_cdrom.py.jinja2" + + else: + LOG.error("Unknown operation type {}".format(operation_type)) + sys.exit(-1) + + elif adapter_type == "IDE": + if operation_type == "cloneFromImageService": + schema_file = "ahv_vm_cdrom_ide_clone_from_image.py.jinja2" + + elif operation_type == "cloneFromVMDiskPackage": + schema_file = "ahv_vm_cdrom_ide_clone_from_pkg.py.jinja2" + + elif operation_type == "emptyCdRom": + schema_file = "ahv_vm_cdrom_ide_empty_cdrom.py.jinja2" + + else: + LOG.error("Unknown operation type {}".format(operation_type)) + sys.exit(-1) + + else: + LOG.error("Unknown adapter type {}".format(adapter_type)) + sys.exit(-1) + + text = render_template(schema_file=schema_file, obj=user_attrs) + return text.strip() diff --git a/framework/calm/dsl/decompile/ahv_vm_gc.py b/framework/calm/dsl/decompile/ahv_vm_gc.py new file mode 100644 index 0000000..17dca0a --- /dev/null +++ b/framework/calm/dsl/decompile/ahv_vm_gc.py @@ -0,0 +1,81 @@ +import sys +import os +from ruamel import yaml + +from calm.dsl.decompile.render import render_template +from calm.dsl.decompile.credential import get_cred_var_name +from calm.dsl.decompile.file_handler import get_specs_dir, get_specs_dir_key +from calm.dsl.builtins import RefType +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +def render_ahv_vm_gc(cls, vm_name_prefix=""): + + schema_file = "" + user_attrs = {} + + user_attrs = cls.get_dict() + cloud_init = user_attrs.get("cloud_init", {}) + sys_prep = user_attrs.get("sysprep", {}) + + file_name = "" + spec_dir = get_specs_dir() + if cloud_init: + schema_file = "ahv_vm_cloud_init.py.jinja2" + file_name = "{}_cloud_init_data.yaml".format(vm_name_prefix) + user_attrs["filename"] = "os.path.join('{}', '{}')".format( + get_specs_dir_key(), file_name + ) + cloud_init_user_data = cloud_init.get("user_data", "") + if not cloud_init_user_data: + return + + with open(os.path.join(spec_dir, file_name), "w+") as fd: + # TODO take care of macro case + fd.write(yaml.dump(cloud_init_user_data, default_flow_style=False)) + + elif sys_prep: + file_name = "{}_sysprep_unattend_xml.xml".format(vm_name_prefix) + user_attrs["filename"] = "os.path.join('{}', '{}')".format( + get_specs_dir_key(), file_name + ) + sysprep_unattend_xml = sys_prep.get("unattend_xml", "") + with open(os.path.join(spec_dir, file_name), "w+") as fd: + fd.write(sysprep_unattend_xml) + + install_type = sys_prep.get("install_type", "PREPARED") + is_domain = sys_prep.get("is_domain", False) + + if is_domain and sys_prep.get("domain_credential_reference"): + cred = RefType.decompile(sys_prep["domain_credential_reference"]) + user_attrs["credential"] = "ref({})".format( + get_cred_var_name(getattr(cred, "name", "") or cred.__name__) + ) + + if install_type == "FRESH": + if is_domain: + schema_file = "ahv_vm_fresh_sysprep_with_domain.py.jinja2" + else: + schema_file = "ahv_vm_fresh_sysprep_without_domain.py.jinja2" + + elif install_type == "PREPARED": + if is_domain: + schema_file = 
"ahv_vm_prepared_sysprep_with_domain.py.jinja2" + else: + schema_file = "ahv_vm_prepared_sysprep_without_domain.py.jinja2" + + else: + LOG.error( + "Unknown install type '{}' for sysprep guest customization".format( + install_type + ) + ) + sys.exit(-1) + + else: + return None + + text = render_template(schema_file=schema_file, obj=user_attrs) + return text.strip() diff --git a/framework/calm/dsl/decompile/ahv_vm_gpu.py b/framework/calm/dsl/decompile/ahv_vm_gpu.py new file mode 100644 index 0000000..4adc07b --- /dev/null +++ b/framework/calm/dsl/decompile/ahv_vm_gpu.py @@ -0,0 +1,39 @@ +import sys + +from calm.dsl.decompile.render import render_template +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +def render_ahv_vm_gpu(cls): + + gpu_vendor_key_map = {"AMD": "Amd", "INTEL": "Intel", "NVIDIA": "Nvidia"} + gpu_mode_key_map = { + "PASSTHROUGH_GRAPHICS": "passThroughGraphic", + "PASSTHROUGH_COMPUTE": "passThroughCompute", + "VIRTUAL": "virtual", + } + gpu_data = cls.get_dict() + + user_attrs = {} + gpu_vendor = gpu_data["vendor"] + if gpu_vendor_key_map.get(gpu_vendor, None): + user_attrs["vendor_key"] = gpu_vendor_key_map[gpu_vendor] + + else: + LOG.error("Unknown GPU vendor '{}'".format(gpu_vendor)) + sys.exit(-1) + + gpu_mode = gpu_data["mode"] + if gpu_mode_key_map.get(gpu_mode, None): + user_attrs["mode_key"] = gpu_mode_key_map[gpu_mode] + + else: + LOG.error("Unknown GPU mode '{}'".format(gpu_mode)) + sys.exit(-1) + + user_attrs["device_id"] = gpu_data.get("device_id", 0) + + text = render_template(schema_file="ahv_vm_gpu.py.jinja2", obj=user_attrs) + return text.strip() diff --git a/framework/calm/dsl/decompile/ahv_vm_nic.py b/framework/calm/dsl/decompile/ahv_vm_nic.py new file mode 100644 index 0000000..9a786a8 --- /dev/null +++ b/framework/calm/dsl/decompile/ahv_vm_nic.py @@ -0,0 +1,82 @@ +import sys + +from calm.dsl.decompile.render import render_template +from calm.dsl.store import Cache +from calm.dsl.constants import CACHE +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +def render_ahv_vm_nic(cls): + + # Note cls.get_dict() may not contain subnet name + # So it will fail. 
So use class attributes instead of getting dict object + subnet_ref = cls.subnet_reference + if subnet_ref: + subnet_ref = subnet_ref.get_dict() + + nic_type = cls.nic_type + network_function_nic_type = cls.network_function_nic_type + + user_attrs = {} + subnet_uuid = subnet_ref.get("uuid", "") + if subnet_uuid.startswith("@@{") and subnet_uuid.endswith("}@@"): + user_attrs["subnet_name"] = subnet_uuid + else: + subnet_cache_data = Cache.get_entity_data_using_uuid( + entity_type=CACHE.ENTITY.AHV_SUBNET, uuid=subnet_uuid + ) + if not subnet_cache_data: + LOG.error("Subnet with uuid '{}' not found".format(subnet_uuid)) + sys.exit(-1) + + user_attrs["subnet_name"] = subnet_cache_data["name"] + if subnet_cache_data.get("vpc_name", ""): + user_attrs["vpc_name"] = subnet_cache_data["vpc_name"] + else: + user_attrs["cluster_name"] = subnet_cache_data["cluster_name"] + + schema_file = "" + if nic_type == "NORMAL_NIC": + if network_function_nic_type == "INGRESS": + schema_file = "ahv_normal_ingress_nic.py.jinja2" + + elif network_function_nic_type == "EGRESS": + schema_file = "ahv_normal_egress_nic.py.jinja2" + + elif network_function_nic_type == "TAP": + schema_file = "ahv_normal_tap_nic.py.jinja2" + + else: + LOG.error( + "Unknown network function nic type '{}'".format( + network_function_nic_type + ) + ) + sys.exit(-1) + + elif nic_type == "DIRECT_NIC": + if network_function_nic_type == "INGRESS": + schema_file = "ahv_direct_ingress_nic.py.jinja2" + + elif network_function_nic_type == "EGRESS": + schema_file = "ahv_direct_egress_nic.py.jinja2" + + elif network_function_nic_type == "TAP": + schema_file = "ahv_direct_tap_nic.py.jinja2" + + else: + LOG.error( + "Unknown network function nic type '{}'".format( + network_function_nic_type + ) + ) + sys.exit(-1) + + else: + LOG.error("Unknown nic type '{}'".format(nic_type)) + sys.exit(-1) + + text = render_template(schema_file=schema_file, obj=user_attrs) + return text.strip() diff --git a/framework/calm/dsl/decompile/ahv_vm_resources.py b/framework/calm/dsl/decompile/ahv_vm_resources.py new file mode 100644 index 0000000..49f5f0b --- /dev/null +++ b/framework/calm/dsl/decompile/ahv_vm_resources.py @@ -0,0 +1,50 @@ +from calm.dsl.builtins import AhvVmResourcesType + +from calm.dsl.decompile.render import render_template +from calm.dsl.decompile.ahv_vm_disk import render_ahv_vm_disk +from calm.dsl.decompile.ahv_vm_nic import render_ahv_vm_nic +from calm.dsl.decompile.ahv_vm_gc import render_ahv_vm_gc +from calm.dsl.decompile.ahv_vm_gpu import render_ahv_vm_gpu +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +def render_ahv_vm_resources(cls, boot_config, vm_name_prefix=""): + + LOG.debug("Rendering {} ahv_vm_resources template".format(cls.__name__)) + if not isinstance(cls, AhvVmResourcesType): + raise TypeError("{} is not of type {}".format(cls, AhvVmResourcesType)) + + user_attrs = cls.get_user_attrs() + user_attrs["name"] = cls.__name__ + + # Memory to GiB + user_attrs["memory"] = int(user_attrs["memory"]) // 1024 + + disk_list = [] + for disk in cls.disks: + disk_list.append(render_ahv_vm_disk(disk, boot_config)) + + nic_list = [] + for nic in cls.nics: + nic_list.append(render_ahv_vm_nic(nic)) + + gpu_list = [] + for gpu in cls.gpus: + gpu_list.append(render_ahv_vm_gpu(gpu)) + + user_attrs.update( + { + "disks": ", ".join(disk_list), + "nics": ", ".join(nic_list), + "gpus": ", ".join(gpu_list), + } + ) + if getattr(cls, "guest_customization", None): + user_attrs["guest_customization"] = render_ahv_vm_gc( + 
cls.guest_customization, vm_name_prefix=vm_name_prefix + ) + + text = render_template(schema_file="ahv_vm_resources.py.jinja2", obj=user_attrs) + return text.strip() diff --git a/framework/calm/dsl/decompile/blueprint.py b/framework/calm/dsl/decompile/blueprint.py new file mode 100644 index 0000000..5222aa3 --- /dev/null +++ b/framework/calm/dsl/decompile/blueprint.py @@ -0,0 +1,52 @@ +from calm.dsl.decompile.render import render_template +from calm.dsl.builtins import BlueprintType +from calm.dsl.decompile.credential import get_cred_var_name +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +def render_blueprint_template(cls): + + LOG.debug("Rendering {} blueprint template".format(cls.__name__)) + if not isinstance(cls, BlueprintType): + raise TypeError("{} is not of type {}".format(cls, BlueprintType)) + + user_attrs = cls.get_user_attrs() + user_attrs["name"] = cls.__name__ + user_attrs["description"] = cls.__doc__ or "" + + credential_list = [] + for cred in cls.credentials: + credential_list.append( + get_cred_var_name(getattr(cred, "name", "") or cred.__name__) + ) + + service_list = [] + for service in cls.services: + service_list.append(service.__name__) + + package_list = [] + for package in cls.packages: + package_list.append(package.__name__) + + substrate_list = [] + for substrate in cls.substrates: + substrate_list.append(substrate.__name__) + + profile_list = [] + for profile in cls.profiles: + profile_list.append(profile.__name__) + + user_attrs.update( + { + "services": ", ".join(service_list), + "packages": ", ".join(package_list), + "substrates": ", ".join(substrate_list), + "profiles": ", ".join(profile_list), + "credentials": ", ".join(credential_list), + } + ) + + text = render_template("blueprint.py.jinja2", obj=user_attrs) + return text.strip() diff --git a/framework/calm/dsl/decompile/bp_file_helper.py b/framework/calm/dsl/decompile/bp_file_helper.py new file mode 100644 index 0000000..c25b87f --- /dev/null +++ b/framework/calm/dsl/decompile/bp_file_helper.py @@ -0,0 +1,215 @@ +import click +import os + +from calm.dsl.decompile.render import render_template +from calm.dsl.decompile.service import render_service_template +from calm.dsl.decompile.package import render_package_template +from calm.dsl.decompile.vm_disk_package import render_vm_disk_package_template + +from calm.dsl.decompile.substrate import render_substrate_template +from calm.dsl.decompile.deployment import render_deployment_template +from calm.dsl.decompile.profile import render_profile_template +from calm.dsl.decompile.credential import render_credential_template, get_cred_files + +from calm.dsl.decompile.blueprint import render_blueprint_template +from calm.dsl.decompile.metadata import render_metadata_template +from calm.dsl.decompile.variable import get_secret_variable_files +from calm.dsl.decompile.file_handler import get_local_dir +from calm.dsl.builtins import BlueprintType, ServiceType, PackageType +from calm.dsl.builtins import DeploymentType, ProfileType, SubstrateType + + +def render_bp_file_template(cls, with_secrets=False, metadata_obj=None): + + if not isinstance(cls, BlueprintType): + raise TypeError("{} is not of type {}".format(cls, BlueprintType)) + + user_attrs = cls.get_user_attrs() + user_attrs["name"] = cls.__name__ + user_attrs["description"] = cls.__doc__ + + # Find default cred + default_cred = cls.default_cred + default_cred_name = getattr(default_cred, "name", "") or getattr( + default_cred, "__name__", "" + ) + + credential_list = [] + 
for index, cred in enumerate(cls.credentials): + cred_name = getattr(cred, "name", "") or cred.__name__ + if default_cred_name and cred_name == default_cred_name: + cred.default = True + credential_list.append(render_credential_template(cred)) + + # Map to store the [Name: Rendered template for entity] + entity_name_text_map = {} + + # Edges map to store the edges (dependencies) between entities + entity_edges = {} + + for service in cls.services: + entity_name_text_map[service.get_ref().name] = service + + # Edge from services to other entities + for dep in service.dependencies: + add_edges(entity_edges, dep.get_ref().name, service.get_ref().name) + + downloadable_img_list = [] + vm_images = [] + for package in cls.packages: + if getattr(package, "__kind__") == "app_package": + entity_name_text_map[package.get_ref().name] = package + + # Edge from package to service + for dep in package.services: + add_edges(entity_edges, dep.get_ref().name, package.get_ref().name) + + else: + downloadable_img_list.append(render_vm_disk_package_template(package)) + vm_images.append(package.get_ref().name) + # Printing all the downloadable images at the top, so ignore its edges + + for substrate in cls.substrates: + entity_name_text_map[substrate.get_ref().name] = substrate + + deployments = [] + for profile in cls.profiles: + entity_name_text_map[profile.get_ref().name] = profile + + # Deployments + deployments.extend(profile.deployments) + for dep in deployments: + add_edges(entity_edges, dep.get_ref().name, profile.get_ref().name) + + for deployment in deployments: + entity_name_text_map[deployment.get_ref().name] = deployment + + # Edges from deployment to package + for dep in deployment.packages: + add_edges(entity_edges, dep.get_ref().name, deployment.get_ref().name) + + # Edges from deployment to substrate + add_edges( + entity_edges, deployment.substrate.get_ref().name, deployment.get_ref().name + ) + + # Other dependencies + for dep in deployment.dependencies: + add_edges(entity_edges, dep.get_ref().name, deployment.get_ref().name) + + # Getting the local files used for secrets + secret_files = get_secret_variable_files() + secret_files.extend(get_cred_files()) + + if with_secrets: + # Fill the secret if flag is set + if secret_files: + click.secho("Enter the value to be used in secret files") + for file_name in secret_files: + secret_val = click.prompt( + "\nValue for {}".format(file_name), + default="", + show_default=False, + hide_input=True, + ) + file_loc = os.path.join(get_local_dir(), file_name) + with open(file_loc, "w+") as fd: + fd.write(secret_val) + + dependepent_entities = [] + dependepent_entities = get_ordered_entities(entity_name_text_map, entity_edges) + + # Rendering templates + for k, v in enumerate(dependepent_entities): + if isinstance(v, ServiceType): + dependepent_entities[k] = render_service_template(v) + + elif isinstance(v, PackageType): + dependepent_entities[k] = render_package_template(v) + + elif isinstance(v, ProfileType): + dependepent_entities[k] = render_profile_template(v) + + elif isinstance(v, DeploymentType): + dependepent_entities[k] = render_deployment_template(v) + + elif isinstance(v, SubstrateType): + dependepent_entities[k] = render_substrate_template(v, vm_images=vm_images) + + blueprint = render_blueprint_template(cls) + + # Rendere blueprint metadata + metadata_str = render_metadata_template(metadata_obj) + + user_attrs.update( + { + "secret_files": secret_files, + "credentials": credential_list, + "vm_images": downloadable_img_list, + 
"dependent_entities": dependepent_entities, + "blueprint": blueprint, + "metadata": metadata_str, + } + ) + + text = render_template("bp_file_helper.py.jinja2", obj=user_attrs) + return text.strip() + + +def get_ordered_entities(entity_name_text_map, entity_edges): + """Returns the list in which all rendered templates are ordered according to depedencies""" + + res_entity_list = [] + entity_indegree_count = {} + + # Initializing indegree to each entity by 0 + for entity_name in list(entity_name_text_map.keys()): + entity_indegree_count[entity_name] = 0 + + # Iterate over edges and update indegree count for each entity + for entity_name, to_entity_list in entity_edges.items(): + for entity in to_entity_list: + entity_indegree_count[entity] += 1 + + # Queue to store entities having indegree 0 + queue = [] + + # Push entities having indegree count 0 + for entity_name, indegree in entity_indegree_count.items(): + if indegree == 0: + queue.append(entity_name) + + # Topological sort + while queue: + + ql = len(queue) + + # Inserting entities in result + for entity in queue: + res_entity_list.append(entity_name_text_map[entity]) + + while ql: + # Popping the top element + + cur_entity = queue.pop(0) + + # Iterating its edges, and decrease the indegree of dependent entity by 1 + for to_entity in entity_edges.get(cur_entity, []): + entity_indegree_count[to_entity] -= 1 + + # If indegree is zero push to queue + if entity_indegree_count[to_entity] == 0: + queue.append(to_entity) + + ql -= 1 + + return res_entity_list + + +def add_edges(edges, from_entity, to_entity): + """Add edges in map edges""" + + if not edges.get(from_entity): + edges[from_entity] = [] + + edges[from_entity].append(to_entity) diff --git a/framework/calm/dsl/decompile/config_spec.py b/framework/calm/dsl/decompile/config_spec.py new file mode 100644 index 0000000..3613815 --- /dev/null +++ b/framework/calm/dsl/decompile/config_spec.py @@ -0,0 +1,46 @@ +from calm.dsl.decompile.render import render_template +from calm.dsl.builtins import ConfigSpecType, get_valid_identifier +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +def render_restore_config_template(cls, entity_context): + LOG.debug("Rendering {} restore config template".format(cls.__name__)) + if not isinstance(cls, ConfigSpecType): + raise TypeError("{} is not of type {}".format(cls, ConfigSpecType)) + + _user_attrs = cls.get_user_attrs() + user_attrs = dict() + user_attrs["name"] = _user_attrs["name"] or cls.__name__ + attrs = _user_attrs["attrs_list"][0] + user_attrs["target"] = get_valid_identifier( + attrs["target_any_local_reference"]["name"] + ) + user_attrs["delete_vm_post_restore"] = attrs["delete_vm_post_restore"] + text = render_template(schema_file="restore_config.py.jinja2", obj=user_attrs) + return text.strip() + + +def render_snapshot_config_template(cls, entity_context, CONFIG_SPEC_MAP): + LOG.debug("Rendering {} snapshot config template".format(cls.__name__)) + if not isinstance(cls, ConfigSpecType): + raise TypeError("{} is not of type {}".format(cls, ConfigSpecType)) + + _user_attrs = cls.get_user_attrs() + user_attrs = dict() + user_attrs["name"] = _user_attrs["name"] or cls.__name__ + user_attrs["restore_config"] = CONFIG_SPEC_MAP[ + _user_attrs["config_references"][0].name + ]["local_name"] + attrs = _user_attrs["attrs_list"][0] + user_attrs["target"] = get_valid_identifier( + attrs["target_any_local_reference"]["name"] + ) + user_attrs["num_of_replicas"] = attrs["num_of_replicas"] + if 
attrs.get("app_protection_policy_reference", None): + user_attrs["policy"] = attrs["app_protection_policy_reference"]["name"] + if attrs.get("app_protection_rule_reference", None): + user_attrs["rule"] = attrs["app_protection_rule_reference"]["name"] + text = render_template(schema_file="snapshot_config.py.jinja2", obj=user_attrs) + return text.strip() diff --git a/framework/calm/dsl/decompile/credential.py b/framework/calm/dsl/decompile/credential.py new file mode 100644 index 0000000..e4c11ae --- /dev/null +++ b/framework/calm/dsl/decompile/credential.py @@ -0,0 +1,88 @@ +import os + +from calm.dsl.decompile.render import render_template +from calm.dsl.builtins import CredentialType +from calm.dsl.decompile.file_handler import get_local_dir +from calm.dsl.log import get_logging_handle +from calm.dsl.builtins import get_valid_identifier + +LOG = get_logging_handle(__name__) +CRED_VAR_NAME_MAP = {} +CRED_FILES = [] + + +def render_credential_template(cls): + + global CRED_VAR_NAME_MAP, CRED_FILES + LOG.debug("Rendering {} credential template".format(cls.__name__)) + if not isinstance(cls, CredentialType): + raise TypeError("{} is not of type {}".format(cls, CredentialType)) + + user_attrs = cls.get_user_attrs() + user_attrs["description"] = cls.__doc__ + + cred_type = user_attrs.get("cred_class", "") + + var_name = "BP_CRED_{}".format(get_valid_identifier(cls.__name__)) + user_attrs["var_name"] = var_name + if user_attrs.get("editables", {}): + user_attrs["editables"] = user_attrs["editables"].get_dict() + CRED_VAR_NAME_MAP[user_attrs["name"]] = var_name + + if cred_type == "static": + + file_name = "{}_{}".format(var_name, user_attrs["type"]) + create_file_from_file_name(file_name) + user_attrs["value"] = file_name + text = render_template("basic_credential.py.jinja2", obj=user_attrs) + + elif cred_type == "dynamic": + + for var_obj in user_attrs.get("variable_list", []): + if var_obj.type == "SECRET": + file_name = "{}_VAR_{}_SECRET".format( + var_name, get_valid_identifier(var_obj.name) + ) + create_file_from_file_name(file_name) + var_obj.value = file_name + + text = render_template("dynamic_credential.py.jinja2", obj=user_attrs) + + else: + raise TypeError("{} is not a supported cred class".format(cred_type)) + return text.strip() + + +def create_file_from_file_name(file_name): + """create a file on local directory and add to global file stack for given file name""" + file_loc = os.path.join(get_local_dir(), file_name) + + # Storing empty value in the file + with open(file_loc, "w+") as fd: + fd.write("") + + CRED_FILES.append(file_name) + + +def get_cred_var_name(cred_name): + """Get the var name for credential""" + + if cred_name not in CRED_VAR_NAME_MAP: + raise ValueError("{} not found".format(cred_name)) + + return CRED_VAR_NAME_MAP[cred_name] + + +def get_cred_files(): + """Returns the cred files created for credential""" + + global CRED_FILES + return CRED_FILES + + +def init_cred_globals(): + """Reinitialises global vars used for credentials""" + + global CRED_VAR_NAME_MAP, CRED_FILES + CRED_VAR_NAME_MAP = {} + CRED_FILES = [] diff --git a/framework/calm/dsl/decompile/decompile_render.py b/framework/calm/dsl/decompile/decompile_render.py new file mode 100644 index 0000000..1e5927a --- /dev/null +++ b/framework/calm/dsl/decompile/decompile_render.py @@ -0,0 +1,32 @@ +import os +from black import format_str, FileMode + +from calm.dsl.log import get_logging_handle +from calm.dsl.decompile.bp_file_helper import render_bp_file_template +from calm.dsl.decompile.file_handler import 
init_bp_dir + +LOG = get_logging_handle(__name__) + + +def create_bp_file(dir_name, bp_data): + + bp_path = os.path.join(dir_name, "blueprint.py") + with open(bp_path, "w") as fd: + fd.write(bp_data) + + +def create_bp_dir(bp_cls=None, bp_dir=None, with_secrets=False, metadata_obj=None): + + if not bp_dir: + bp_dir = os.path.join(os.getcwd(), bp_cls.__name__) + + LOG.info("Creating blueprint directory") + _, _, _, _ = init_bp_dir(bp_dir) + LOG.info("Rendering blueprint file template") + bp_data = render_bp_file_template( + cls=bp_cls, with_secrets=with_secrets, metadata_obj=metadata_obj + ) + LOG.info("Formatting blueprint file using black") + bp_data = format_str(bp_data, mode=FileMode()) + LOG.info("Creating blueprint file") + create_bp_file(bp_dir, bp_data) diff --git a/framework/calm/dsl/decompile/deployment.py b/framework/calm/dsl/decompile/deployment.py new file mode 100644 index 0000000..8bab0f8 --- /dev/null +++ b/framework/calm/dsl/decompile/deployment.py @@ -0,0 +1,49 @@ +from calm.dsl.decompile.render import render_template +from calm.dsl.builtins import DeploymentType +from calm.dsl.decompile.ref import render_ref_template +from calm.dsl.decompile.ref_dependency import update_deployment_name +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +def render_deployment_template(cls): + + LOG.debug("Rendering {} deployment template".format(cls.__name__)) + if not isinstance(cls, DeploymentType): + raise TypeError("{} is not of type {}".format(cls, DeploymentType)) + + # Entity context + entity_context = "Deployment_" + cls.__name__ # NoQa + + user_attrs = cls.get_user_attrs() + user_attrs["name"] = cls.__name__ + user_attrs["description"] = cls.__doc__ or "" + + # Update deployment name map and gui name + gui_display_name = getattr(cls, "name", "") or cls.__name__ + if gui_display_name != cls.__name__: + user_attrs["gui_display_name"] = gui_display_name + + # updating ui and dsl name mapping + update_deployment_name(gui_display_name, cls.__name__) + + depends_on_list = [] + for entity in user_attrs.get("dependencies", []): + depends_on_list.append(render_ref_template(entity)) + + if cls.substrate: + user_attrs["substrate"] = render_ref_template(cls.substrate) + + package_list = [] + for entity in user_attrs.get("packages", []): + package_list.append(render_ref_template(entity)) + + user_attrs["packages"] = ", ".join(package_list) + user_attrs["dependencies"] = ",".join(depends_on_list) + + if user_attrs.get("editables", {}): + user_attrs["editables"] = user_attrs["editables"].get_dict() + + text = render_template("deployment.py.jinja2", obj=user_attrs) + return text.strip() diff --git a/framework/calm/dsl/decompile/file_handler.py b/framework/calm/dsl/decompile/file_handler.py new file mode 100644 index 0000000..e5fee60 --- /dev/null +++ b/framework/calm/dsl/decompile/file_handler.py @@ -0,0 +1,74 @@ +import os + +LOCAL_DIR = None +SCRIPTS_DIR = None +SPECS_DIR = None +BP_DIR = None + +LOCAL_DIR_KEY = ".local" +SCRIPTS_DIR_KEY = "scripts" +SPECS_DIR_KEY = "specs" + + +def make_bp_dirs(bp_dir): + + if not os.path.isdir(bp_dir): + os.makedirs(bp_dir) + + local_dir = os.path.join(bp_dir, LOCAL_DIR_KEY) + if not os.path.isdir(local_dir): + os.makedirs(local_dir) + + spec_dir = os.path.join(bp_dir, SPECS_DIR_KEY) + if not os.path.isdir(spec_dir): + os.makedirs(spec_dir) + + scripts_dir = os.path.join(bp_dir, SCRIPTS_DIR_KEY) + if not os.path.isdir(scripts_dir): + os.makedirs(scripts_dir) + + return (bp_dir, local_dir, spec_dir, scripts_dir) + + +def 
init_bp_dir(bp_dir): + + global LOCAL_DIR, SCRIPTS_DIR, SPECS_DIR, BP_DIR + BP_DIR, LOCAL_DIR, SPECS_DIR, SCRIPTS_DIR = make_bp_dirs(bp_dir) + + return (BP_DIR, LOCAL_DIR, SPECS_DIR, SCRIPTS_DIR) + + +def get_bp_dir(): + return BP_DIR + + +def get_local_dir(): + return LOCAL_DIR + + +def get_specs_dir(): + return SPECS_DIR + + +def get_scripts_dir(): + return SCRIPTS_DIR + + +def get_local_dir_key(): + return LOCAL_DIR_KEY + + +def get_specs_dir_key(): + return SPECS_DIR_KEY + + +def get_scripts_dir_key(): + return SCRIPTS_DIR_KEY + + +def init_file_globals(): + global LOCAL_DIR, SPECS_DIR, SCRIPTS_DIR, BP_DIR + LOCAL_DIR = None + SCRIPTS_DIR = None + SPECS_DIR = None + BP_DIR = None diff --git a/framework/calm/dsl/decompile/main.py b/framework/calm/dsl/decompile/main.py new file mode 100644 index 0000000..891839f --- /dev/null +++ b/framework/calm/dsl/decompile/main.py @@ -0,0 +1,15 @@ +from calm.dsl.decompile.action import init_action_globals +from calm.dsl.decompile.credential import init_cred_globals +from calm.dsl.decompile.variable import init_variable_globals +from calm.dsl.decompile.ref_dependency import init_ref_dependency_globals +from calm.dsl.decompile.file_handler import init_file_globals + + +def init_decompile_context(): + + # Reinitializes context for decompile + init_action_globals() + init_cred_globals() + init_file_globals() + init_ref_dependency_globals() + init_variable_globals() diff --git a/framework/calm/dsl/decompile/metadata.py b/framework/calm/dsl/decompile/metadata.py new file mode 100644 index 0000000..82c7aa5 --- /dev/null +++ b/framework/calm/dsl/decompile/metadata.py @@ -0,0 +1,30 @@ +from calm.dsl.decompile.render import render_template +from calm.dsl.builtins import MetadataType + + +def render_metadata_template(cls): + + if not cls: + return + + if not isinstance(cls, MetadataType): + raise TypeError("{} is not of type {}".format(cls, MetadataType)) + + cls_data = cls.get_dict() + user_attrs = {} + + if cls_data.get("categories"): + user_attrs["categories"] = cls_data["categories"] + + # NOTE: Project and Owner info is not provided by calm export_file api yet. + # When available add their rendered_text to user_attrs and modify jinja template accordingly + + # NOTE: Name of class is constant i.e. 
BpMetadata + + # If metadata is not available, return empty string + if not user_attrs: + return "" + + text = render_template("metadata.py.jinja2", obj=user_attrs) + + return text.strip() diff --git a/framework/calm/dsl/decompile/package.py b/framework/calm/dsl/decompile/package.py new file mode 100644 index 0000000..73f4e2c --- /dev/null +++ b/framework/calm/dsl/decompile/package.py @@ -0,0 +1,57 @@ +from calm.dsl.decompile.render import render_template +from calm.dsl.builtins import PackageType +from calm.dsl.decompile.ref import render_ref_template +from calm.dsl.decompile.variable import render_variable_template +from calm.dsl.decompile.action import render_action_template +from calm.dsl.decompile.ref_dependency import update_package_name +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +def render_package_template(cls): + + LOG.debug("Rendering {} package template".format(cls.__name__)) + if not isinstance(cls, PackageType): + raise TypeError("{} is not of type {}".format(cls, PackageType)) + + # Entity context + entity_context = "Package_" + cls.__name__ + + user_attrs = cls.get_user_attrs() + user_attrs["name"] = cls.__name__ + user_attrs["description"] = cls.__doc__ or "" + + # Update package name map + gui_display_name = getattr(cls, "name", "") or cls.__name__ + if gui_display_name != cls.__name__: + user_attrs["gui_display_name"] = gui_display_name + + # updating ui and dsl name mapping + update_package_name(gui_display_name, cls.__name__) + + service_list = [] + for entity in user_attrs.get("services", []): + service_list.append(render_ref_template(entity)) + + variable_list = [] + for entity in user_attrs.get("variables", []): + variable_list.append(render_variable_template(entity, entity_context)) + + action_list = [] + if hasattr(cls, "__install__"): + cls.__install__.__name__ = "__install__" + cls.__install__.name = "__install__" + action_list.append(render_action_template(cls.__install__, entity_context)) + + if hasattr(cls, "__uninstall__"): + cls.__uninstall__.__name__ = "__uninstall__" + cls.__uninstall__.name = "__uninstall__" + action_list.append(render_action_template(cls.__uninstall__, entity_context)) + + user_attrs["services"] = ",".join(service_list) + user_attrs["variables"] = variable_list + user_attrs["actions"] = action_list + + text = render_template("package.py.jinja2", obj=user_attrs) + return text.strip() diff --git a/framework/calm/dsl/decompile/parallel_task.py b/framework/calm/dsl/decompile/parallel_task.py new file mode 100644 index 0000000..8a9bc8e --- /dev/null +++ b/framework/calm/dsl/decompile/parallel_task.py @@ -0,0 +1,21 @@ +from calm.dsl.decompile.render import render_template +from calm.dsl.decompile.task import render_task_template + + +def render_parallel_task_template( + task_list, entity_context, RUNBOOK_ACTION_MAP, CONFIG_SPEC_MAP +): + """render parallel tasks template""" + + rendered_tasks = [] + for task in task_list: + rendered_tasks.append( + render_task_template( + task, entity_context, RUNBOOK_ACTION_MAP, CONFIG_SPEC_MAP + ) + ) + + user_attrs = {"tasks": rendered_tasks} + + text = render_template(schema_file="parallel_task.py.jinja2", obj=user_attrs) + return text.strip() diff --git a/framework/calm/dsl/decompile/profile.py b/framework/calm/dsl/decompile/profile.py new file mode 100644 index 0000000..dc89fdc --- /dev/null +++ b/framework/calm/dsl/decompile/profile.py @@ -0,0 +1,86 @@ +from calm.dsl.decompile.render import render_template +from calm.dsl.builtins import ProfileType +from 
calm.dsl.decompile.action import render_action_template +from calm.dsl.decompile.variable import render_variable_template +from calm.dsl.decompile.ref_dependency import update_profile_name +from calm.dsl.decompile.config_spec import ( + render_snapshot_config_template, + render_restore_config_template, +) +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + +CONFIG_SPEC_MAP = {} + + +def render_profile_template(cls): + + LOG.debug("Rendering {} profile template".format(cls.__name__)) + if not isinstance(cls, ProfileType): + raise TypeError("{} is not of type {}".format(cls, ProfileType)) + + # Entity context + entity_context = "Profile_" + cls.__name__ + + user_attrs = cls.get_user_attrs() + user_attrs["name"] = cls.__name__ + user_attrs["description"] = cls.__doc__ or "" + + # Update profile name map and gui name + gui_display_name = getattr(cls, "name", "") or cls.__name__ + if gui_display_name != cls.__name__: + user_attrs["gui_display_name"] = gui_display_name + + # updating ui and dsl name mapping + update_profile_name(gui_display_name, cls.__name__) + + restore_config_list = [] + for idx, entity in enumerate(user_attrs.get("restore_configs", [])): + CONFIG_SPEC_MAP[entity.name] = { + "global_name": "{}.restore_configs[{}]".format(cls.__name__, idx), + "local_name": "restore_configs[{}]".format(idx), + } + restore_config_list.append( + render_restore_config_template(entity, entity_context) + ) + + snapshot_config_list = [] + for idx, entity in enumerate(user_attrs.get("snapshot_configs", [])): + CONFIG_SPEC_MAP[entity.name] = { + "global_name": "{}.snapshot_configs[{}]".format(cls.__name__, idx), + "local_name": "snapshot_configs[{}]".format(idx), + } + snapshot_config_list.append( + render_snapshot_config_template(entity, entity_context, CONFIG_SPEC_MAP) + ) + update_config_list = [] + for idx, entity in enumerate(user_attrs.get("update_configs", [])): + CONFIG_SPEC_MAP[entity.name] = { + "global_name": "{}.update_configs[{}]".format(cls.__name__, idx), + "local_name": "update_configs[{}]".format(idx), + } + update_config_list.append(render_update_config_template(entity, entity_context)) + + action_list = [] + for action in user_attrs.get("actions", []): + action_list.append( + render_action_template(action, entity_context, CONFIG_SPEC_MAP) + ) + + deployment_list = [] + for deployment in user_attrs.get("deployments", []): + deployment_list.append(deployment.__name__) + + variable_list = [] + for entity in user_attrs.get("variables", []): + variable_list.append(render_variable_template(entity, entity_context)) + + user_attrs["variables"] = variable_list + user_attrs["deployments"] = ", ".join(deployment_list) + user_attrs["actions"] = action_list + user_attrs["restore_configs"] = ", ".join(restore_config_list) + user_attrs["snapshot_configs"] = ", ".join(snapshot_config_list) + + text = render_template("profile.py.jinja2", obj=user_attrs) + return text.strip() diff --git a/framework/calm/dsl/decompile/readiness_probe.py b/framework/calm/dsl/decompile/readiness_probe.py new file mode 100644 index 0000000..abadfd4 --- /dev/null +++ b/framework/calm/dsl/decompile/readiness_probe.py @@ -0,0 +1,27 @@ +from calm.dsl.decompile.render import render_template +from calm.dsl.builtins import ReadinessProbeType +from calm.dsl.decompile.credential import get_cred_var_name +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +def render_readiness_probe_template(cls): + + LOG.debug("Rendering {} readiness probe 
template".format(cls.__name__)) + if not isinstance(cls, ReadinessProbeType): + raise TypeError("{} is not of type {}".format(cls, ReadinessProbeType)) + + user_attrs = cls.get_user_attrs() + + # deal with cred + cred = user_attrs["credential"] + if cred: + user_attrs["credential"] = "ref({})".format( + get_cred_var_name(getattr(cred, "name", "") or cred.__name__) + ) + + schema_file = "readiness_probe.py.jinja2" + + text = render_template(schema_file=schema_file, obj=user_attrs) + return text.strip() diff --git a/framework/calm/dsl/decompile/ref.py b/framework/calm/dsl/decompile/ref.py new file mode 100644 index 0000000..741a3e8 --- /dev/null +++ b/framework/calm/dsl/decompile/ref.py @@ -0,0 +1,53 @@ +from calm.dsl.decompile.render import render_template +from calm.dsl.builtins import RefType +from calm.dsl.log import get_logging_handle +from calm.dsl.decompile.ref_dependency import ( + get_service_name, + get_profile_name, + get_substrate_name, +) +from calm.dsl.decompile.ref_dependency import get_package_name, get_deployment_name + + +LOG = get_logging_handle(__name__) + + +def render_ref_template(cls): + + LOG.debug("Rendering {} ref template".format(cls.__name__)) + if not isinstance(cls, RefType): + raise TypeError("{} is not of type {}".format(cls, RefType)) + + user_attrs = cls.get_user_attrs() + user_attrs["name"] = getattr(cls, "name") + if not user_attrs["name"]: + user_attrs["name"] = cls.__name__ + schema_file = "ref.py.jinja2" + + kind = cls.kind + if kind == "app_service": + cls_name = get_service_name(user_attrs["name"]) + if cls_name: + user_attrs["name"] = cls_name + elif kind == "app_package": + cls_name = get_package_name(user_attrs["name"]) + if cls_name: + user_attrs["name"] = cls_name + elif kind == "app_substrate": + cls_name = get_substrate_name(user_attrs["name"]) + if cls_name: + user_attrs["name"] = cls_name + elif kind == "app_blueprint_deployment": + cls_name = get_deployment_name(user_attrs["name"]) + if cls_name: + user_attrs["name"] = cls_name + elif kind == "app_profile": + cls_name = get_profile_name(user_attrs["name"]) + if cls_name: + user_attrs["name"] = cls_name + + # Updating name attribute of class + cls.name = user_attrs["name"] + + text = render_template(schema_file=schema_file, obj=user_attrs) + return text.strip() diff --git a/framework/calm/dsl/decompile/ref_dependency.py b/framework/calm/dsl/decompile/ref_dependency.py new file mode 100644 index 0000000..7a3f398 --- /dev/null +++ b/framework/calm/dsl/decompile/ref_dependency.py @@ -0,0 +1,82 @@ +SERVICE_NAME_MAP = {} +PROFILE_NAME_MAP = {} +SUBSTRATE_NAME_MAP = {} +PACKAGE_NAME_MAP = {} +DEPLOYMENT_NAME_MAP = {} + + +def get_service_name(name): + """returns the class name used for entity ref""" + + global SERVICE_NAME_MAP + return SERVICE_NAME_MAP.get(name, None) + + +def update_service_name(ui_name, dsl_name): + """updates the ui and dsl name mapping""" + + global SERVICE_NAME_MAP + SERVICE_NAME_MAP[ui_name] = dsl_name + + +def get_profile_name(name): + """returns the class name used for entity ref""" + + global PROFILE_NAME_MAP + return PROFILE_NAME_MAP.get(name, None) + + +def update_profile_name(ui_name, dsl_name): + + global PROFILE_NAME_MAP + PROFILE_NAME_MAP[ui_name] = dsl_name + + +def get_substrate_name(name): + """returns the class name used for entity ref""" + + global SUBSTRATE_NAME_MAP + return SUBSTRATE_NAME_MAP.get(name, None) + + +def update_substrate_name(ui_name, dsl_name): + + global SUBSTRATE_NAME_MAP + SUBSTRATE_NAME_MAP[ui_name] = dsl_name + + +def get_package_name(name): 
+ """returns the class name used for entity ref""" + + global PACKAGE_NAME_MAP + return PACKAGE_NAME_MAP.get(name, None) + + +def update_package_name(ui_name, dsl_name): + + global PACKAGE_NAME_MAP + PACKAGE_NAME_MAP[ui_name] = dsl_name + + +def get_deployment_name(name): + """returns the class name used for entity ref""" + + global DEPLOYMENT_NAME_MAP + return DEPLOYMENT_NAME_MAP.get(name, None) + + +def update_deployment_name(ui_name, dsl_name): + + global DEPLOYMENT_NAME_MAP + DEPLOYMENT_NAME_MAP[ui_name] = dsl_name + + +def init_ref_dependency_globals(): + + global SERVICE_NAME_MAP, PROFILE_NAME_MAP, SUBSTRATE_NAME_MAP, PACKAGE_NAME_MAP, DEPLOYMENT_NAME_MAP + + SERVICE_NAME_MAP = {} + PROFILE_NAME_MAP = {} + SUBSTRATE_NAME_MAP = {} + PACKAGE_NAME_MAP = {} + DEPLOYMENT_NAME_MAP = {} diff --git a/framework/calm/dsl/decompile/render.py b/framework/calm/dsl/decompile/render.py new file mode 100644 index 0000000..71b4ac8 --- /dev/null +++ b/framework/calm/dsl/decompile/render.py @@ -0,0 +1,17 @@ +from jinja2 import Environment, PackageLoader + + +def get_template(schema_file): + + loader = PackageLoader(__name__, "schemas") + env = Environment(loader=loader) + template = env.get_template(schema_file) + return template + + +def render_template(schema_file, obj): + + template = get_template(schema_file) + text = template.render(obj=obj) + + return text.strip() diff --git a/framework/calm/dsl/decompile/schemas/action.py.jinja2 b/framework/calm/dsl/decompile/schemas/action.py.jinja2 new file mode 100644 index 0000000..e1d6ac7 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/action.py.jinja2 @@ -0,0 +1,17 @@ +{%- macro action(obj) -%} +@action +def {{obj.name}}({% if obj.gui_display_name -%}name="{{obj.gui_display_name}}"{%- endif %}): + {% if obj.description -%}"""{{obj.description}}"""{%- endif %} +{% if obj.variables or obj.tasks %} +{%- for variable in obj.variables %} +{{variable | indent( width=4, first=True)}} +{%- endfor %} +{%- for task in obj.tasks %} +{{task | indent( width=4, first=True)}} +{%- endfor %} +{% else %} + pass +{% endif %} +{%- endmacro %} + +{{action(obj)}} diff --git a/framework/calm/dsl/decompile/schemas/ahv_direct_egress_nic.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_direct_egress_nic.py.jinja2 new file mode 100644 index 0000000..9e26a7c --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_direct_egress_nic.py.jinja2 @@ -0,0 +1,5 @@ +{%- macro ahv_direct_egress_nic(obj) -%} +AhvVmNic.DirectNic.egress("{{obj.subnet_name}}" {%- if obj.cluster_name %}, cluster="{{obj.cluster_name}}"{%- elif obj.vpc_name %}, vpc="{{obj.vpc_name}}"{%- endif %}) +{% endmacro %} + +{{ ahv_direct_egress_nic(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_direct_ingress_nic.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_direct_ingress_nic.py.jinja2 new file mode 100644 index 0000000..51009bb --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_direct_ingress_nic.py.jinja2 @@ -0,0 +1,5 @@ +{%- macro ahv_direct_ingress_nic(obj) -%} +AhvVmNic.DirectNic.ingress("{{obj.subnet_name}}" {%- if obj.cluster_name %}, cluster="{{obj.cluster_name}}"{%- elif obj.vpc_name %}, vpc="{{obj.vpc_name}}"{%- endif %}) +{% endmacro %} + +{{ ahv_direct_ingress_nic(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_direct_tap_nic.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_direct_tap_nic.py.jinja2 new file mode 100644 index 0000000..59a876b --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_direct_tap_nic.py.jinja2 @@ -0,0 +1,5 @@ 
+{%- macro ahv_direct_tap_nic(obj) -%} +AhvVmNic.DirectNic.tap("{{obj.subnet_name}}" {%- if obj.cluster_name %}, cluster="{{obj.cluster_name}}"{%- elif obj.vpc_name %}, vpc="{{obj.vpc_name}}"{%- endif %}) +{% endmacro %} + +{{ ahv_direct_tap_nic(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_normal_egress_nic.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_normal_egress_nic.py.jinja2 new file mode 100644 index 0000000..f0d1ee5 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_normal_egress_nic.py.jinja2 @@ -0,0 +1,5 @@ +{%- macro ahv_normal_egress_nic(obj) -%} +AhvVmNic.NormalNic.egress("{{obj.subnet_name}}" {%- if obj.cluster_name %}, cluster="{{obj.cluster_name}}"{%- elif obj.vpc_name %}, vpc="{{obj.vpc_name}}"{%- endif %}) +{% endmacro %} + +{{ ahv_normal_egress_nic(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_normal_ingress_nic.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_normal_ingress_nic.py.jinja2 new file mode 100644 index 0000000..a9743c5 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_normal_ingress_nic.py.jinja2 @@ -0,0 +1,5 @@ +{%- macro ahv_normal_ingress_nic(obj) -%} +AhvVmNic.NormalNic.ingress("{{obj.subnet_name}}" {%- if obj.cluster_name %}, cluster="{{obj.cluster_name}}"{%- elif obj.vpc_name %}, vpc="{{obj.vpc_name}}"{%- endif %}) +{% endmacro %} + +{{ ahv_normal_ingress_nic(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_normal_tap_nic.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_normal_tap_nic.py.jinja2 new file mode 100644 index 0000000..ae18bef --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_normal_tap_nic.py.jinja2 @@ -0,0 +1,5 @@ +{%- macro ahv_normal_tap_nic(obj) -%} +AhvVmNic.NormalNic.tap("{{obj.subnet_name}}" {%- if obj.cluster_name %}, cluster="{{obj.cluster_name}}"{%- elif obj.vpc_name %}, vpc="{{obj.vpc_name}}"{%- endif %}) +{% endmacro %} + +{{ ahv_normal_tap_nic(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_vm.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_vm.py.jinja2 new file mode 100644 index 0000000..2e06125 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_vm.py.jinja2 @@ -0,0 +1,13 @@ +{% macro ahv_vm(obj) %} +{{obj.resources}} + + +class {{obj.name}}(AhvVm): + + {% if obj.gui_display_name %}name="{{obj.gui_display_name}}"{% endif %} + resources = {{obj.resources_cls_name}} + {% if obj.cluster_name %}cluster = Ref.Cluster(name="{{obj.cluster_name}}"){% endif %} + {% if obj.categories %}categories = {{obj.categories}}{% endif %} +{% endmacro %} + +{{ ahv_vm(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_vm_cdrom_ide_clone_from_image.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_vm_cdrom_ide_clone_from_image.py.jinja2 new file mode 100644 index 0000000..e02ecc0 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_vm_cdrom_ide_clone_from_image.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro ahv_vm_cdrom_ide_clone_from_image(obj) -%} +{% if obj.bootable %} +AhvVmDisk.CdRom.Ide.cloneFromImageService("{{obj.name}}", bootable=True) +{% else %} +AhvVmDisk.CdRom.Ide.cloneFromImageService("{{obj.name}}") +{% endif %} +{% endmacro %} + +{{ ahv_vm_cdrom_ide_clone_from_image(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_vm_cdrom_ide_clone_from_pkg.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_vm_cdrom_ide_clone_from_pkg.py.jinja2 new file mode 100644 index 0000000..37d9052 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_vm_cdrom_ide_clone_from_pkg.py.jinja2 @@ -0,0 +1,9 @@ 
+{%- macro ahv_vm_cdrom_ide_clone_from_img_pkg(obj) -%} +{% if obj.bootable %} +AhvVmDisk.CdRom.Ide.cloneFromVMDiskPackage({{obj.name}}, bootable=True) +{% else %} +AhvVmDisk.CdRom.Ide.cloneFromVMDiskPackage({{obj.name}}) +{% endif %} +{% endmacro %} + +{{ ahv_vm_cdrom_ide_clone_from_img_pkg(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_vm_cdrom_ide_empty_cdrom.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_vm_cdrom_ide_empty_cdrom.py.jinja2 new file mode 100644 index 0000000..265b4fa --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_vm_cdrom_ide_empty_cdrom.py.jinja2 @@ -0,0 +1,5 @@ +{%- macro ahv_vm_cdrom_ide_empty_cdrom(obj) -%} +AhvVmDisk.CdRom.Ide.emptyCdRom() +{% endmacro %} + +{{ ahv_vm_cdrom_ide_empty_cdrom(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_vm_cdrom_sata_clone_from_image.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_vm_cdrom_sata_clone_from_image.py.jinja2 new file mode 100644 index 0000000..3242de7 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_vm_cdrom_sata_clone_from_image.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro ahv_vm_cdrom_sata_clone_from_image(obj) -%} +{% if obj.bootable %} +AhvVmDisk.CdRom.Sata.cloneFromImageService("{{obj.name}}", bootable=True) +{% else %} +AhvVmDisk.CdRom.Sata.cloneFromImageService("{{obj.name}}") +{% endif %} +{% endmacro %} + +{{ ahv_vm_cdrom_sata_clone_from_image(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_vm_cdrom_sata_clone_from_pkg.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_vm_cdrom_sata_clone_from_pkg.py.jinja2 new file mode 100644 index 0000000..b1e4cc9 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_vm_cdrom_sata_clone_from_pkg.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro ahv_vm_cdrom_sata_clone_from_img_pkg(obj) -%} +{% if obj.bootable %} +AhvVmDisk.CdRom.Sata.cloneFromVMDiskPackage({{obj.name}}, bootable=True) +{% else %} +AhvVmDisk.CdRom.Sata.cloneFromVMDiskPackage({{obj.name}}) +{% endif %} +{% endmacro %} + +{{ ahv_vm_cdrom_sata_clone_from_img_pkg(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_vm_cdrom_sata_empty_cdrom.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_vm_cdrom_sata_empty_cdrom.py.jinja2 new file mode 100644 index 0000000..c42b224 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_vm_cdrom_sata_empty_cdrom.py.jinja2 @@ -0,0 +1,5 @@ +{%- macro ahv_vm_cdrom_sata_empty_cdrom(obj) -%} +AhvVmDisk.CdRom.Sata.emptyCdRom() +{% endmacro %} + +{{ ahv_vm_cdrom_sata_empty_cdrom(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_vm_cloud_init.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_vm_cloud_init.py.jinja2 new file mode 100644 index 0000000..2495e47 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_vm_cloud_init.py.jinja2 @@ -0,0 +1,5 @@ +{% macro ahv_cloud_init_gc(obj) %} +AhvVmGC.CloudInit(filename={{obj.filename}}) +{% endmacro %} + +{{ ahv_cloud_init_gc(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_vm_disk_pci_allocate_container.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_vm_disk_pci_allocate_container.py.jinja2 new file mode 100644 index 0000000..b95076f --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_vm_disk_pci_allocate_container.py.jinja2 @@ -0,0 +1,5 @@ +{%- macro ahv_vm_disk_scsi_allocate_storage(obj) -%} +AhvVmDisk.Disk.Pci.allocateOnStorageContainer({{obj.size}}) +{% endmacro %} + +{{ ahv_vm_disk_scsi_allocate_storage(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_vm_disk_pci_clone_from_image.py.jinja2 
b/framework/calm/dsl/decompile/schemas/ahv_vm_disk_pci_clone_from_image.py.jinja2 new file mode 100644 index 0000000..3ffa796 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_vm_disk_pci_clone_from_image.py.jinja2 @@ -0,0 +1,10 @@ +{%- macro ahv_vm_disk_pci_clone_from_image(obj) -%} +{% if obj.bootable %} +AhvVmDisk.Disk.Pci.cloneFromImageService("{{obj.name}}", bootable=True) +{% else %} +AhvVmDisk.Disk.Pci.cloneFromImageService("{{obj.name}}") +{% endif %} +{% endmacro %} + + +{{ ahv_vm_disk_pci_clone_from_image(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_vm_disk_pci_clone_from_pkg.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_vm_disk_pci_clone_from_pkg.py.jinja2 new file mode 100644 index 0000000..ac3e4d2 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_vm_disk_pci_clone_from_pkg.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro ahv_vm_disk_pci_clone_from_img_pkg(obj) -%} +{% if obj.bootable %} +AhvVmDisk.Disk.Pci.cloneFromVMDiskPackage({{obj.name}}, bootable=True) +{% else %} +AhvVmDisk.Disk.Pci.cloneFromVMDiskPackage({{obj.name}}) +{% endif %} +{% endmacro %} + +{{ ahv_vm_disk_pci_clone_from_img_pkg(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_vm_disk_scsi_allocate_container.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_vm_disk_scsi_allocate_container.py.jinja2 new file mode 100644 index 0000000..507e7b9 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_vm_disk_scsi_allocate_container.py.jinja2 @@ -0,0 +1,5 @@ +{%- macro ahv_vm_disk_scsi_allocate_storage(obj) -%} +AhvVmDisk.Disk.Scsi.allocateOnStorageContainer({{obj.size}}) +{% endmacro %} + +{{ ahv_vm_disk_scsi_allocate_storage(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_vm_disk_scsi_clone_from_image.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_vm_disk_scsi_clone_from_image.py.jinja2 new file mode 100644 index 0000000..f70ebca --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_vm_disk_scsi_clone_from_image.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro ahv_vm_disk_scsi_clone_from_image(obj) -%} +{% if obj.bootable %} +AhvVmDisk.Disk.Scsi.cloneFromImageService("{{obj.name}}", bootable=True) +{% else %} +AhvVmDisk.Disk.Scsi.cloneFromImageService("{{obj.name}}") +{% endif %} +{% endmacro %} + +{{ ahv_vm_disk_scsi_clone_from_image(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_vm_disk_scsi_clone_from_pkg.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_vm_disk_scsi_clone_from_pkg.py.jinja2 new file mode 100644 index 0000000..6ed7471 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_vm_disk_scsi_clone_from_pkg.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro ahv_vm_disk_scsi_clone_from_img_pkg(obj) -%} +{% if obj.bootable %} +AhvVmDisk.Disk.Scsi.cloneFromVMDiskPackage({{obj.name}}, bootable=True) +{% else %} +AhvVmDisk.Disk.Scsi.cloneFromVMDiskPackage({{obj.name}}) +{% endif %} +{% endmacro %} + +{{ ahv_vm_disk_scsi_clone_from_img_pkg(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_vm_fresh_sysprep_with_domain.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_vm_fresh_sysprep_with_domain.py.jinja2 new file mode 100644 index 0000000..8fa8e80 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_vm_fresh_sysprep_with_domain.py.jinja2 @@ -0,0 +1,5 @@ +{% macro ahv_fresh_sysprep_gc_with_domain(obj) %} +AhvVmGC.Sysprep.FreshScript.withDomain(filename={{obj.filename}},{% if obj.sysprep.domain %} domain = '{{obj.sysprep.domain}}',{% endif %}{% if obj.sysprep.dns_ip %} dns_ip = '{{obj.sysprep.dns_ip}}',{% 
endif %}{% if obj.sysprep.dns_search_path %} dns_search_path = '{{obj.sysprep.dns_search_path}}',{% endif %}{% if obj.sysprep.credential %} credential = '{{obj.sysprep.credential}}',{% endif %}) +{% endmacro %} + +{{ ahv_fresh_sysprep_gc_with_domain(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_vm_fresh_sysprep_without_domain.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_vm_fresh_sysprep_without_domain.py.jinja2 new file mode 100644 index 0000000..13841a6 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_vm_fresh_sysprep_without_domain.py.jinja2 @@ -0,0 +1,5 @@ +{% macro ahv_fresh_sysprep_gc_without_domain(obj) %} +AhvVmGC.Sysprep.FreshScript.withoutDomain(filename={{obj.filename}}) +{% endmacro %} + +{{ ahv_fresh_sysprep_gc_without_domain(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_vm_gpu.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_vm_gpu.py.jinja2 new file mode 100644 index 0000000..6bf35e9 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_vm_gpu.py.jinja2 @@ -0,0 +1,5 @@ +{% macro ahv_gpu(obj) %} +AhvVmGpu.{{obj.vendor_key}}.{{obj.mode_key}}({% if obj.device_id and obj.device_id != -1 %}device_id={{obj.device_id}}{% endif %}) +{% endmacro %} + +{{ ahv_gpu(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_vm_prepared_sysprep_with_domain.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_vm_prepared_sysprep_with_domain.py.jinja2 new file mode 100644 index 0000000..de76ca6 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_vm_prepared_sysprep_with_domain.py.jinja2 @@ -0,0 +1,5 @@ +{% macro ahv_prepared_sysprep_gc_with_domain(obj) %} +AhvVmGC.Sysprep.PreparedScript.withDomain(filename={{obj.filename}},{% if obj.sysprep.domain %} domain = '{{obj.sysprep.domain}}',{% endif %}{% if obj.sysprep.dns_ip %} dns_ip = '{{obj.sysprep.dns_ip}}',{% endif %}{% if obj.sysprep.dns_search_path %} dns_search_path = '{{obj.sysprep.dns_search_path}}',{% endif %}{% if obj.sysprep.credential %} credential = '{{obj.sysprep.credential}}',{% endif %}) +{% endmacro %} + +{{ ahv_prepared_sysprep_gc_with_domain(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_vm_prepared_sysprep_without_domain.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_vm_prepared_sysprep_without_domain.py.jinja2 new file mode 100644 index 0000000..c07c0ad --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_vm_prepared_sysprep_without_domain.py.jinja2 @@ -0,0 +1,5 @@ +{% macro ahv_prepared_sysprep_gc_without_domain(obj) %} +AhvVmGC.Sysprep.PreparedScript.withoutDomain(filename={{obj.filename}}) +{% endmacro %} + +{{ ahv_prepared_sysprep_gc_without_domain(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ahv_vm_resources.py.jinja2 b/framework/calm/dsl/decompile/schemas/ahv_vm_resources.py.jinja2 new file mode 100644 index 0000000..ac07767 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ahv_vm_resources.py.jinja2 @@ -0,0 +1,14 @@ +{% macro ahv_vm_resources(obj) %} +class {{obj.name}}(AhvVmResources): + + memory = {{obj.memory}} + vCPUs = {{obj.vCPUs}} + cores_per_vCPU = {{obj.cores_per_vCPU}} + {% if obj.disks %}disks = [{{obj.disks}}]{% endif %} + {% if obj.nics %}nics = [{{obj.nics}}]{% endif %} + {% if obj.gpus %}gpus = [{{obj.gpus}}]{% endif %} + {% if obj.guest_customization %}guest_customization = {{obj.guest_customization}}{% endif %} + {% if obj.serial_ports %}serial_ports = {{obj.serial_ports}}{% endif %} +{% endmacro %} + +{{ ahv_vm_resources(obj) }} diff --git 
a/framework/calm/dsl/decompile/schemas/basic_credential.py.jinja2 b/framework/calm/dsl/decompile/schemas/basic_credential.py.jinja2 new file mode 100644 index 0000000..a2d2bc9 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/basic_credential.py.jinja2 @@ -0,0 +1,9 @@ +{% macro cred(obj) %} +{% if obj.default %} +{{obj.var_name}} = basic_cred('{{obj.username}}', {{obj.value}}, name='{{obj.name}}', type='{{obj.type}}', default=True, {% if obj.editables %}editables = {{obj.editables}}{%- endif %}) +{% else %} +{{obj.var_name}} = basic_cred('{{obj.username}}', {{obj.value}}, name='{{obj.name}}', type='{{obj.type}}', {% if obj.editables %}editables = {{obj.editables}}{%- endif %}) +{% endif %} +{% endmacro %} + +{{ cred(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/blueprint.py.jinja2 b/framework/calm/dsl/decompile/schemas/blueprint.py.jinja2 new file mode 100644 index 0000000..4f2f715 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/blueprint.py.jinja2 @@ -0,0 +1,12 @@ +{% macro blueprint(obj) %} +class {{obj.name}}(Blueprint): + {% if obj.description -%}"""{{obj.description}}"""{% endif %} + {% if obj.services %}services = [{{obj.services}}]{% endif %} + {% if obj.packages %}packages = [{{obj.packages}}]{% endif %} + {% if obj.substrates %}substrates = [{{obj.substrates}}]{% endif %} + {% if obj.profiles %}profiles = [{{obj.profiles}}]{% endif %} + {% if obj.credentials %}credentials = [{{obj.credentials}}]{% endif %} +{% endmacro %} + +{{ blueprint(obj) }} + \ No newline at end of file diff --git a/framework/calm/dsl/decompile/schemas/bp_file_helper.py.jinja2 b/framework/calm/dsl/decompile/schemas/bp_file_helper.py.jinja2 new file mode 100644 index 0000000..d809e69 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/bp_file_helper.py.jinja2 @@ -0,0 +1,37 @@ +{% macro bp_file_helper(obj) %} +# THIS FILE IS AUTOMATICALLY GENERATED. +# Disclaimer: Please test this file before using in production. 
+""" +Generated blueprint DSL (.py) +""" + +import json #no_qa +import os #no_qa + +from calm.dsl.builtins import * #no_qa + + +# Secret Variables +{%- for var_file in obj.secret_files %} +{{var_file}} = read_local_file('{{var_file}}') +{%- endfor %} + +# Credentials +{%- for cred in obj.credentials %} +{{cred}} +{%- endfor %} + +{% for vm_image in obj.vm_images %} +{{vm_image}} +{% endfor %} + +{% for entity in obj.dependent_entities %} +{{entity}} +{% endfor %} +{{obj.blueprint}} + +{% if obj.metadata %}{{obj.metadata}}{%- endif %} + +{% endmacro %} + +{{bp_file_helper(obj)}} diff --git a/framework/calm/dsl/decompile/schemas/deployment.py.jinja2 b/framework/calm/dsl/decompile/schemas/deployment.py.jinja2 new file mode 100644 index 0000000..83378a5 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/deployment.py.jinja2 @@ -0,0 +1,15 @@ +{%- macro deployment(obj) %} +class {{obj.name}}(Deployment): + {% if obj.description %}"""{{obj.description}}"""{% endif %} + + {% if obj.gui_display_name %}name="{{obj.gui_display_name}}"{% endif %} + min_replicas = '{{obj.min_replicas}}' + max_replicas = '{{obj.max_replicas}}' + {% if obj.default_replicas %}default_replicas = '{{obj.default_replicas}}'{%- endif %} + {% if obj.dependencies %}dependencies = [{{obj.dependencies}}]{%- endif %} + {% if obj.packages %}packages = [{{obj.packages}}]{%- endif %} + {% if obj.substrate %}substrate = {{obj.substrate}}{%- endif %} + {% if obj.editables %}editables = {{obj.editables}}{%- endif %} +{%- endmacro %} + +{{ deployment(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/dynamic_credential.py.jinja2 b/framework/calm/dsl/decompile/schemas/dynamic_credential.py.jinja2 new file mode 100644 index 0000000..32e0302 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/dynamic_credential.py.jinja2 @@ -0,0 +1,17 @@ +{% macro var_dict_obj(var_list) %} +{ +{% for var in var_list %} +'{{var.name}}': {% if var.type == "SECRET" %} {{var.value}} {% else %} '{{var.value}}' {% endif %}, +{% endfor %} +} +{% endmacro %} + +{% macro cred(obj) %} +{% if obj.default %} +{{obj.var_name}} = dynamic_cred('{{obj.username}}', Ref.Account('{{obj.account}}'), {% if obj.resource_type %}resource_type=Ref.Resource_Type('{{obj.resource_type}}'){%- endif %}, variable_dict={{var_dict_obj(obj.variable_list)|indent(2)}}, name='{{obj.name}}', default=True, type='{{obj.type}}', {% if obj.editables %}editables = {{obj.editables}}{%- endif %}) +{% else %} +{{obj.var_name}} = dynamic_cred('{{obj.username}}', Ref.Account('{{obj.account}}'), {% if obj.resource_type %}resource_type=Ref.Resource_Type('{{obj.resource_type}}'){%- endif %}, variable_dict={{var_dict_obj(obj.variable_list)|indent(2)}}, name='{{obj.name}}', type='{{obj.type}}', {% if obj.editables %}editables = {{obj.editables}}{%- endif %}) +{% endif %} +{% endmacro %} + +{{ cred(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/metadata.py.jinja2 b/framework/calm/dsl/decompile/schemas/metadata.py.jinja2 new file mode 100644 index 0000000..8eeda55 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/metadata.py.jinja2 @@ -0,0 +1,7 @@ +{% macro metadata(obj) -%} +class BpMetadata(Metadata): + + {% if obj.categories %}categories={{obj.categories}}{% endif %} +{%- endmacro %} + +{{ metadata(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/package.py.jinja2 b/framework/calm/dsl/decompile/schemas/package.py.jinja2 new file mode 100644 index 0000000..f3017ec --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/package.py.jinja2 @@ -0,0 +1,15 @@ +{%- macro 
package(obj) -%} +class {{obj.name}}(Package): + {% if obj.description -%}"""{{obj.description}}"""{%- endif %} + + {% if obj.gui_display_name %}name="{{obj.gui_display_name}}"{% endif %} + {% if obj.services %}services=[{{obj.services}}]{%- endif %} +{%- for variable in obj.variables %} + {{variable}} +{% endfor -%} +{% for action in obj.actions %} +{{action | indent( width=4, first=True)}} +{% endfor %} +{%- endmacro %} + +{{ package(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/parallel_task.py.jinja2 b/framework/calm/dsl/decompile/schemas/parallel_task.py.jinja2 new file mode 100644 index 0000000..eabd436 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/parallel_task.py.jinja2 @@ -0,0 +1,8 @@ +{%- macro parallel_task(obj) -%} +with parallel(): +{%- for task in obj.tasks %} +{{task | indent( width=4, first=True)}} +{%- endfor %} +{%- endmacro %} + +{{parallel_task(obj)}} diff --git a/framework/calm/dsl/decompile/schemas/profile.py.jinja2 b/framework/calm/dsl/decompile/schemas/profile.py.jinja2 new file mode 100644 index 0000000..3fd5f3b --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/profile.py.jinja2 @@ -0,0 +1,17 @@ +{%- macro profile(obj) %} +class {{obj.name}}(Profile): + {% if obj.description %}"""{{obj.description}}"""{% endif %} + + {% if obj.gui_display_name %}name="{{obj.gui_display_name}}"{% endif %} + {% if obj.deployments %}deployments = [{{obj.deployments}}]{%- endif %} + {% if obj.restore_configs %}restore_configs = [{{obj.restore_configs}}]{%- endif %} + {% if obj.snapshot_configs %}snapshot_configs = [{{obj.snapshot_configs}}]{%- endif %} +{% for variable in obj.variables %} +{{variable | indent( width=4, first=True)}} +{% endfor -%} +{% for action in obj.actions %} +{{action | indent( width=4, first=True)}} +{%- endfor %} +{%- endmacro %} + +{{ profile(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/readiness_probe.py.jinja2 b/framework/calm/dsl/decompile/schemas/readiness_probe.py.jinja2 new file mode 100644 index 0000000..5b85f7a --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/readiness_probe.py.jinja2 @@ -0,0 +1,5 @@ +{% macro readiness_probe(obj) %} +readiness_probe(connection_type="{{obj.connection_type}}", disabled={{obj.disabled}}{% if obj.retries %}, retries="{{obj.retries}}"{% endif %}{% if obj.connection_port %}, connection_port={{obj.connection_port}}{% endif %}{% if obj.address %}, address="{{obj.address}}"{% endif %}{% if obj.delay_secs %}, delay_secs="{{obj.delay_secs}}"{% endif %}{% if obj.credential %}, credential={{obj.credential}}{% endif %}{% if obj.editables_list %}, editables_list={{obj.editables_list}}{% endif %}) +{%- endmacro %} + +{{ readiness_probe(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/ref.py.jinja2 b/framework/calm/dsl/decompile/schemas/ref.py.jinja2 new file mode 100644 index 0000000..802c929 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/ref.py.jinja2 @@ -0,0 +1,5 @@ +{%- macro ref(obj) -%} +ref({{obj.name}}) +{%- endmacro %} + +{{ref(obj)}} diff --git a/framework/calm/dsl/decompile/schemas/restore_config.py.jinja2 b/framework/calm/dsl/decompile/schemas/restore_config.py.jinja2 new file mode 100644 index 0000000..9415ab4 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/restore_config.py.jinja2 @@ -0,0 +1,5 @@ +{%- macro restore_config(obj) %} +AppProtection.RestoreConfig(name='{{obj.name}}', target=ref({{obj.target}}), delete_vm_post_restore={{obj.delete_vm_post_restore}}) +{%- endmacro %} + +{{ restore_config(obj) }} diff --git 
a/framework/calm/dsl/decompile/schemas/service.py.jinja2 b/framework/calm/dsl/decompile/schemas/service.py.jinja2 new file mode 100644 index 0000000..7052342 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/service.py.jinja2 @@ -0,0 +1,19 @@ +{% macro service(obj) -%} +class {{obj.name}}(Service): + {% if obj.description %}"""{{obj.description}}"""{% endif %} + + {% if obj.gui_display_name %}name="{{obj.gui_display_name}}"{% endif %} +{% if obj.dependencies or obj.variables or obj.actions %} + {% if obj.dependencies %}dependencies=[{{obj.dependencies}}]{% endif %} +{% for variable in obj.variables %} +{{variable | indent( width=4, first=True)}} +{% endfor -%} +{% for action in obj.actions %} +{{action | indent( width=4, first=True)}} +{% endfor %} +{% else %} + pass +{% endif %} +{%- endmacro %} + +{{ service(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/snapshot_config.py.jinja2 b/framework/calm/dsl/decompile/schemas/snapshot_config.py.jinja2 new file mode 100644 index 0000000..1008b6c --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/snapshot_config.py.jinja2 @@ -0,0 +1,13 @@ +{%- macro snapshot_config(obj) %} +{% if obj.policy %} +{% if obj.rule %} +AppProtection.SnapshotConfig(name='{{obj.name}}', target=ref({{obj.target}}), restore_config=ref({{obj.restore_config}}), policy=AppProtection.ProtectionPolicy('{{obj.policy}}', rule='{{obj.rule}}')) +{% else %} +AppProtection.SnapshotConfig(name='{{obj.name}}', target=ref({{obj.target}}), restore_config=ref({{obj.restore_config}}), policy=AppProtection.ProtectionPolicy('{{obj.policy}}')) +{% endif %} +{% else %} +AppProtection.SnapshotConfig(name='{{obj.name}}', target=ref({{obj.target}}), restore_config=ref({{obj.restore_config}})) +{% endif %} +{%- endmacro %} + +{{ snapshot_config(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/substrate.py.jinja2 b/framework/calm/dsl/decompile/schemas/substrate.py.jinja2 new file mode 100644 index 0000000..4a577e1 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/substrate.py.jinja2 @@ -0,0 +1,17 @@ +{%- macro substrate(obj) %} +class {{obj.name}}(Substrate): + {% if obj.description -%}"""{{obj.description}}"""{%- endif %} + + {% if obj.gui_display_name %}name="{{obj.gui_display_name}}"{% endif %} + os_type = '{{obj.os_type}}' + provider_type = '{{obj.provider_type}}' + provider_spec = {{obj.provider_spec}} + {% if obj.provider_spec_editables%}provider_spec_editables = {{obj.provider_spec_editables}}{%- endif %} + readiness_probe = {{obj.readiness_probe}} + {% if obj.readiness_probe_cred -%}readiness_probe["credential"] = {{obj.readiness_probe_cred}}{%- endif %} +{% for action in obj.actions %} +{{action | indent( width=4, first=True)}} +{% endfor %} +{%- endmacro %} + +{{ substrate(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/task_call_config.py.jinja2 b/framework/calm/dsl/decompile/schemas/task_call_config.py.jinja2 new file mode 100644 index 0000000..e628565 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/task_call_config.py.jinja2 @@ -0,0 +1,5 @@ +{%- macro call_config_task(obj) -%} +CalmTask.ConfigExec(name='{{obj.name}}', config=ref({{obj.config}})) +{%- endmacro %} + +{{ call_config_task(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/task_call_runbook.py.jinja2 b/framework/calm/dsl/decompile/schemas/task_call_runbook.py.jinja2 new file mode 100644 index 0000000..145a324 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/task_call_runbook.py.jinja2 @@ -0,0 +1,5 @@ +{%- macro runbook_task(obj) -%} 
+{{obj.target}}.{{obj.action}}(name='{{obj.name}}') +{%- endmacro %} + +{{ runbook_task(obj) }} \ No newline at end of file diff --git a/framework/calm/dsl/decompile/schemas/task_delay.py.jinja2 b/framework/calm/dsl/decompile/schemas/task_delay.py.jinja2 new file mode 100644 index 0000000..6de05d6 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/task_delay.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro delay_task(obj) -%} +{%- if obj.target is not defined %} +CalmTask.Delay(name='{{obj.name}}', delay_seconds={{obj.delay_seconds}}) +{%- else %} +CalmTask.Delay(name='{{obj.name}}', delay_seconds={{obj.delay_seconds}}, target={{obj.target}}) +{%- endif %} +{%- endmacro %} + +{{ delay_task(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/task_exec_escript.py.jinja2 b/framework/calm/dsl/decompile/schemas/task_exec_escript.py.jinja2 new file mode 100644 index 0000000..a6765da --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/task_exec_escript.py.jinja2 @@ -0,0 +1,13 @@ +{%- macro exec_escript_task(obj) -%} +{%- if obj.cred is not defined and obj.target is not defined %} +CalmTask.Exec.escript(name='{{obj.name}}', filename={{obj.attrs.script_file}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- elif obj.cred is not defined %} +CalmTask.Exec.escript(name='{{obj.name}}', filename={{obj.attrs.script_file}}, target={{obj.target}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- elif obj.target is not defined %} +CalmTask.Exec.escript(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- else %} +CalmTask.Exec.escript(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, target={{obj.target}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- endif %} +{%- endmacro %} + +{{ exec_escript_task(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/task_exec_powershell.py.jinja2 b/framework/calm/dsl/decompile/schemas/task_exec_powershell.py.jinja2 new file mode 100644 index 0000000..b811890 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/task_exec_powershell.py.jinja2 @@ -0,0 +1,13 @@ +{%- macro exec_powershell_task(obj) -%} +{%- if obj.cred is not defined and obj.target is not defined %} +CalmTask.Exec.powershell(name='{{obj.name}}', filename={{obj.attrs.script_file}}) +{%- elif obj.cred is not defined %} +CalmTask.Exec.powershell(name='{{obj.name}}', filename={{obj.attrs.script_file}}, target={{obj.target}}) +{%- elif obj.target is not defined %} +CalmTask.Exec.powershell(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}) +{%- else %} +CalmTask.Exec.powershell(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, target={{obj.target}}) +{%- endif %} +{%- endmacro %} + +{{ exec_powershell_task(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/task_exec_ssh.py.jinja2 b/framework/calm/dsl/decompile/schemas/task_exec_ssh.py.jinja2 new file mode 100644 index 0000000..dd58ce2 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/task_exec_ssh.py.jinja2 @@ -0,0 +1,13 @@ +{%- macro exec_ssh_task(obj) -%} +{%- if obj.cred is not defined and obj.target is not defined %} +CalmTask.Exec.ssh(name='{{obj.name}}', filename={{obj.attrs.script_file}}) +{%- elif obj.cred 
is not defined %} +CalmTask.Exec.ssh(name='{{obj.name}}', filename={{obj.attrs.script_file}}, target={{obj.target}}) +{%- elif obj.target is not defined %} +CalmTask.Exec.ssh(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}) +{%- else %} +CalmTask.Exec.ssh(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, target={{obj.target}}) +{%- endif %} +{%- endmacro %} + +{{ exec_ssh_task(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/task_http_delete.py.jinja2 b/framework/calm/dsl/decompile/schemas/task_http_delete.py.jinja2 new file mode 100644 index 0000000..cee03c0 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/task_http_delete.py.jinja2 @@ -0,0 +1,13 @@ +{%- macro http_delete_task(obj) -%} +{%- if obj.target is not defined and obj.attrs.request_body is not defined %} +CalmTask.HTTP.delete('{{obj.attrs.url}}', headers={{obj.headers}}, secret_headers={{obj.secret_headers}} , content_type='{{obj.attrs.content_type}}', verify={{obj.attrs.tls_verify}}, status_mapping={{obj.status_mapping}}, response_paths={{obj.response_paths}}, name='{{obj.name}}'{%- if obj.cred %}, cred={{obj.cred}}{%- endif %} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- elif obj.target is not defined %} +CalmTask.HTTP.delete('{{obj.attrs.url}}', body=json.dumps({{obj.attrs.request_body}}), headers={{obj.headers}}, secret_headers={{obj.secret_headers}} , content_type='{{obj.attrs.content_type}}', verify={{obj.attrs.tls_verify}}, status_mapping={{obj.status_mapping}}, response_paths={{obj.response_paths}}, name='{{obj.name}}'{%- if obj.cred %}, cred={{obj.cred}}{%- endif %} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- elif obj.attrs.request_body is not defined %} +CalmTask.HTTP.delete('{{obj.attrs.url}}', headers={{obj.headers}}, secret_headers={{obj.secret_headers}} , content_type='{{obj.attrs.content_type}}', verify={{obj.attrs.tls_verify}}, status_mapping={{obj.status_mapping}}, response_paths={{obj.response_paths}}, name='{{obj.name}}', target={{obj.target}}{%- if obj.cred %}, cred={{obj.cred}}{%- endif %} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- else %} +CalmTask.HTTP.delete('{{obj.attrs.url}}', body=json.dumps({{obj.attrs.request_body}}), headers={{obj.headers}}, secret_headers={{obj.secret_headers}} , content_type='{{obj.attrs.content_type}}', verify={{obj.attrs.tls_verify}}, status_mapping={{obj.status_mapping}}, response_paths={{obj.response_paths}}, name='{{obj.name}}', target={{obj.target}}{%- if obj.cred %}, cred={{obj.cred}}{%- endif %} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- endif %} +{%- endmacro %} + +{{ http_delete_task(obj) }} \ No newline at end of file diff --git a/framework/calm/dsl/decompile/schemas/task_http_get.py.jinja2 b/framework/calm/dsl/decompile/schemas/task_http_get.py.jinja2 new file mode 100644 index 0000000..ed4a42b --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/task_http_get.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro http_get_task(obj) -%} +{%- if obj.target is not defined %} +CalmTask.HTTP.get('{{obj.attrs.url}}', headers={{obj.headers}}, secret_headers={{obj.secret_headers}} , content_type='{{obj.attrs.content_type}}', verify={{obj.attrs.tls_verify}}, status_mapping={{obj.status_mapping}}, 
response_paths={{obj.response_paths}}, name='{{obj.name}}'{%- if obj.cred %}, cred={{obj.cred}}{%- endif %} {%- if obj.attrs.tunnel_reference %}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- else %} +CalmTask.HTTP.get('{{obj.attrs.url}}', headers={{obj.headers}}, secret_headers={{obj.secret_headers}} , content_type='{{obj.attrs.content_type}}', verify={{obj.attrs.tls_verify}}, status_mapping={{obj.status_mapping}}, response_paths={{obj.response_paths}}, name='{{obj.name}}', target={{obj.target}}{%- if obj.cred %}, cred={{obj.cred}}{%- endif %} {%- if obj.attrs.tunnel_reference %}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- endif %} +{%- endmacro %} + +{{ http_get_task(obj) }} \ No newline at end of file diff --git a/framework/calm/dsl/decompile/schemas/task_http_post.py.jinja2 b/framework/calm/dsl/decompile/schemas/task_http_post.py.jinja2 new file mode 100644 index 0000000..c678bf5 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/task_http_post.py.jinja2 @@ -0,0 +1,13 @@ +{%- macro http_post_task(obj) -%} +{%- if obj.target is not defined and obj.attrs.request_body is not defined %} +CalmTask.HTTP.post('{{obj.attrs.url}}', headers={{obj.headers}}, secret_headers={{obj.secret_headers}} , content_type='{{obj.attrs.content_type}}', verify={{obj.attrs.tls_verify}}, status_mapping={{obj.status_mapping}}, response_paths={{obj.response_paths}}, name='{{obj.name}}'{%- if obj.cred %}, cred={{obj.cred}}{%- endif %} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- elif obj.target is not defined %} +CalmTask.HTTP.post('{{obj.attrs.url}}', body=json.dumps({{obj.attrs.request_body}}), headers={{obj.headers}}, secret_headers={{obj.secret_headers}} , content_type='{{obj.attrs.content_type}}', verify={{obj.attrs.tls_verify}}, status_mapping={{obj.status_mapping}}, response_paths={{obj.response_paths}}, name='{{obj.name}}'{%- if obj.cred %}, cred={{obj.cred}}{%- endif %} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- elif obj.attrs.request_body is not defined %} +CalmTask.HTTP.post('{{obj.attrs.url}}', headers={{obj.headers}}, secret_headers={{obj.secret_headers}} , content_type='{{obj.attrs.content_type}}', verify={{obj.attrs.tls_verify}}, status_mapping={{obj.status_mapping}}, response_paths={{obj.response_paths}}, name='{{obj.name}}', target={{obj.target}}{%- if obj.cred %}, cred={{obj.cred}}{%- endif %} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- else %} +CalmTask.HTTP.post('{{obj.attrs.url}}', body=json.dumps({{obj.attrs.request_body}}), headers={{obj.headers}}, secret_headers={{obj.secret_headers}} , content_type='{{obj.attrs.content_type}}', verify={{obj.attrs.tls_verify}}, status_mapping={{obj.status_mapping}}, response_paths={{obj.response_paths}}, name='{{obj.name}}', target={{obj.target}}{%- if obj.cred %}, cred={{obj.cred}}{%- endif %} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- endif %} +{%- endmacro %} + +{{ http_post_task(obj) }} \ No newline at end of file diff --git a/framework/calm/dsl/decompile/schemas/task_http_put.py.jinja2 b/framework/calm/dsl/decompile/schemas/task_http_put.py.jinja2 new file mode 100644 index 0000000..99097e6 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/task_http_put.py.jinja2 @@ -0,0 
+1,13 @@ +{%- macro http_put_task(obj) -%} +{%- if obj.target is not defined and obj.attrs.request_body is not defined %} +CalmTask.HTTP.put('{{obj.attrs.url}}', headers={{obj.headers}}, secret_headers={{obj.secret_headers}} , content_type='{{obj.attrs.content_type}}', verify={{obj.attrs.tls_verify}}, status_mapping={{obj.status_mapping}}, response_paths={{obj.response_paths}}, name='{{obj.name}}'{%- if obj.cred %}, cred={{obj.cred}}{%- endif %} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- elif obj.target is not defined %} +CalmTask.HTTP.put('{{obj.attrs.url}}', body=json.dumps({{obj.attrs.request_body}}), headers={{obj.headers}}, secret_headers={{obj.secret_headers}} , content_type='{{obj.attrs.content_type}}', verify={{obj.attrs.tls_verify}}, status_mapping={{obj.status_mapping}}, response_paths={{obj.response_paths}}, name='{{obj.name}}'{%- if obj.cred %}, cred={{obj.cred}}{%- endif %} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- elif obj.attrs.request_body is not defined %} +CalmTask.HTTP.put('{{obj.attrs.url}}', headers={{obj.headers}}, secret_headers={{obj.secret_headers}} , content_type='{{obj.attrs.content_type}}', verify={{obj.attrs.tls_verify}}, status_mapping={{obj.status_mapping}}, response_paths={{obj.response_paths}}, name='{{obj.name}}', target={{obj.target}}{%- if obj.cred %}, cred={{obj.cred}}{%- endif %} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- else %} +CalmTask.HTTP.put('{{obj.attrs.url}}', body=json.dumps({{obj.attrs.request_body}}), headers={{obj.headers}}, secret_headers={{obj.secret_headers}} , content_type='{{obj.attrs.content_type}}', verify={{obj.attrs.tls_verify}}, status_mapping={{obj.status_mapping}}, response_paths={{obj.response_paths}}, name='{{obj.name}}', target={{obj.target}}{%- if obj.cred %}, cred={{obj.cred}}{%- endif %} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- endif %} +{%- endmacro %} + +{{ http_put_task(obj) }} \ No newline at end of file diff --git a/framework/calm/dsl/decompile/schemas/task_scaling_scalein.py.jinja2 b/framework/calm/dsl/decompile/schemas/task_scaling_scalein.py.jinja2 new file mode 100644 index 0000000..dca7873 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/task_scaling_scalein.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro scaling_scalein_task(obj) -%} +{%- if obj.target is not defined %} +CalmTask.Scaling.scale_in('{{obj.scaling_count}}', name='{{obj.name}}') +{%- else %} +CalmTask.Scaling.scale_in('{{obj.scaling_count}}', name='{{obj.name}}', target={{obj.target}}) +{%- endif %} +{%- endmacro %} + +{{ scaling_scalein_task(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/task_scaling_scaleout.py.jinja2 b/framework/calm/dsl/decompile/schemas/task_scaling_scaleout.py.jinja2 new file mode 100644 index 0000000..b3d5d51 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/task_scaling_scaleout.py.jinja2 @@ -0,0 +1,9 @@ +{% macro scaling_scaleout_task(obj) -%} +{% if obj.target is not defined %} +CalmTask.Scaling.scale_out('{{obj.scaling_count}}', name='{{obj.name}}') +{% else %} +CalmTask.Scaling.scale_out('{{obj.scaling_count}}', name='{{obj.name}}', target={{obj.target}}) +{% endif %} +{%- endmacro %} + +{{ scaling_scaleout_task(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/task_setvariable_escript.py.jinja2 
b/framework/calm/dsl/decompile/schemas/task_setvariable_escript.py.jinja2 new file mode 100644 index 0000000..813b93f --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/task_setvariable_escript.py.jinja2 @@ -0,0 +1,15 @@ +{%- macro setvariable_escript_task(obj) -%} +{%- if obj.cred is not defined and obj.target is not defined and obj.variables is not defined %} +CalmTask.SetVariable.escript(name='{{obj.name}}', filename={{obj.attrs.script_file}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- elif obj.cred is not defined %} +CalmTask.SetVariable.escript(name='{{obj.name}}', filename={{obj.attrs.script_file}}, target={{obj.target}}, variables={{obj.variables}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- elif obj.target is not defined %} +CalmTask.SetVariable.escript(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, variables={{obj.variables}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- elif obj.variables is not defined %} +CalmTask.SetVariable.escript(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, target={{obj.target}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- else %} +CalmTask.SetVariable.escript(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, target={{obj.target}}, variables={{obj.variables}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- endif %} +{%- endmacro %} + +{{ setvariable_escript_task(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/task_setvariable_powershell.py.jinja2 b/framework/calm/dsl/decompile/schemas/task_setvariable_powershell.py.jinja2 new file mode 100644 index 0000000..8e58b1b --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/task_setvariable_powershell.py.jinja2 @@ -0,0 +1,15 @@ +{%- macro setvariable_powershell_task(obj) -%} +{%- if obj.cred is not defined and obj.target is not defined and obj.variables is not defined %} +CalmTask.SetVariable.powershell(name='{{obj.name}}', filename={{obj.attrs.script_file}}) +{%- elif obj.cred is not defined %} +CalmTask.SetVariable.powershell(name='{{obj.name}}', filename={{obj.attrs.script_file}}, target={{obj.target}}, variables={{obj.variables}}) +{%- elif obj.target is not defined %} +CalmTask.SetVariable.powershell(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, variables={{obj.variables}}) +{%- elif obj.variables is not defined %} +CalmTask.SetVariable.powershell(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, target={{obj.target}}) +{%- else %} +CalmTask.SetVariable.powershell(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, target={{obj.target}}, variables={{obj.variables}}) +{%- endif %} +{%- endmacro %} + +{{ setvariable_powershell_task(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/task_setvariable_ssh.py.jinja2 b/framework/calm/dsl/decompile/schemas/task_setvariable_ssh.py.jinja2 new file mode 100644 index 0000000..d535957 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/task_setvariable_ssh.py.jinja2 @@ -0,0 +1,15 @@ +{%- macro setvariable_ssh_task(obj) -%} +{%- if obj.cred is not defined and obj.target is not defined and obj.variables is not defined %}
+CalmTask.SetVariable.ssh(name='{{obj.name}}', filename={{obj.attrs.script_file}}) +{%- elif obj.cred is not defined %} +CalmTask.SetVariable.ssh(name='{{obj.name}}', filename={{obj.attrs.script_file}}, target={{obj.target}}, variables={{obj.variables}}) +{%- elif obj.target is not defined %} +CalmTask.SetVariable.ssh(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, variables={{obj.variables}}) +{%- elif obj.variables is not defined %} +CalmTask.SetVariable.ssh(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, target={{obj.target}}) +{%- else %} +CalmTask.SetVariable.ssh(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, target={{obj.target}}, variables={{obj.variables}}) +{%- endif %} +{%- endmacro %} + +{{ setvariable_ssh_task(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_simple_date.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_simple_date.py.jinja2 new file mode 100644 index 0000000..ac5742c --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_simple_date.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro simple_date_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.Simple.date('{{obj.value}}', label='{{obj.label}}', regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.Simple.date('{{obj.value}}', label='{{obj.label}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ simple_date_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_simple_datetime.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_simple_datetime.py.jinja2 new file mode 100644 index 0000000..35eab27 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_simple_datetime.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro simple_datetime_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.Simple.datetime('{{obj.value}}', label='{{obj.label}}',regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.Simple.datetime('{{obj.value}}', label='{{obj.label}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ simple_datetime_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_simple_int.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_simple_int.py.jinja2 new file mode 100644 index 0000000..f866650 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_simple_int.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro simple_int_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.Simple.int('{{obj.value}}', label='{{obj.label}}', regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.Simple.int('{{obj.value}}', label='{{obj.label}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ simple_int_var(obj) }} diff --git 
a/framework/calm/dsl/decompile/schemas/var_simple_multiline.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_simple_multiline.py.jinja2 new file mode 100644 index 0000000..5d70b6a --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_simple_multiline.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro simple_multiline_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.Simple.multiline({{obj.value}}, label='{{obj.label}}', regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.Simple.multiline({{obj.value}}, label='{{obj.label}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ simple_multiline_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_simple_secret_date.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_simple_secret_date.py.jinja2 new file mode 100644 index 0000000..8b0b230 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_simple_secret_date.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro simple_secret_date_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.Simple.Secret.date({{obj.value}}, label='{{obj.label}}',regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.Simple.Secret.date({{obj.value}}, label='{{obj.label}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ simple_secret_date_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_simple_secret_datetime.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_simple_secret_datetime.py.jinja2 new file mode 100644 index 0000000..6d8eb7a --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_simple_secret_datetime.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro simple_secret_datetime_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.Simple.Secret.datetime({{obj.value}}, label='{{obj.label}}',regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.Simple.Secret.datetime({{obj.value}}, label='{{obj.label}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ simple_secret_datetime_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_simple_secret_int.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_simple_secret_int.py.jinja2 new file mode 100644 index 0000000..0cd37fa --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_simple_secret_int.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro simple_secret_int_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.Simple.Secret.int({{obj.value}}, label='{{obj.label}}',regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.Simple.Secret.int({{obj.value}}, label='{{obj.label}}', 
is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ simple_secret_int_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_simple_secret_multiline.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_simple_secret_multiline.py.jinja2 new file mode 100644 index 0000000..9701491 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_simple_secret_multiline.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro simple_secret_multiline_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.Simple.Secret.multiline({{obj.value}}, label='{{obj.label}}',regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.Simple.Secret.multiline({{obj.value}}, label='{{obj.label}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ simple_secret_multiline_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_simple_secret_string.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_simple_secret_string.py.jinja2 new file mode 100644 index 0000000..e9ac41e --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_simple_secret_string.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro simple_secret_string_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.Simple.Secret({{obj.value}}, label='{{obj.label}}', regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.Simple.Secret({{obj.value}}, label='{{obj.label}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ simple_secret_string_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_simple_secret_time.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_simple_secret_time.py.jinja2 new file mode 100644 index 0000000..d657cd8 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_simple_secret_time.py.jinja2 @@ -0,0 +1,10 @@ +{%- macro simple_secret_time_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.Simple.Secret.time({{obj.value}}, label='{{obj.label}}', regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.Simple.Secret.time({{obj.value}}, label='{{obj.label}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ simple_secret_time_var(obj) }} + diff --git a/framework/calm/dsl/decompile/schemas/var_simple_string.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_simple_string.py.jinja2 new file mode 100644 index 0000000..78d00d6 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_simple_string.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro simple_string_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.Simple('{{obj.value}}', label='{{obj.label}}', regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, 
is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.Simple('{{obj.value}}', label='{{obj.label}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ simple_string_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_simple_time.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_simple_time.py.jinja2 new file mode 100644 index 0000000..8f67115 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_simple_time.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro simple_time_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.Simple.time('{{obj.value}}', label='{{obj.label}}',regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.Simple.time('{{obj.value}}', label='{{obj.label}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ simple_time_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_array_date.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_array_date.py.jinja2 new file mode 100644 index 0000000..fcd0e64 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_array_date.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro with_options_from_task_array_date_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.WithOptions.FromTask.Array.date({{obj.value}}, label='{{obj.label}}', regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.WithOptions.FromTask.Array.date({{obj.value}}, label='{{obj.label}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ with_options_from_task_array_date_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_array_datetime.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_array_datetime.py.jinja2 new file mode 100644 index 0000000..423deed --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_array_datetime.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro with_options_from_task_array_datetime_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.WithOptions.FromTask.Array.datetime({{obj.value}}, label='{{obj.label}}', regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.WithOptions.FromTask.Array.datetime({{obj.value}}, label='{{obj.label}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ with_options_from_task_array_datetime_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_array_int.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_array_int.py.jinja2 new file mode 100644 index 0000000..804de10 --- /dev/null +++ 
b/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_array_int.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro with_options_from_task_array_int_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.WithOptions.FromTask.Array.int({{obj.value}}, label='{{obj.label}}', regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.WithOptions.FromTask.Array.int({{obj.value}}, label='{{obj.label}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ with_options_from_task_array_int_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_array_multiline.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_array_multiline.py.jinja2 new file mode 100644 index 0000000..b668b46 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_array_multiline.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro with_options_from_task_array_multiline_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.WithOptions.FromTask.Array.multiline({{obj.value}}, label='{{obj.label}}', regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.WithOptions.FromTask.Array.multiline({{obj.value}}, label='{{obj.label}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ with_options_from_task_array_multiline_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_array_string.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_array_string.py.jinja2 new file mode 100644 index 0000000..9cc54ed --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_array_string.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro with_options_from_task_array_string_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.WithOptions.FromTask.Array({{obj.value}}, label='{{obj.label}}', regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.WithOptions.FromTask.Array({{obj.value}}, label='{{obj.label}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ with_options_from_task_array_string_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_array_time.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_array_time.py.jinja2 new file mode 100644 index 0000000..f659ff1 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_array_time.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro with_options_from_task_array_time_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.WithOptions.FromTask.Array.time({{obj.value}}, label='{{obj.label}}', regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.WithOptions.FromTask.Array.time({{obj.value}}, label='{{obj.label}}', 
is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ with_options_from_task_array_time_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_date.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_date.py.jinja2 new file mode 100644 index 0000000..3094f45 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_date.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro with_options_from_task_date_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.WithOptions.FromTask.date({{obj.value}}, label='{{obj.label}}', regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.WithOptions.FromTask.date({{obj.value}}, label='{{obj.label}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ with_options_from_task_date_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_datetime.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_datetime.py.jinja2 new file mode 100644 index 0000000..b6e9fab --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_datetime.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro with_options_from_task_datetime_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.WithOptions.FromTask.datetime({{obj.value}}, label='{{obj.label}}', regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.WithOptions.FromTask.datetime({{obj.value}}, label='{{obj.label}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ with_options_from_task_datetime_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_int.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_int.py.jinja2 new file mode 100644 index 0000000..d89e618 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_int.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro with_options_from_task_int_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.WithOptions.FromTask.int({{obj.value}}, label='{{obj.label}}', regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.WithOptions.FromTask.int({{obj.value}}, label='{{obj.label}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ with_options_from_task_int_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_multiline.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_multiline.py.jinja2 new file mode 100644 index 0000000..33c077f --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_multiline.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro with_options_from_task_multiline_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.WithOptions.FromTask.multiline({{obj.value}}, label='{{obj.label}}', 
regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.WithOptions.FromTask.multiline({{obj.value}}, label='{{obj.label}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ with_options_from_task_multiline_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_string.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_string.py.jinja2 new file mode 100644 index 0000000..edaffdd --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_string.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro with_options_from_task_string_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.WithOptions.FromTask({{obj.value}}, label='{{obj.label}}', regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.WithOptions.FromTask({{obj.value}}, label='{{obj.label}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ with_options_from_task_string_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_time.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_time.py.jinja2 new file mode 100644 index 0000000..d127c67 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_with_options_fromTask_time.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro with_options_from_task_time_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.WithOptions.FromTask.time({{obj.value}}, label='{{obj.label}}', regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.WithOptions.FromTask.time({{obj.value}}, label='{{obj.label}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ with_options_from_task_time_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_with_options_predefined_array_date.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_with_options_predefined_array_date.py.jinja2 new file mode 100644 index 0000000..0a1970b --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_with_options_predefined_array_date.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro with_options_predefined_array_date_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.WithOptions.Predefined.Array.date({{obj.choices}}, label='{{obj.label}}', defaults={{obj.value}}, regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.WithOptions.Predefined.Array.date({{obj.choices}}, label='{{obj.label}}', defaults={{obj.value}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ with_options_predefined_array_date_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_with_options_predefined_array_datetime.py.jinja2 
b/framework/calm/dsl/decompile/schemas/var_with_options_predefined_array_datetime.py.jinja2 new file mode 100644 index 0000000..1715154 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_with_options_predefined_array_datetime.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro with_options_predefined_array_datetime_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.WithOptions.Predefined.Array.datetime({{obj.choices}}, label='{{obj.label}}', defaults={{obj.value}}, regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.WithOptions.Predefined.Array.datetime({{obj.choices}}, label='{{obj.label}}', defaults={{obj.value}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ with_options_predefined_array_datetime_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_with_options_predefined_array_int.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_with_options_predefined_array_int.py.jinja2 new file mode 100644 index 0000000..4fc902b --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_with_options_predefined_array_int.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro with_options_predefined_array_int_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.WithOptions.Predefined.Array.int({{obj.choices}}, label='{{obj.label}}', defaults={{obj.value}}, regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.WithOptions.Predefined.Array.int({{obj.choices}}, label='{{obj.label}}', defaults={{obj.value}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ with_options_predefined_array_int_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_with_options_predefined_array_multiline.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_with_options_predefined_array_multiline.py.jinja2 new file mode 100644 index 0000000..59c6424 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_with_options_predefined_array_multiline.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro with_options_predefined_array_multiline_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.WithOptions.Predefined.Array.multiline({{obj.choices}}, label='{{obj.label}}', defaults={{obj.value}}, regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.WithOptions.Predefined.Array.multiline({{obj.choices}}, label='{{obj.label}}', defaults={{obj.value}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ with_options_predefined_array_multiline_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_with_options_predefined_array_string.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_with_options_predefined_array_string.py.jinja2 new file mode 100644 index 0000000..fe63fcc --- /dev/null +++ 
b/framework/calm/dsl/decompile/schemas/var_with_options_predefined_array_string.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro with_options_predefined_array_string_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.WithOptions.Predefined.Array({{obj.choices}}, label='{{obj.label}}', defaults={{obj.value}}, regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.WithOptions.Predefined.Array({{obj.choices}}, label='{{obj.label}}', defaults={{obj.value}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ with_options_predefined_array_string_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_with_options_predefined_array_time.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_with_options_predefined_array_time.py.jinja2 new file mode 100644 index 0000000..4c967da --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_with_options_predefined_array_time.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro with_options_predefined_array_time_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.WithOptions.Predefined.Array.time({{obj.choices}}, label='{{obj.label}}', defaults={{obj.value}}, regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.WithOptions.Predefined.Array.time({{obj.choices}}, label='{{obj.label}}', defaults={{obj.value}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ with_options_predefined_array_time_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_with_options_predefined_date.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_with_options_predefined_date.py.jinja2 new file mode 100644 index 0000000..3eb919f --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_with_options_predefined_date.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro with_options_predefined_date_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.WithOptions.Predefined.date({{obj.choices}}, label='{{obj.label}}', default='{{obj.value}}', regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.WithOptions.Predefined.date({{obj.choices}}, label='{{obj.label}}', default='{{obj.value}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ with_options_predefined_date_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_with_options_predefined_datetime.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_with_options_predefined_datetime.py.jinja2 new file mode 100644 index 0000000..a39030f --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_with_options_predefined_datetime.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro with_options_predefined_datetime_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.WithOptions.Predefined.datetime({{obj.choices}}, label='{{obj.label}}', default='{{obj.value}}', 
regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.WithOptions.Predefined.datetime({{obj.choices}}, label='{{obj.label}}', default='{{obj.value}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ with_options_predefined_datetime_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_with_options_predefined_int.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_with_options_predefined_int.py.jinja2 new file mode 100644 index 0000000..e70f870 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_with_options_predefined_int.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro with_options_predefined_int_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.WithOptions.Predefined.int({{obj.choices}}, label='{{obj.label}}', default='{{obj.value}}', regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.WithOptions.Predefined.int({{obj.choices}}, label='{{obj.label}}', default='{{obj.value}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ with_options_predefined_int_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_with_options_predefined_multiline.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_with_options_predefined_multiline.py.jinja2 new file mode 100644 index 0000000..364edc1 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_with_options_predefined_multiline.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro with_options_predefined_multiline_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.WithOptions.Predefined.multiline({{obj.choices}}, label='{{obj.label}}', default={{obj.value}}, regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.WithOptions.Predefined.multiline({{obj.choices}}, label='{{obj.label}}', default={{obj.value}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ with_options_predefined_multiline_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_with_options_predefined_string.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_with_options_predefined_string.py.jinja2 new file mode 100644 index 0000000..70e8910 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_with_options_predefined_string.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro with_options_predefined_string_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.WithOptions({{obj.choices}}, label='{{obj.label}}', default='{{obj.value}}', regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.WithOptions({{obj.choices}}, label='{{obj.label}}', default='{{obj.value}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, 
runtime={{obj.runtime}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ with_options_predefined_string_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/var_with_options_predefined_time.py.jinja2 b/framework/calm/dsl/decompile/schemas/var_with_options_predefined_time.py.jinja2 new file mode 100644 index 0000000..03d5d8b --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/var_with_options_predefined_time.py.jinja2 @@ -0,0 +1,9 @@ +{%- macro with_options_predefined_time_var(obj) -%} +{%- if obj.regex %} +{{obj.name}} = CalmVariable.WithOptions.Predefined.time({{obj.choices}}, label='{{obj.label}}', default='{{obj.value}}', regex='{{obj.regex}}', validate_regex={{obj.validate_regex}}, is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- else %} +{{obj.name}} = CalmVariable.WithOptions.Predefined.time({{obj.choices}}, label='{{obj.label}}', default='{{obj.value}}', is_mandatory={{obj.is_mandatory}}, is_hidden={{obj.is_hidden}}, runtime={{obj.runtime}}, description='{{obj.description}}') +{%- endif %} +{%- endmacro %} + +{{ with_options_predefined_time_var(obj) }} diff --git a/framework/calm/dsl/decompile/schemas/vm_disk_package.py.jinja2 b/framework/calm/dsl/decompile/schemas/vm_disk_package.py.jinja2 new file mode 100644 index 0000000..39577b1 --- /dev/null +++ b/framework/calm/dsl/decompile/schemas/vm_disk_package.py.jinja2 @@ -0,0 +1,5 @@ +{%- macro package(obj) -%} +{{obj.name}} = vm_disk_package(name='{{obj.name}}', description='{{obj.description}}', config={{obj.config}}) +{%- endmacro %} + +{{ package(obj) }} diff --git a/framework/calm/dsl/decompile/service.py b/framework/calm/dsl/decompile/service.py new file mode 100644 index 0000000..4165c42 --- /dev/null +++ b/framework/calm/dsl/decompile/service.py @@ -0,0 +1,72 @@ +from calm.dsl.decompile.render import render_template +from calm.dsl.builtins import ServiceType +from calm.dsl.decompile.ref import render_ref_template +from calm.dsl.decompile.variable import render_variable_template +from calm.dsl.decompile.action import render_action_template, update_runbook_action_map +from calm.dsl.decompile.ref_dependency import update_service_name +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +def render_service_template(cls): + + LOG.debug("Rendering {} service template".format(cls.__name__)) + if not isinstance(cls, ServiceType): + raise TypeError("{} is not of type {}".format(cls, ServiceType)) + + # Entity context + entity_context = "Service_" + cls.__name__ + + user_attrs = cls.get_user_attrs() + user_attrs["name"] = cls.__name__ + user_attrs["description"] = cls.__doc__ or "" + + # Update service name map and gui name + gui_display_name = getattr(cls, "name", "") or cls.__name__ + if gui_display_name != cls.__name__: + user_attrs["gui_display_name"] = gui_display_name + + # updating ui and dsl name mapping + update_service_name(gui_display_name, cls.__name__) + + depends_on_list = [] + for entity in user_attrs.get("dependencies", []): + depends_on_list.append(render_ref_template(entity)) + + variable_list = [] + for entity in user_attrs.get("variables", []): + variable_list.append(render_variable_template(entity, entity_context)) + + action_list = [] + system_actions = {v: k for k, v in ServiceType.ALLOWED_SYSTEM_ACTIONS.items()} + + for entity in user_attrs.get("actions", []): + if entity.__name__ in list(system_actions.keys()): + entity.name = system_actions[entity.__name__] + 
entity.__name__ = system_actions[entity.__name__] + + # Registering service action runbooks earlier as they can be called by service tasks also. Ex: + # class SampleService + # def __create__(): + # PHPService.__restart__() + + action_runbook = entity.runbook + action_runbook_name = ( + getattr(action_runbook, "name", "") or action_runbook.__name__ + ) + update_runbook_action_map(action_runbook_name, entity.__name__) + + for entity in user_attrs.get("actions", []): + rendered_txt = render_action_template(entity, entity_context) + if rendered_txt: + action_list.append(rendered_txt) + + user_attrs["dependencies"] = ",".join(depends_on_list) + user_attrs["variables"] = variable_list + user_attrs["actions"] = action_list + + # TODO add ports, ..etc. + + text = render_template("service.py.jinja2", obj=user_attrs) + return text.strip() diff --git a/framework/calm/dsl/decompile/substrate.py b/framework/calm/dsl/decompile/substrate.py new file mode 100644 index 0000000..42ed2d7 --- /dev/null +++ b/framework/calm/dsl/decompile/substrate.py @@ -0,0 +1,158 @@ +from ruamel import yaml +import os + +from calm.dsl.decompile.render import render_template +from calm.dsl.decompile.action import render_action_template +from calm.dsl.decompile.readiness_probe import render_readiness_probe_template +from calm.dsl.decompile.file_handler import get_specs_dir, get_specs_dir_key +from calm.dsl.builtins import SubstrateType, get_valid_identifier +from calm.dsl.decompile.ahv_vm import render_ahv_vm +from calm.dsl.decompile.ref_dependency import update_substrate_name +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +def render_substrate_template(cls, vm_images=[]): + + LOG.debug("Rendering {} substrate template".format(cls.__name__)) + if not isinstance(cls, SubstrateType): + raise TypeError("{} is not of type {}".format(cls, SubstrateType)) + + # Entity context + entity_context = "Substrate_" + cls.__name__ + + user_attrs = cls.get_user_attrs() + user_attrs["name"] = cls.__name__ + user_attrs["description"] = cls.__doc__ or "" + + # Update substrate name map and gui name + gui_display_name = getattr(cls, "name", "") or cls.__name__ + if gui_display_name != cls.__name__: + user_attrs["gui_display_name"] = gui_display_name + + # updating ui and dsl name mapping + update_substrate_name(gui_display_name, cls.__name__) + + provider_spec_editables = user_attrs.get("provider_spec_editables", {}) + create_spec_editables = provider_spec_editables.get("create_spec", {}) + readiness_probe_editables = provider_spec_editables.get("readiness_probe", {}) + + # Handle readiness probe for substrate + rp_editable_list = [] + for k, v in readiness_probe_editables.items(): + if v: + rp_editable_list.append(k) + + # Appending readiness_probe editables to readiness_probe object + readiness_probe = user_attrs["readiness_probe"] + readiness_probe.editables_list = rp_editable_list + user_attrs["readiness_probe"] = render_readiness_probe_template( + user_attrs["readiness_probe"] + ) + + spec_dir = get_specs_dir() + + # Handle create spec runtime editables + if create_spec_editables: + create_spec_editable_file_name = cls.__name__ + "_create_spec_editables.yaml" + file_location = os.path.join(spec_dir, create_spec_editable_file_name) + dsl_file_location_alias = "os.path.join('{}', '{}')".format( + get_specs_dir_key(), create_spec_editable_file_name + ) + user_attrs["provider_spec_editables"] = "read_spec({})".format( + dsl_file_location_alias + ) + + # Write editable spec to separate file + with 
open(file_location, "w+") as fd: + fd.write(yaml.dump(create_spec_editables, default_flow_style=False)) + + # Handle provider_spec for substrate + provider_spec = cls.provider_spec + if cls.provider_type == "AHV_VM": + # Provider Spec is converted to ahv vm class in substrate decompile method only + boot_config = getattr(provider_spec.resources, "boot_config", {}) + user_attrs["provider_spec"] = provider_spec.__name__ + ahv_vm_str = render_ahv_vm(provider_spec, boot_config) + + else: + # creating a file for storing provider_spec + provider_spec_file_name = cls.__name__ + "_provider_spec.yaml" + user_attrs["provider_spec"] = get_provider_spec_string( + spec=provider_spec, + filename=provider_spec_file_name, + provider_type=cls.provider_type, + vm_images=vm_images, + ) + + # Write provider spec to separate file + file_location = os.path.join(spec_dir, provider_spec_file_name) + with open(file_location, "w+") as fd: + fd.write(yaml.dump(provider_spec, default_flow_style=False)) + + # Actions + action_list = [] + system_actions = {v: k for k, v in SubstrateType.ALLOWED_FRAGMENT_ACTIONS.items()} + for action in user_attrs.get("actions", []): + if action.__name__ in list(system_actions.keys()): + action.name = system_actions[action.__name__] + action.__name__ = system_actions[action.__name__] + action_list.append(render_action_template(action, entity_context)) + + user_attrs["actions"] = action_list + + substrate_text = render_template(schema_file="substrate.py.jinja2", obj=user_attrs) + if cls.provider_type == "AHV_VM": + # Append definition for ahv vm class on top of substrate class + substrate_text = "{}\n{}".format(ahv_vm_str, substrate_text) + + return substrate_text.strip() + + +def get_provider_spec_string(spec, filename, provider_type, vm_images): + + # TODO add switch to use YAML_file/Helper_class for ahv provider + dsl_file_location_alias = "os.path.join('{}', '{}')".format( + get_specs_dir_key(), filename + ) + if provider_type == "AHV_VM": + disk_list = spec["resources"]["disk_list"] + + disk_ind_img_map = {} + for ind, disk in enumerate(disk_list): + data_source_ref = disk.get("data_source_reference", {}) + if data_source_ref: + if data_source_ref.get("kind") == "app_package": + disk_ind_img_map[ind + 1] = get_valid_identifier( + data_source_ref.get("name") + ) + data_source_ref.pop("uuid", None) + + disk_pkg_string = "" + for k, v in disk_ind_img_map.items(): + disk_pkg_string += ",{}: {}".format(k, v) + if disk_pkg_string.startswith(","): + disk_pkg_string = disk_pkg_string[1:] + disk_pkg_string = "{" + disk_pkg_string + "}" + + res = "read_ahv_spec({}, disk_packages = {})".format( + dsl_file_location_alias, disk_pkg_string + ) + + elif provider_type == "VMWARE_VM": + spec_template = get_valid_identifier(spec["template"]) + + if spec_template in vm_images: + spec["template"] = "" + res = "read_vmw_spec({}, vm_template={})".format( + dsl_file_location_alias, spec_template + ) + + else: + res = "read_vmw_spec({})".format(dsl_file_location_alias) + + else: + res = "read_provider_spec({})".format(dsl_file_location_alias) + + return res diff --git a/framework/calm/dsl/decompile/task.py b/framework/calm/dsl/decompile/task.py new file mode 100644 index 0000000..28bef33 --- /dev/null +++ b/framework/calm/dsl/decompile/task.py @@ -0,0 +1,189 @@ +import os + +from calm.dsl.decompile.render import render_template +from calm.dsl.decompile.ref import render_ref_template +from calm.dsl.decompile.credential import get_cred_var_name +from calm.dsl.decompile.file_handler import get_scripts_dir, 
get_scripts_dir_key +from calm.dsl.builtins import TaskType +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +def render_task_template( + cls, entity_context="", RUNBOOK_ACTION_MAP={}, CONFIG_SPEC_MAP={} +): + + LOG.debug("Rendering {} task template".format(cls.name)) + if not isinstance(cls, TaskType): + raise TypeError("{} is not of type {}".format(cls, TaskType)) + + # update entity_context + entity_context = entity_context + "_Task_" + cls.__name__ + + user_attrs = cls.get_user_attrs() + user_attrs["name"] = cls.name + + target = getattr(cls, "target_any_local_reference", None) + if target: # target will be modified to have correct name(DSL name) + user_attrs["target"] = render_ref_template(target) + + cred = cls.attrs.get("login_credential_local_reference", None) + if cred: + user_attrs["cred"] = "ref({})".format( + get_cred_var_name(getattr(cred, "name", "") or cred.__name__) + ) + + if cls.type == "EXEC": + script_type = cls.attrs["script_type"] + cls.attrs["script_file"] = create_script_file( + script_type, cls.attrs["script"], entity_context + ) + + if script_type == "sh": + schema_file = "task_exec_ssh.py.jinja2" + + elif script_type == "static": + schema_file = "task_exec_escript.py.jinja2" + + elif script_type == "npsscript": + schema_file = "task_exec_powershell.py.jinja2" + + elif cls.type == "SET_VARIABLE": + variables = cls.attrs.get("eval_variables", None) + if variables: + user_attrs["variables"] = variables + script_type = cls.attrs["script_type"] + cls.attrs["script_file"] = create_script_file( + script_type, cls.attrs["script"], entity_context + ) + + if script_type == "sh": + schema_file = "task_setvariable_ssh.py.jinja2" + + elif script_type == "static": + schema_file = "task_setvariable_escript.py.jinja2" + + elif script_type == "npsscript": + schema_file = "task_setvariable_powershell.py.jinja2" + + elif cls.type == "DELAY": + if hasattr(cls, "attrs"): + user_attrs["delay_seconds"] = cls.attrs.get("interval_secs", 0) + schema_file = "task_delay.py.jinja2" + + elif cls.type == "SCALING": + scaling_count = cls.attrs.get("scaling_count", 1) + if scaling_count: + user_attrs["scaling_count"] = scaling_count + scaling_type = cls.attrs["scaling_type"] + if scaling_type == "SCALEOUT": + schema_file = "task_scaling_scaleout.py.jinja2" + + elif scaling_type == "SCALEIN": + schema_file = "task_scaling_scalein.py.jinja2" + elif cls.type == "HTTP": + attrs = cls.attrs + user_attrs["headers"] = {} + user_attrs["secret_headers"] = {} + user_attrs["status_mapping"] = {} + + for var in attrs.get("headers", []): + var_type = var["type"] + if var_type == "LOCAL": + user_attrs["headers"][var["name"]] = var["value"] + + elif var_type == "SECRET": + user_attrs["secret_headers"][var["name"]] = var["value"] + + for status in attrs.get("expected_response_params", []): + user_attrs["status_mapping"][status["code"]] = ( + True if status["status"] == "SUCCESS" else False + ) + + # Store auth objects + auth_obj = attrs.get("authentication", {}) + auth_type = auth_obj.get("type", "") + if auth_type == "basic_with_cred": + auth_cred = auth_obj.get("credential_local_reference", None) + if auth_cred: + user_attrs["cred"] = "ref({})".format( + get_cred_var_name( + getattr(auth_cred, "name", "") or auth_cred.__name__ + ) + ) + + user_attrs["response_paths"] = attrs.get("response_paths", {}) + method = attrs["method"] + + if (method == "POST" or method == "PUT") and not cls.attrs["request_body"]: + cls.attrs["request_body"] = {} + + if method == "GET": + 
schema_file = "task_http_get.py.jinja2" + + elif method == "POST": + schema_file = "task_http_post.py.jinja2" + + elif method == "PUT": + schema_file = "task_http_put.py.jinja2" + + elif method == "DELETE": + # TODO remove it from here + if not cls.attrs["request_body"]: + cls.attrs["request_body"] = {} + schema_file = "task_http_delete.py.jinja2" + + elif cls.type == "CALL_RUNBOOK": + runbook = cls.attrs["runbook_reference"] + runbook_name = getattr(runbook, "name", "") or runbook.__name__ + user_attrs = { + "name": cls.name, + "action": RUNBOOK_ACTION_MAP[runbook_name], + "target": target.name, + } + schema_file = "task_call_runbook.py.jinja2" + + elif cls.type == "CALL_CONFIG": + config_name = cls.attrs["config_spec_reference"] + user_attrs = { + "name": cls.name, + "config": CONFIG_SPEC_MAP[config_name]["global_name"], + } + schema_file = "task_call_config.py.jinja2" + + else: + LOG.error("Task type does not match any known types") + sys.exit("Invalid task task") + + text = render_template(schema_file=schema_file, obj=user_attrs) + return text.strip() + + +def create_script_file(script_type, script="", entity_context=""): + """create the script file and return the file location""" + + # Use task context for unique names + file_name = entity_context + scripts_dir = get_scripts_dir() + + if script_type == "sh": + file_name += ".sh" + + elif script_type == "npsscript": + file_name += ".ps1" + + elif script_type == "static": + file_name += ".py" + + else: + raise TypeError("Script Type {} not supported".format(script_type)) + + file_location = os.path.join(scripts_dir, file_name) + with open(file_location, "w+") as fd: + fd.write(script) + + dsl_file_location = "os.path.join('{}', '{}')".format( + get_scripts_dir_key(), file_name + ) + return dsl_file_location diff --git a/framework/calm/dsl/decompile/variable.py b/framework/calm/dsl/decompile/variable.py new file mode 100644 index 0000000..0263a05 --- /dev/null +++ b/framework/calm/dsl/decompile/variable.py @@ -0,0 +1,199 @@ +import os + +from calm.dsl.decompile.render import render_template +from calm.dsl.decompile.task import render_task_template +from calm.dsl.builtins import VariableType, TaskType +from calm.dsl.decompile.file_handler import get_local_dir +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) +SECRET_VAR_FILES = [] + + +def render_variable_template(cls, entity_context): + + LOG.debug("Rendering {} variable template".format(cls.__name__)) + if not isinstance(cls, VariableType): + raise TypeError("{} is not of type {}".format(cls, VariableType)) + + # Updating the context of variables + entity_context = entity_context + "_variable_" + cls.__name__ + + user_attrs = cls.get_user_attrs() + user_attrs["description"] = cls.__doc__ or "" + + # Escape new line character. 
As it is inline parameter for CalmVariable helper + user_attrs["description"] = user_attrs["description"].replace("\n", "\\n") + + var_val_type = getattr(cls, "value_type", "STRING") + var_type = "" + schema_file = None + + if not cls.options: + var_type = "simple" + + else: + options = cls.options.get_dict() + choices = options.get("choices", []) + option_type = options.get("type", "") + + if (not choices) and (option_type == "PREDEFINED"): + var_type = "simple" + + if cls.regex: + regex = cls.regex.get_dict() + user_attrs["regex"] = regex.get("value", None) + user_attrs["validate_regex"] = regex.get("should_validate", False) + + else: + user_attrs["regex"] = None + user_attrs["validate_regex"] = False + + if cls.editables: + user_attrs["runtime"] = cls.editables["value"] + else: + user_attrs["runtime"] = False + + user_attrs["name"] = cls.__name__ + + if var_type == "simple": + is_secret = True if user_attrs["type"] == "SECRET" else False + + if is_secret: + user_attrs["value"] = get_secret_var_val(entity_context) + if var_val_type == "STRING": + schema_file = "var_simple_secret_string.py.jinja2" + elif var_val_type == "INT": + schema_file = "var_simple_secret_int.py.jinja2" + elif var_val_type == "TIME": + schema_file = "var_simple_secret_time.py.jinja2" + elif var_val_type == "DATE": + schema_file = "var_simple_secret_date.py.jinja2" + elif var_val_type == "DATE_TIME": + schema_file = "var_simple_secret_datetime.py.jinja2" + elif var_val_type == "MULTILINE_STRING": + schema_file = "var_simple_secret_multiline.py.jinja2" + + else: + if var_val_type == "STRING": + schema_file = "var_simple_string.py.jinja2" + elif var_val_type == "INT": + schema_file = "var_simple_int.py.jinja2" + elif var_val_type == "TIME": + schema_file = "var_simple_time.py.jinja2" + elif var_val_type == "DATE": + schema_file = "var_simple_date.py.jinja2" + elif var_val_type == "DATE_TIME": + schema_file = "var_simple_datetime.py.jinja2" + elif var_val_type == "MULTILINE_STRING": + user_attrs["value"] = repr(user_attrs["value"]) + schema_file = "var_simple_multiline.py.jinja2" + + else: + data_type = cls.data_type + options = cls.options.get_dict() + option_type = options.get("type", "PREDEFINED") + + if option_type == "PREDEFINED": + user_attrs["choices"] = options.get("choices", []) + + if data_type == "BASE": + if var_val_type == "STRING": + schema_file = "var_with_options_predefined_string.py.jinja2" + elif var_val_type == "INT": + schema_file = "var_with_options_predefined_int.py.jinja2" + elif var_val_type == "DATE": + schema_file = "var_with_options_predefined_date.py.jinja2" + elif var_val_type == "TIME": + schema_file = "var_with_options_predefined_time.py.jinja2" + elif var_val_type == "DATE_TIME": + schema_file = "var_with_options_predefined_datetime.py.jinja2" + elif var_val_type == "MULTILINE_STRING": + user_attrs["value"] = repr(user_attrs["value"]) + schema_file = "var_with_options_predefined_multiline.py.jinja2" + + else: + defaults = cls.value + user_attrs["value"] = defaults.split(",") + if var_val_type == "STRING": + schema_file = "var_with_options_predefined_array_string.py.jinja2" + elif var_val_type == "INT": + schema_file = "var_with_options_predefined_array_int.py.jinja2" + elif var_val_type == "DATE": + schema_file = "var_with_options_predefined_array_date.py.jinja2" + elif var_val_type == "TIME": + schema_file = "var_with_options_predefined_array_time.py.jinja2" + elif var_val_type == "DATE_TIME": + schema_file = "var_with_options_predefined_array_datetime.py.jinja2" + elif var_val_type == 
"MULTILINE_STRING": + user_attrs["value"] = repr(user_attrs["value"]) + schema_file = ( + "var_with_options_predefined_array_multiline.py.jinja2" + ) + + else: + options.pop("choices", None) + task = TaskType.decompile(options) + task.__name__ = "SampleTask" + user_attrs["value"] = render_task_template( + task, entity_context=entity_context + ) + + if data_type == "BASE": + if var_val_type == "STRING": + schema_file = "var_with_options_fromTask_string.py.jinja2" + elif var_val_type == "INT": + schema_file = "var_with_options_fromTask_int.py.jinja2" + elif var_val_type == "DATE": + schema_file = "var_with_options_fromTask_date.py.jinja2" + elif var_val_type == "TIME": + schema_file = "var_with_options_fromTask_time.py.jinja2" + elif var_val_type == "DATE_TIME": + schema_file = "var_with_options_fromTask_datetime.py.jinja2" + elif var_val_type == "MULTILINE_STRING": + schema_file = "var_with_options_fromTask_multiline.py.jinja2" + else: + if var_val_type == "STRING": + schema_file = "var_with_options_fromTask_array_string.py.jinja2" + elif var_val_type == "INT": + schema_file = "var_with_options_fromTask_array_int.py.jinja2" + elif var_val_type == "DATE": + schema_file = "var_with_options_fromTask_array_date.py.jinja2" + elif var_val_type == "TIME": + schema_file = "var_with_options_fromTask_array_time.py.jinja2" + elif var_val_type == "DATE_TIME": + schema_file = "var_with_options_fromTask_array_datetime.py.jinja2" + elif var_val_type == "MULTILINE_STRING": + schema_file = "var_with_options_fromTask_array_multiline.py.jinja2" + + if not schema_file: + raise Exception("Unknown variable type") + + text = render_template(schema_file=schema_file, obj=user_attrs) + return text.strip() + + +def get_secret_var_val(entity_context): + + global SECRET_VAR_FILES + + SECRET_VAR_FILES.append(entity_context) + file_location = os.path.join(get_local_dir(), entity_context) + + with open(file_location, "w+") as fd: + fd.write("") + + # Replace read_local_file by a constant + return entity_context + + +def get_secret_variable_files(): + """return the global local files used for secret variables""" + + return SECRET_VAR_FILES + + +def init_variable_globals(): + + global SECRET_VAR_FILES + SECRET_VAR_FILES = [] diff --git a/framework/calm/dsl/decompile/vm_disk_package.py b/framework/calm/dsl/decompile/vm_disk_package.py new file mode 100644 index 0000000..cd52ce7 --- /dev/null +++ b/framework/calm/dsl/decompile/vm_disk_package.py @@ -0,0 +1,26 @@ +from calm.dsl.decompile.render import render_template +from calm.dsl.builtins import VmDiskPackageType +from calm.dsl.decompile.ref_dependency import update_package_name + + +def render_vm_disk_package_template(cls): + + if not isinstance(cls, VmDiskPackageType): + raise TypeError("{} is not of type {}".format(cls, VmDiskPackageType)) + + # It will be used for image reference in ahv vm disks + gui_display_name = getattr(cls, "name", "") or cls.__name__ + update_package_name(gui_display_name, cls.__name__) + + disk_data = cls.get_dict() + user_attrs = { + "name": cls.__name__, + "description": disk_data.pop("description"), + "config": disk_data, + } + + # Escape new line character. 
As it is inline parameter for vm_disk_package helper + user_attrs["description"] = user_attrs["description"].replace("\n", "\\n") + + text = render_template("vm_disk_package.py.jinja2", obj=user_attrs) + return text.strip() diff --git a/framework/calm/dsl/init/__init__.py b/framework/calm/dsl/init/__init__.py new file mode 100644 index 0000000..2ac5e98 --- /dev/null +++ b/framework/calm/dsl/init/__init__.py @@ -0,0 +1,5 @@ +from .blueprint import init_bp +from .runbook import init_runbook + + +__all__ = ["init_bp", "init_runbook"] diff --git a/framework/calm/dsl/init/blueprint/__init__.py b/framework/calm/dsl/init/blueprint/__init__.py new file mode 100644 index 0000000..9ec39a9 --- /dev/null +++ b/framework/calm/dsl/init/blueprint/__init__.py @@ -0,0 +1,3 @@ +from .render import init_bp + +__all__ = ["init_bp"] diff --git a/framework/calm/dsl/init/blueprint/ahv_blueprint.py.jinja2 b/framework/calm/dsl/init/blueprint/ahv_blueprint.py.jinja2 new file mode 100644 index 0000000..3bcd6c6 --- /dev/null +++ b/framework/calm/dsl/init/blueprint/ahv_blueprint.py.jinja2 @@ -0,0 +1,252 @@ +{% macro BlueprintTemplate(bp_name, subnet_name, cluster_name) -%} +# THIS FILE IS AUTOMATICALLY GENERATED. +""" +Sample Calm DSL for {{bp_name}} blueprint + +The top-level folder contains the following files: +HelloBlueprint/ +├── .local +│ └── keys +│ ├── centos +│ └── centos_pub +├── blueprint.py +└── scripts + ├── pkg_install_task.sh + └── pkg_uninstall_task.sh + +On launch, this blueprint does the following: + 1. Creates AHV VM (2 vCPUs, 4G Mem, 1 core) + 2. Installs CentOS 7 by downloading image from http://download.nutanix.com. + 3. Injects SSH public key in the VM using cloud init. + 4. Creates calm credential using the SSH private key to run tasks on the VM. + +Order of execution for every deployment during blueprint launch: + 1. Substrate.__pre_create__() (Only http and escript tasks are allowed here) + 2. Substrate.__create__() (Generated from provider_spec) + 3. Package.__install__() (Scripts to install application go here) + 4. Service.__create__() (Scripts to configure and create the service go here) + 5. Service.__start__() (Scripts to start the service go here) + +Useful commands (execute from top-level directory): + 1. calm compile bp --file {{bp_name}}Blueprint/blueprint.py + 2. calm create bp --file {{bp_name}}Blueprint/blueprint.py --name + 3. calm get bps --name + 4. calm describe bp + 5. calm launch bp --app_name -i + 6. calm get apps --name + 7. calm describe app + 8. calm delete app + 9. 
calm delete bp + +""" + +import os + +from calm.dsl.builtins import Service, Package, Substrate +from calm.dsl.builtins import Deployment, Profile, Blueprint +from calm.dsl.builtins import CalmVariable as Variable +from calm.dsl.builtins import CalmTask as Task +from calm.dsl.builtins import action, parallel, ref, basic_cred +from calm.dsl.builtins import read_local_file +from calm.dsl.builtins import vm_disk_package, AhvVmDisk, AhvVmNic +from calm.dsl.builtins import AhvVmGC, AhvVmResources, AhvVm + + +# SSH Credentials +CENTOS_USER = "centos" +CENTOS_KEY = read_local_file(os.path.join("keys", "centos")) +CENTOS_PUBLIC_KEY = read_local_file(os.path.join("keys", "centos_pub")) +CentosCred = basic_cred( + CENTOS_USER, CENTOS_KEY, name="Centos", type="KEY", default=True, +) + +# OS Image details for VM +CENTOS_IMAGE_SOURCE = "http://download.nutanix.com/calm/CentOS-7-x86_64-1810.qcow2" +CentosPackage = vm_disk_package( + name="centos_disk", config={"image": {"source": CENTOS_IMAGE_SOURCE}}, +) + + +class {{bp_name}}Service(Service): + """Sample Service""" + + # Service Variables + ENV = Variable.WithOptions.Predefined.string( + ["DEV", "PROD"], default="DEV", is_mandatory=True, runtime=True + ) + + # Service Actions + @action + def __create__(): + # Step 1 + Task.Exec.ssh(name="Task1", script="echo 'Service create in ENV=@@{ENV}@@'") + + @action + def __start__(): + # Step 1 + Task.Exec.ssh(name="Task1", script="echo 'Service start in ENV=@@{ENV}@@'") + + @action + def __stop__(): + # Step 1 + Task.Exec.ssh(name="Task1", script="echo 'Service stop in ENV=@@{ENV}@@'") + + @action + def __delete__(): + # Step 1 + Task.Exec.ssh(name="Task1", script="echo 'Service delete in ENV=@@{ENV}@@'") + + # Custom service actions + @action + def custom_action_1(): + """Sample service action""" + + # Step 1 + Task.Exec.ssh(name="Task11", script='echo "Hello"') + + # Step 2 + Task.Exec.ssh(name="Task12", script='echo "Hello again"') + + @action + def custom_action_2(): + + # Step 1 + Task.Exec.ssh(name="Task21", script="date") + + # Step 2 + with parallel(): # All tasks within this context will be run in parallel + Task.Exec.ssh(name="Task22a", script="date") + Task.Exec.ssh(name="Task22b", script="date") + + # Step 3 + Task.Exec.ssh(name="Task23", script="date") + + +class {{bp_name}}Package(Package): + """Sample Package""" + + # Services created by installing this Package + services = [ref({{bp_name}}Service)] + + # Package Variables + sample_pkg_var = Variable.Simple("Sample package installation") + + # Package Actions + @action + def __install__(): + + # Step 1 + Task.Exec.ssh( + name="Task1", filename=os.path.join("scripts", "pkg_install_task.sh") + ) + + @action + def __uninstall__(): + + # Step 1 + Task.Exec.ssh( + name="Task1", filename=os.path.join("scripts", "pkg_uninstall_task.sh") + ) + + +class {{bp_name}}VmResources(AhvVmResources): + + memory = 4 + vCPUs = 2 + cores_per_vCPU = 1 + disks = [ + AhvVmDisk.Disk.Scsi.cloneFromVMDiskPackage(CentosPackage, bootable=True), + ] + nics = [AhvVmNic.DirectNic.ingress(subnet="{{subnet_name}}", cluster="{{cluster_name}}")] + + guest_customization = AhvVmGC.CloudInit( + config={ + "users": [ + { + "name": CENTOS_USER, + "ssh-authorized-keys": [CENTOS_PUBLIC_KEY], + "sudo": ["ALL=(ALL) NOPASSWD:ALL"], + } + ] + } + ) + + +class {{bp_name}}Vm(AhvVm): + + resources = {{bp_name}}VmResources + categories = {"AppFamily": "Demo", "AppType": "Default"} + + +class {{bp_name}}Substrate(Substrate): + """AHV VM Substrate""" + + provider_type = "AHV_VM" + provider_spec = 
{{bp_name}}Vm + + # Substrate Actions + @action + def __pre_create__(): + + # Step 1 + Task.Exec.escript( + name="Task1", script="print 'Pre Create task runs before VM is created'" + ) + + @action + def __post_delete__(): + + # Step 1 + Task.Exec.escript( + name="Task1", script="print 'Post delete task runs after VM is deleted'" + ) + + +class {{bp_name}}Deployment(Deployment): + """Sample Deployment""" + + packages = [ref({{bp_name}}Package)] + substrate = ref({{bp_name}}Substrate) + + +class {{bp_name}}Profile(Profile): + + # Deployments under this profile + deployments = [{{bp_name}}Deployment] + + # Profile Variables + var1 = Variable.Simple("sample_val1", runtime=True) + var2 = Variable.Simple("sample_val2", runtime=True) + var3 = Variable.Simple.int("2", validate_regex=True, regex=r"^[\d]*$") + + # Profile Actions + @action + def custom_profile_action_1(): + """Sample description for a profile action""" + + # Step 1: Run a task on a service in the profile + Task.Exec.ssh( + name="Task1", + script='echo "Profile level action using @@{var1}@@ and @@{var2}@@ and @@{var3}@@"', + target=ref({{bp_name}}Service), + ) + + # Step 2: Call service action as a task. + # It will execute all tasks under the given action. + {{bp_name}}Service.custom_action_1(name="Task6") + + +class {{bp_name}}(Blueprint): + """ Sample blueprint for {{bp_name}} app using AHV VM""" + + credentials = [CentosCred] + services = [{{bp_name}}Service] + packages = [{{bp_name}}Package, CentosPackage] + substrates = [{{bp_name}}Substrate] + profiles = [{{bp_name}}Profile] + + +{%- endmacro %} + + +{{BlueprintTemplate(bp_name, subnet_name, cluster_name)}} diff --git a/framework/calm/dsl/init/blueprint/ahv_single_vm_blueprint.py.jinja2 b/framework/calm/dsl/init/blueprint/ahv_single_vm_blueprint.py.jinja2 new file mode 100644 index 0000000..a6ca1f4 --- /dev/null +++ b/framework/calm/dsl/init/blueprint/ahv_single_vm_blueprint.py.jinja2 @@ -0,0 +1,151 @@ +{% macro BlueprintTemplate(bp_name, subnet_name, cluster_name, vm_image) -%} +# THIS FILE IS AUTOMATICALLY GENERATED. +""" +Sample Calm DSL for {{bp_name}} blueprint + +The top-level folder contains the following files: +HelloBlueprint/ +├── .local +│ └── keys +│ ├── centos +│ └── centos_pub +├── blueprint.py +└── scripts + ├── pkg_install_task.sh + └── pkg_uninstall_task.sh + +On launch, this blueprint does the following: + 1. Creates AHV VM (2 vCPUs, 4G Mem, 1 core) + 2. Installs {{vm_image}} image on vm. + 3. Injects SSH public key in the VM using cloud init. + 4. Creates calm credential using the SSH private key to run tasks on the VM. + +Order of execution for every deployment during blueprint launch: + 1. Substrate.__pre_create__() (Only http and escript tasks are allowed here) + 2. Substrate.__create__() (Generated from provider_spec) + 3. Package.__install__() (Scripts to install application go here) + 4. Service.__create__() (Scripts to configure and create the service go here) + 5. Service.__start__() (Scripts to start the service go here) + +Useful commands (execute from top-level directory): + 1. calm compile bp --file {{bp_name}}Blueprint/blueprint.py + 2. calm create bp --file {{bp_name}}Blueprint/blueprint.py --name + 3. calm get bps --name + 4. calm describe bp + 5. calm launch bp --app_name -i + 6. calm get apps --name + 7. calm describe app + 8. calm delete app + 9. 
calm delete bp + +""" + +import os + +from calm.dsl.builtins import CalmVariable as Variable +from calm.dsl.builtins import CalmTask as Task +from calm.dsl.builtins import action, basic_cred +from calm.dsl.builtins import read_local_file +from calm.dsl.builtins import AhvVmResources, ahv_vm +from calm.dsl.builtins import AhvVmDisk, AhvVmNic, AhvVmGC +from calm.dsl.builtins import VmProfile, VmBlueprint +from calm.dsl.builtins import Metadata + + +# SSH Credentials +CENTOS_USER = "centos" +CENTOS_KEY = read_local_file(os.path.join("keys", "centos")) +CENTOS_PUBLIC_KEY = read_local_file(os.path.join("keys", "centos_pub")) +CentosCred = basic_cred( + CENTOS_USER, CENTOS_KEY, name="Centos", type="KEY", default=True, +) + + +class {{bp_name}}VmResources(AhvVmResources): + + memory = 4 + vCPUs = 2 + cores_per_vCPU = 1 + disks = [ + AhvVmDisk.Disk.Scsi.cloneFromImageService("{{vm_image}}", bootable=True), + ] + nics = [AhvVmNic.DirectNic.ingress(subnet="{{subnet_name}}", cluster="{{cluster_name}}")] + + guest_customization = AhvVmGC.CloudInit( + config={ + "users": [ + { + "name": CENTOS_USER, + "ssh-authorized-keys": [CENTOS_PUBLIC_KEY], + "sudo": ["ALL=(ALL) NOPASSWD:ALL"], + } + ] + } + ) + + +class {{bp_name}}Profile(VmProfile): + + # Profile Variables + var1 = Variable.Simple("sample_val1", runtime=True) + var2 = Variable.Simple("sample_val2", runtime=True) + + # Vm Spec for Substrate + provider_spec = ahv_vm(resources={{bp_name}}VmResources, name="{{bp_name}}Vm") + + # Package Actions + @action + def __install__(): + + # Package install variable + sample_pkg_var = Variable.Simple("Sample package installation") + + # Step 1 + Task.Exec.ssh( + name="Task1", filename=os.path.join("scripts", "pkg_install_task.sh") + ) + + @action + def __uninstall__(): + + # Step 1 + Task.Exec.ssh( + name="Task1", filename=os.path.join("scripts", "pkg_uninstall_task.sh") + ) + + # Substrate Actions + @action + def __pre_create__(): + + # Step 1 + Task.Exec.escript( + name="Task1", script="print 'Pre Create task runs before VM is created'" + ) + + # Profile Actions + @action + def custom_profile_action_1(): + """Sample description for a profile action""" + + # Note: Runbook Tasks, Deployment tasks are not allowed + Task.Exec.ssh( + name="Task1", + script='echo "Profile level action using @@{var1}@@ and @@{var2}@@"', + ) + + +class {{bp_name}}(VmBlueprint): + + # Credentials for blueprint + credentials = [CentosCred] + + profiles = [{{bp_name}}Profile] + + +class {{bp_name}}Metadata(Metadata): + + categories = {"TemplateType": "Vm"} + +{%- endmacro %} + +{{BlueprintTemplate(bp_name, subnet_name, cluster_name, vm_image)}} diff --git a/framework/calm/dsl/init/blueprint/render.py b/framework/calm/dsl/init/blueprint/render.py new file mode 100644 index 0000000..acc2259 --- /dev/null +++ b/framework/calm/dsl/init/blueprint/render.py @@ -0,0 +1,326 @@ +import os +import sys +import json +from jinja2 import Environment, PackageLoader +from Crypto.PublicKey import RSA + +from calm.dsl.config import get_context +from calm.dsl.store import Cache +from calm.dsl.builtins import read_file +from calm.dsl.log import get_logging_handle +from calm.dsl.providers import get_provider +from calm.dsl.constants import CACHE + +LOG = get_logging_handle(__name__) + + +def render_ahv_template(template, bp_name): + + ContextObj = get_context() + + project_config = ContextObj.get_project_config() + project_name = project_config.get("name") or "default" + project_cache_data = Cache.get_entity_data( + entity_type=CACHE.ENTITY.PROJECT, 
name=project_name + ) + if not project_cache_data: + LOG.error( + "Project {} not found. Please run: calm update cache".format(project_name) + ) + sys.exit(-1) + + # Fetch Nutanix_PC account registered + project_accounts = project_cache_data["accounts_data"] + account_uuids = project_accounts.get("nutanix_pc", []) + if not account_uuids: + LOG.error("No nutanix_pc account registered to project {}".format(project_name)) + + # Fetch data for first account + account_cache_data = Cache.get_entity_data_using_uuid( + entity_type="account", uuid=account_uuids[0] + ) + if not account_cache_data: + LOG.error( + "Account (uuid={}) not found. Please update cache".format(account_uuids[0]) + ) + sys.exit(-1) + + account_uuid = account_cache_data["uuid"] + account_name = account_cache_data["name"] + + # Fetch whitelisted subnets + whitelisted_subnets = project_cache_data["whitelisted_subnets"] + if not whitelisted_subnets: + LOG.error("No subnets registered to project {}".format(project_name)) + sys.exit(-1) + + account_subnets = whitelisted_subnets.get(account_uuid, []) + if not account_subnets: + LOG.error( + "No subnets registered to project {} for Nutanix PC account {}.".format( + project_name, account_name + ) + ) + sys.exit(-1) + + # Fetch data for first subnet + subnet_cache_data = Cache.get_entity_data_using_uuid( + entity_type=CACHE.ENTITY.AHV_SUBNET, + uuid=account_subnets[0], + account_uuid=account_uuid, + ) + if not subnet_cache_data: + # Case when project have a subnet that is not available in subnets from registered account + context_data = { + "Project Whitelisted Subnets": account_subnets, + "Account UUID": account_uuid, + "Project Name": project_name, + } + LOG.debug( + "Context data: {}".format( + json.dumps(context_data, indent=4, separators=(",", ": ")) + ) + ) + LOG.error( + "Subnet configuration mismatch in registered account's subnets and whitelisted subnets in project" + ) + sys.exit(-1) + + cluster_name = subnet_cache_data["cluster_name"] + default_subnet = subnet_cache_data["name"] + LOG.info( + "Using Nutanix PC account {}, cluster {}, subnet {}".format( + account_name, cluster_name, default_subnet + ) + ) + LOG.info("Rendering ahv template") + text = template.render( + bp_name=bp_name, subnet_name=default_subnet, cluster_name=cluster_name + ) + + return text.strip() + os.linesep + + +def render_single_vm_bp_ahv_template(template, bp_name): + + ContextObj = get_context() + + project_config = ContextObj.get_project_config() + project_name = project_config.get("name") or "default" + project_cache_data = Cache.get_entity_data( + entity_type=CACHE.ENTITY.PROJECT, name=project_name + ) + if not project_cache_data: + LOG.error( + "Project {} not found. Please run: calm update cache".format(project_name) + ) + sys.exit(-1) + + # Fetch Nutanix_PC account registered + project_accounts = project_cache_data["accounts_data"] + account_uuids = project_accounts.get("nutanix_pc", []) + if not account_uuids: + LOG.error("No nutanix_pc account registered to project {}".format(project_name)) + + # Fetch data for first account + account_cache_data = Cache.get_entity_data_using_uuid( + entity_type="account", uuid=account_uuids[0] + ) + if not account_cache_data: + LOG.error( + "Account (uuid={}) not found. 
Please update cache".format(account_uuids[0]) + ) + sys.exit(-1) + + account_uuid = account_cache_data["uuid"] + account_name = account_cache_data["name"] + + # Fetch whitelisted subnets + whitelisted_subnets = project_cache_data["whitelisted_subnets"] + if not whitelisted_subnets: + LOG.error("No subnets registered to project {}".format(project_name)) + sys.exit(-1) + + account_subnets = whitelisted_subnets.get(account_uuid, []) + if not account_subnets: + LOG.error( + "No subnets registered to project {} for Nutanix PC account {}.".format( + project_name, account_name + ) + ) + sys.exit(-1) + + # Fetch data for first subnet + subnet_cache_data = Cache.get_entity_data_using_uuid( + entity_type=CACHE.ENTITY.AHV_SUBNET, + uuid=account_subnets[0], + account_uuid=account_uuid, + ) + if not subnet_cache_data: + # Case when project have a subnet that is not available in subnets from registered account + context_data = { + "Project Whitelisted Subnets": account_subnets, + "Account UUID": account_uuid, + "Project Name": project_name, + } + LOG.debug( + "Context data: {}".format( + json.dumps(context_data, indent=4, separators=(",", ": ")) + ) + ) + LOG.error( + "Subnet configuration mismatch in registered account's subnets and whitelisted subnets in project" + ) + sys.exit(-1) + + cluster_name = subnet_cache_data["cluster_name"] + default_subnet = subnet_cache_data["name"] + + # Fetch image for vm + AhvVmProvider = get_provider("AHV_VM") + AhvObj = AhvVmProvider.get_api_obj() + try: + res = AhvObj.images(account_uuid=account_uuid) + except Exception: + LOG.error( + "Unable to fetch images for Nutanix_PC Account(uuid={})".format( + account_uuid + ) + ) + sys.exit(-1) + + # NOTE: Make sure you use `DISK` image in your jinja template + vm_image = None + for entity in res["entities"]: + name = entity["status"]["name"] + image_type = entity["status"]["resources"].get("image_type", None) or "" + + if image_type == "DISK_IMAGE": + vm_image = name + break + + if not vm_image: + LOG.error("No Disk image found on account(uuid='{}')".format(account_uuid)) + sys.exit(-1) + + LOG.info( + "Using Nutanix PC account {}, cluster {}, subnet {}".format( + account_name, cluster_name, default_subnet + ) + ) + LOG.info("Rendering ahv template") + text = template.render( + bp_name=bp_name, + subnet_name=default_subnet, + cluster_name=cluster_name, + vm_image=vm_image, + ) + + return text.strip() + os.linesep + + +template_map = { + "AHV_VM": { + "MULTI_VM": ("ahv_blueprint.py.jinja2", render_ahv_template), + "SINGLE_VM": ( + "ahv_single_vm_blueprint.py.jinja2", + render_single_vm_bp_ahv_template, + ), + } +} + + +def render_blueprint_template(bp_name, provider_type, bp_type): + + if provider_type not in template_map: + print( + "Provider {} not supported. 
Using AHV_VM as provider".format(provider_type) + ) + provider_type = "AHV_VM" + + schema_file, temp_render_helper = template_map.get(provider_type).get(bp_type) + + loader = PackageLoader(__name__, "") + env = Environment(loader=loader) + template = env.get_template(schema_file) + + return temp_render_helper(template, bp_name) + + +def create_bp_file(dir_name, bp_name, provider_type, bp_type): + + bp_text = render_blueprint_template(bp_name, provider_type, bp_type) + bp_path = os.path.join(dir_name, "blueprint.py") + + LOG.info("Writing bp file to {}".format(bp_path)) + with open(bp_path, "w") as fd: + fd.write(bp_text) + + +def create_cred_keys(dir_name): + + # Will create key via name centos/centos_pub + + key = RSA.generate(2048) + + # Write private key + private_key = key.export_key("PEM") + private_key_filename = os.path.join(dir_name, "centos") + with open(private_key_filename, "wb") as fd: + fd.write(private_key) + os.chmod(private_key_filename, 0o600) + + # Write public key + public_key = key.publickey().export_key("OpenSSH") + public_key_filename = os.path.join(dir_name, "centos_pub") + with open(public_key_filename, "wb") as fd: + fd.write(public_key) + os.chmod(public_key_filename, 0o600) + + +def create_scripts(dir_name): + + dir_path = os.path.dirname(os.path.realpath(__file__)) + scripts_dir = os.path.join(dir_path, "scripts") + for script_file in os.listdir(scripts_dir): + script_path = os.path.join(scripts_dir, script_file) + data = read_file(script_path) + + with open(os.path.join(dir_name, script_file), "w+") as fd: + fd.write(data) + + +def make_bp_dirs(dir_name, bp_name): + + bp_dir = "{}Blueprint".format(os.path.join(dir_name, bp_name)) + if not os.path.isdir(bp_dir): + os.makedirs(bp_dir) + + local_dir = os.path.join(bp_dir, ".local") + if not os.path.isdir(local_dir): + os.makedirs(local_dir) + + key_dir = os.path.join(local_dir, "keys") + if not os.path.isdir(key_dir): + os.makedirs(key_dir) + + script_dir = os.path.join(bp_dir, "scripts") + if not os.path.isdir(script_dir): + os.makedirs(script_dir) + + return (bp_dir, key_dir, script_dir) + + +def init_bp(bp_name, dir_name, provider_type, bp_type): + + bp_name = bp_name.strip().split()[0].title() + bp_dir, key_dir, script_dir = make_bp_dirs(dir_name, bp_name) + + # Creating keys + LOG.info("Generating keys for credentials") + create_cred_keys(key_dir) + + # create scripts + create_scripts(script_dir) + + create_bp_file(bp_dir, bp_name, provider_type, bp_type) diff --git a/framework/calm/dsl/init/blueprint/scripts/pkg_install_task.sh b/framework/calm/dsl/init/blueprint/scripts/pkg_install_task.sh new file mode 100644 index 0000000..8c9a76f --- /dev/null +++ b/framework/calm/dsl/init/blueprint/scripts/pkg_install_task.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +set -ex + +echo "@@{sample_pkg_var}@@" + +sudo yum install epel-release -y +sudo yum update -y + +echo "Package installation steps go here ..." diff --git a/framework/calm/dsl/init/blueprint/scripts/pkg_uninstall_task.sh b/framework/calm/dsl/init/blueprint/scripts/pkg_uninstall_task.sh new file mode 100644 index 0000000..28224a6 --- /dev/null +++ b/framework/calm/dsl/init/blueprint/scripts/pkg_uninstall_task.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +set -ex + +echo "Package uninstallation steps go here ..." 
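Usage sketch for the blueprint scaffolding helpers above, assuming the package is importable as calm.dsl and the project/account cache has already been populated with `calm update cache`; the blueprint name and target directory are illustrative placeholders, not values from the patch.

from calm.dsl.init import init_bp

# Generates <Name>Blueprint/ under dir_name with .local/keys (a freshly
# generated RSA key pair named centos/centos_pub), a scripts/ folder copied
# from the package (pkg_install_task.sh, pkg_uninstall_task.sh), and a
# blueprint.py rendered from the matching Jinja2 template.
init_bp(
    bp_name="Hello",          # normalized via strip().split()[0].title()
    dir_name=".",             # parent directory for the generated tree
    provider_type="AHV_VM",   # unsupported providers fall back to AHV_VM
    bp_type="SINGLE_VM",      # or "MULTI_VM" for ahv_blueprint.py.jinja2
)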
diff --git a/framework/calm/dsl/init/runbook/__init__.py b/framework/calm/dsl/init/runbook/__init__.py new file mode 100644 index 0000000..3f514c3 --- /dev/null +++ b/framework/calm/dsl/init/runbook/__init__.py @@ -0,0 +1,3 @@ +from .render import init_runbook + +__all__ = ["init_runbook"] diff --git a/framework/calm/dsl/init/runbook/render.py b/framework/calm/dsl/init/runbook/render.py new file mode 100644 index 0000000..6bc71ab --- /dev/null +++ b/framework/calm/dsl/init/runbook/render.py @@ -0,0 +1,78 @@ +import os +from jinja2 import Environment, PackageLoader + +from calm.dsl.builtins import read_file +from calm.dsl.log import get_logging_handle +from calm.dsl.config import get_context + +LOG = get_logging_handle(__name__) + + +def render_runbook_template(runbook_name): + + schema_file = "runbook.py.jinja2" + + loader = PackageLoader(__name__, "") + env = Environment(loader=loader) + template = env.get_template(schema_file) + LOG.info("Rendering runbook template") + + ContextObj = get_context() + server_config = ContextObj.get_server_config() + pc_ip = server_config["pc_ip"] + pc_port = server_config["pc_port"] + text = template.render( + runbook_name=runbook_name, + pc_ip=pc_ip, + pc_port=pc_port, + ) + LOG.info("Success") + + return text.strip() + os.linesep + + +def create_runbook_file(dir_name, runbook_name): + + rb_text = render_runbook_template(runbook_name) + rb_path = os.path.join(dir_name, "runbook.py") + + LOG.info("Writing runbook file to {}".format(rb_path)) + with open(rb_path, "w") as fd: + fd.write(rb_text) + LOG.info("Success") + + +def create_scripts(dir_name): + + dir_path = os.path.dirname(os.path.realpath(__file__)) + scripts_dir = os.path.join(dir_path, "scripts") + for script_file in os.listdir(scripts_dir): + script_path = os.path.join(scripts_dir, script_file) + data = read_file(script_path) + + with open(os.path.join(dir_name, script_file), "w+") as fd: + fd.write(data) + + +def make_runbook_dirs(dir_name, runbook_name): + + runbook_dir = "{}Runbook".format(os.path.join(dir_name, runbook_name)) + if not os.path.isdir(runbook_dir): + os.makedirs(runbook_dir) + + script_dir = os.path.join(runbook_dir, "scripts") + if not os.path.isdir(script_dir): + os.makedirs(script_dir) + + return (runbook_dir, script_dir) + + +def init_runbook(runbook_name, dir_name): + + runbook_name = runbook_name.strip().split()[0].title() + runbook_dir, script_dir = make_runbook_dirs(dir_name, runbook_name) + + # create scripts + create_scripts(script_dir) + + create_runbook_file(runbook_dir, runbook_name) diff --git a/framework/calm/dsl/init/runbook/runbook.py.jinja2 b/framework/calm/dsl/init/runbook/runbook.py.jinja2 new file mode 100644 index 0000000..deccf0e --- /dev/null +++ b/framework/calm/dsl/init/runbook/runbook.py.jinja2 @@ -0,0 +1,120 @@ +{% macro RunbookTemplate(runbook_name, pc_ip, pc_port) -%} +# THIS FILE IS AUTOMATICALLY GENERATED. +""" +Sample Calm DSL for {{runbook_name}} runbook + +The top-level folder contains the following files: +{{runbook_name}}Runbook/ +├── runbook.py +└── scripts + └── entity_stats.py + +On run, this runbook does the following: + 1. Creates a HTTP and a Linux Endpoint + 2. Create runbook variables to store calm entities count + 3. Added HTTP Tasks to get calm entities count + 4. Runs Exec Task (which prints entities status) and while loop task in parallel + 5. Under while loop task, a decision task is triggered using loop_variable + +Useful commands (execute from top-level directory): + 1. 
calm compile runbook --file {{runbook_name}}Runbook/runbook.py + 2. calm create runbook --file {{runbook_name}}Runbook/runbook.py --name + 3. calm get runbooks --name + 4. calm describe runbook + 5. calm run runbook -w + 6. calm get runbook_executions + 7. calm delete runbook + 8. calm watch runbook_execution + 9. calm pause runbook_execution + 10. calm resume runbook_execution + 11. calm abort runbook_execution + +""" +import json + +from calm.dsl.runbooks import RunbookVariable as Variable +from calm.dsl.runbooks import RunbookTask as Task, CalmEndpoint as Endpoint +from calm.dsl.runbooks import runbook, basic_cred +from calm.dsl.runbooks import parallel, branch + + +# Create Endpoints +Cred = basic_cred("vm_username", "vm_password", name="endpoint_cred") +PCEndpoint = Endpoint.HTTP("https://{{pc_ip}}:{{pc_port}}/api/nutanix/v3") +IPEndpoint = Endpoint.Linux.ip(["VM_IP"], cred=Cred) + + +@runbook +def {{runbook_name}}(endpoints=[PCEndpoint, IPEndpoint]): + """ Sample runbook for {{runbook_name}} """ + + # Defining variables for entity counts + endpoints_count = Variable.Simple.int("0", runtime=True) # noqa + blueprints_count = Variable.Simple.int("0", runtime=True) # noqa + runbooks_count = Variable.Simple.int("0", runtime=True) # noqa + apps_count = Variable.Simple.int("0", runtime=True) # noqa + + # HTTP Tasks to get CALM Entity Counts + # default target is set as endpoints[0] = PCEndpoint, therefore target not required for http tasks + Task.HTTP.post( + name="EndpointCount", + relative_url="/endpoints/list", + body=json.dumps({}), + headers={"Authorization": "Bearer @@{calm_jwt}@@"}, + content_type="application/json", + response_paths={"endpoints_count": "$.metadata.total_matches"}, + status_mapping={200: True}, + ) + Task.HTTP.post( + name="BlueprintCount", + relative_url="/blueprints/list", + body=json.dumps({}), + headers={"Authorization": "Bearer @@{calm_jwt}@@"}, + content_type="application/json", + response_paths={"blueprints_count": "$.metadata.total_matches"}, + status_mapping={200: True}, + ) + Task.HTTP.post( + name="RunbookCount", + relative_url="/runbooks/list", + body=json.dumps({}), + headers={"Authorization": "Bearer @@{calm_jwt}@@"}, + content_type="application/json", + response_paths={"runbooks_count": "$.metadata.total_matches"}, + status_mapping={200: True}, + ) + Task.HTTP.post( + name="AppCount", + relative_url="/apps/list", + body=json.dumps({}), + headers={"Authorization": "Bearer @@{calm_jwt}@@"}, + content_type="application/json", + response_paths={"apps_count": "$.metadata.total_matches"}, + status_mapping={200: True}, + ) + + # running tasks in parallel + with parallel() as p: + + with branch(p): + + # Exec Task to print all entity stats + Task.Exec.escript(name="EntityStats", filename="scripts/entity_stats.py2", target=endpoints[1]) + + with branch(p): + + # loop task example + with Task.Loop(iterations=2, name="LoopTask", loop_variable="loop_var"): + + # decision task example + with Task.Decision.escript(name="DecisionTask", script="print 'ExitCode-@@{loop_var}@@';exit(@@{loop_var}@@)") as d: + if d.ok: + Task.Exec.escript(name="TruePath", script="print 'True path is executed'") + else: + Task.Exec.escript(name="FalsePath", script="print 'False path is executed'") + + +{%- endmacro %} + + +{{RunbookTemplate(runbook_name, pc_ip, pc_port)}} diff --git a/framework/calm/dsl/init/runbook/scripts/entity_stats.py2 b/framework/calm/dsl/init/runbook/scripts/entity_stats.py2 new file mode 100644 index 0000000..a3dea80 --- /dev/null +++ 
b/framework/calm/dsl/init/runbook/scripts/entity_stats.py2 @@ -0,0 +1,7 @@ +# script to print calm entity counts + +print "Calm Entities Stats are as follows -" +print "Endpoint Count - @@{endpoints_count}@@" +print "Blueprint Count - @@{blueprints_count}@@" +print "Runbook Count - @@{runbooks_count}@@" +print "Apps Count - @@{apps_count}@@" diff --git a/framework/calm/dsl/log/__init__.py b/framework/calm/dsl/log/__init__.py new file mode 100644 index 0000000..54cbd2e --- /dev/null +++ b/framework/calm/dsl/log/__init__.py @@ -0,0 +1,4 @@ +from .logger import CustomLogging, get_logging_handle + + +__all__ = ["CustomLogging", "get_logging_handle"] diff --git a/framework/calm/dsl/log/logger.py b/framework/calm/dsl/log/logger.py new file mode 100644 index 0000000..6e373a7 --- /dev/null +++ b/framework/calm/dsl/log/logger.py @@ -0,0 +1,244 @@ +import logging +import inspect + +from colorlog import ColoredFormatter +import time +import sys + + +class StdErrFilter(logging.Filter): + """Filter for Stderr stream handler""" + + def filter(self, rec): + return rec.levelno >= logging.DEBUG + + +class CustomLogging: + """ + customization on logging module. + + custom logger with following log levels with appropriate color codes and + custom formatting for messages::“ + + * LOG.debug - [DEBUG] + * LOG.info - [INFO] + * LOG.warn - [WARNING] + * LOG.error - [ERROR] + * LOG.critical - [CRITICAL] + * LOG.exception - [ERROR] + + """ + + _VERBOSE_LEVEL = 20 + _SHOW_TRACE = False + + DEBUG = logging.DEBUG + INFO = logging.INFO + WARNING = logging.WARNING + ERROR = logging.ERROR + CRITICAL = logging.CRITICAL + + IS_RP_ENABLED = False + + def __init__(self, name): + """ + Build CustomLogger based on logging module + + Args: + name(str): name of the module/logger + + Returns: + None + """ + + self._ch1 = logging.StreamHandler() + self._ch1.addFilter(StdErrFilter()) + + # add custom formatter to console handler + self.__addCustomFormatter(self._ch1) + + # create custom logger + self._logger = logging.getLogger(name) + + # add console to logger + # self._logger.addHandler(self._ch1) + + # Add show trace option + self.show_trace = False + + @staticmethod + def __add_caller_info(msg): + # stack = inspect.stack() + # + # # filename = stack[2][1] + # # func = stack[2][3] + # ln = stack[2][2] + # if CustomLogging.IS_RP_ENABLED: + # ln = "{}-{}:{}".format(stack[2][1], stack[2][3], stack[2][2]) + # + # return ":{}] {}".format(ln, msg) + return msg + + @classmethod + def set_verbose_level(cls, lvl): + cls._VERBOSE_LEVEL = lvl + + @classmethod + def enable_show_trace(cls): + cls._SHOW_TRACE = True + + def get_logger(self): + self.set_logger_level(self._VERBOSE_LEVEL) + self.show_trace = self._SHOW_TRACE + return self._logger + + def get_logging_levels(self): + return ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] + + def set_logger_level(self, lvl): + """sets the logger verbose level""" + pass + # self._logger.setLevel(lvl) + + def info(self, msg, nl=True, **kwargs): + """ + info log level + + Args: + msg (str): message to log + nl (bool): Add newline (default: True) + + Returns: + None + """ + logger = self.get_logger() + + if not nl: + for handler in logger.handlers: + handler.terminator = " " + + logger.info(self.__add_caller_info(msg), **kwargs) + + if not nl: + for handler in logger.handlers: + handler.terminator = "\n" + + def warning(self, msg, *args, **kwargs): + """ + warning log level + + Args: + msg (str): message to log + + Returns: + None + """ + + logger = self.get_logger() + return 
logger.warning(self.__add_caller_info(msg), *args, **kwargs) + + def error(self, msg, *args, **kwargs): + """ + error log level + + Args: + msg (str): message to log + + Returns: + None + """ + + logger = self.get_logger() + if self.show_trace: + kwargs["stack_info"] = sys.exc_info() + return logger.error(self.__add_caller_info(msg), *args, **kwargs) + + def exception(self, msg, *args, **kwargs): + """ + exception log level + + Args: + msg (str): message to log + + Returns: + None + """ + + logger = self.get_logger() + exc_info = False + if self.show_trace: + exc_info = True + return logger.exception( + self.__add_caller_info(msg), exc_info=exc_info, *args, **kwargs + ) + + def critical(self, msg, *args, **kwargs): + """ + critical log level + + Args: + msg (str): message to log + + Returns: + None + """ + + logger = self.get_logger() + if self.show_trace: + kwargs["stack_info"] = sys.exc_info() + return logger.critical(self.__add_caller_info(msg), *args, **kwargs) + + def debug(self, msg, *args, **kwargs): + """ + debug log level + + Args: + msg (str): message to log + + Returns: + None + """ + + logger = self.get_logger() + return logger.debug(self.__add_caller_info(msg), *args, **kwargs) + + def __addCustomFormatter(self, ch): + """ + add ColorFormatter with custom colors for each log level + + Args: + None + + Returns + None + """ + + fmt = ( + "[%(asctime)s] " + "[%(log_color)s%(levelname)s%(reset)s] " + "[%(name)s%(message)s" + ) + + formatter = ColoredFormatter( + fmt, + datefmt="%Y-%m-%d %H:%M:%S", + reset=True, + log_colors={ + "DEBUG": "purple", + "INFO": "green", + "WARNING": "yellow", + "ERROR": "red", + "CRITICAL": "red", + }, + ) + formatter.converter = time.gmtime + + # add formatter to console handler + ch.setFormatter(formatter) + + +def get_logging_handle(name): + """returns the CustomLogging object""" + + logging_handle = CustomLogging(name) + return logging_handle diff --git a/framework/calm/dsl/providers/__init__.py b/framework/calm/dsl/providers/__init__.py new file mode 100644 index 0000000..34bcd03 --- /dev/null +++ b/framework/calm/dsl/providers/__init__.py @@ -0,0 +1,17 @@ +from .base import ( + get_provider, + get_providers, + get_provider_types, + get_provider_interface, +) + +from .plugins import get_plugins + +__all__ = [ + "get_provider", + "get_providers", + "get_provider_types", + "get_provider_interface", +] + +get_plugins() diff --git a/framework/calm/dsl/providers/base.py b/framework/calm/dsl/providers/base.py new file mode 100644 index 0000000..a185443 --- /dev/null +++ b/framework/calm/dsl/providers/base.py @@ -0,0 +1,106 @@ +from collections import OrderedDict +from io import StringIO +import json + +from ruamel import yaml +from jinja2 import Environment, PackageLoader +import jsonref +from calm.dsl.tools import StrictDraft7Validator +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +class ProviderBase: + + providers = OrderedDict() + + def __init_subclass__(cls, **kwargs): + super().__init_subclass__(**kwargs) + + provider_type = getattr(cls, "provider_type") + + if provider_type: + + # Init Provider + cls._init() + + # Register Provider + cls.providers[provider_type] = cls + + +class Provider(ProviderBase): + + provider_type = None + spec_template_file = None + package_name = None + + @classmethod + def _init(cls): + + if cls.package_name is None: + raise NotImplementedError("Package name not given") + + if cls.spec_template_file is None: + raise NotImplementedError("Spec file not given") + + loader = 
PackageLoader(cls.package_name, "") + env = Environment(loader=loader) + template = env.get_template(cls.spec_template_file) + tdict = yaml.safe_load(StringIO(template.render())) + tdict = jsonref.loads(json.dumps(tdict)) + + # TODO - Check if keys are present + cls.provider_spec = tdict["components"]["schemas"]["provider_spec"] + cls.Validator = StrictDraft7Validator(cls.provider_spec) + + @classmethod + def get_provider_spec(cls): + return cls.provider_spec + + @classmethod + def get_validator(cls): + return cls.Validator + + @classmethod + def validate_spec(cls, spec): + Validator = cls.get_validator() + Validator.validate(spec) + + @classmethod + def create_spec(cls): + raise NotImplementedError("Create spec not implemented") + + @classmethod + def get_api_obj(cls): + """returns object to call provider specific apis""" + + raise NotImplementedError("Api object not implemented") + + @classmethod + def get_runtime_editables( + cls, runtime_spec, project_id, substrate_spec, vm_img_map={} + ): + # Not implemented right now + pass + + +def get_provider(provider_type): + + if provider_type not in ProviderBase.providers: + LOG.debug("Registered providers: {}".format(ProviderBase.providers)) + raise Exception("provider not registered") + + return ProviderBase.providers[provider_type] + + +def get_providers(): + return ProviderBase.providers + + +def get_provider_types(): + return ProviderBase.providers.keys() + + +def get_provider_interface(): + return Provider diff --git a/framework/calm/dsl/providers/plugins/__init__.py b/framework/calm/dsl/providers/plugins/__init__.py new file mode 100644 index 0000000..e5f884b --- /dev/null +++ b/framework/calm/dsl/providers/plugins/__init__.py @@ -0,0 +1,29 @@ +import importlib +import pkgutil + + +_PLUGINS = None + + +def get_plugins(): + global _PLUGINS + if not _PLUGINS: + _PLUGINS = _import_plugins() + return _PLUGINS + + +def _import_plugins(name=__name__): + """Load all plugins under '.plugins' package""" + + package = importlib.import_module(name) + + results = {} + for loader, name, is_pkg in pkgutil.walk_packages(package.__path__): + if not is_pkg: + continue + full_name = package.__name__ + "." 
+ name + results[full_name] = importlib.import_module(full_name) + return results + + +__all__ = ["get_plugins"] diff --git a/framework/calm/dsl/providers/plugins/ahv_vm/__init__.py b/framework/calm/dsl/providers/plugins/ahv_vm/__init__.py new file mode 100644 index 0000000..b9f6928 --- /dev/null +++ b/framework/calm/dsl/providers/plugins/ahv_vm/__init__.py @@ -0,0 +1,4 @@ +from .main import AhvVmProvider + + +__all__ = ["AhvVmProvider"] diff --git a/framework/calm/dsl/providers/plugins/ahv_vm/ahv_vm_provider_spec.yaml.jinja2 b/framework/calm/dsl/providers/plugins/ahv_vm/ahv_vm_provider_spec.yaml.jinja2 new file mode 100644 index 0000000..fa11047 --- /dev/null +++ b/framework/calm/dsl/providers/plugins/ahv_vm/ahv_vm_provider_spec.yaml.jinja2 @@ -0,0 +1,507 @@ + +{% macro ahvNIC() -%} + +title: AHV NIC +type: object +properties: + network_function_nic_type: + type: string + default: INGRESS + enum: [INGRESS, EGRESS, TAP] + nic_type: + type: string + default: NORMAL_NIC + enum: [NORMAL_NIC, DIRECT_NIC, NETWORK_FUNCTION_NIC] + type: + type: string + subnet_reference: + type: [object, "null"] + properties: + uuid: + type: string + kind: + type: string + default: subnet + name: + type: string + type: + type: string + vpc_reference: + type: [object, "null"] + properties: + uuid: + type: string + kind: + type: string + default: vpc + name: + type: string + type: + type: string + network_function_chain_reference: + type: [object, "null"] + properties: + uuid: + type: string + kind: + type: string + default: network_function_chain + name: + type: string + type: + type: string + mac_address: + type: string + ip_endpoint_list: + type: array + items: + type: object + properties: + ip: + type: string + type: + type: string + default: ASSIGNED + enum: [ASSIGNED, LEARNED] + +{%- endmacro %} + + +{% macro ahvBootConfig() -%} + +title: AHV Boot Configuration +type: [object, "null"] +properties: + boot_device: + type: object + properties: + type: + type: string + disk_address: + type: object + properties: + type: + type: string + device_index: + type: integer + adapter_type: + type: string + boot_type: + type: string + enum: [LEGACY, UEFI, SECURE_BOOT, ''] + type: + type: string + mac_address: + type: string + +{%- endmacro %} + + +{% macro ahvDisk() -%} + +title: AHV Disk +type: object +properties: + data_source_reference: + type: [object, "null"] + properties: + name: + type: string + kind: + type: string + enum: [image, app_package] + uuid: + type: string + type: + type: string + type: + type: string + device_properties: + type: object + properties: + device_type: + type: string + type: + type: string + disk_address: + type: object + properties: + device_index: + type: integer + adapter_type: + type: string + type: + type: string + disk_size_mib: + type: integer + default: 0 + volume_group_reference: + type: [object, "null"] + properties: + name: + type: string + kind: + type: string + uuid: + type: string + type: + type: string + +{%- endmacro %} + + +{% macro ahvCloudInitScript() -%} + +title: AHV CLOUD INIT Script +type: object +properties: + cloud_init: + type: object + properties: + user_data: + type: string + meta_data: + type: string + type: + type: string + sysprep: + type: ["null"] + type: + type: string + +{%- endmacro %} + + +{% macro ahvSysPrepScript() -%} + +title: AHV Sys Prep Script +type: object +properties: + cloud_init: + type: ["null"] + type: + type: string + sysprep: + type: object + properties: + unattend_xml: + type: string + install_type: + type: string + enum: [PREPARED, FRESH] + 
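+        # NOTE: is_domain/domain/dns_ip/dns_search_path below are the Windows domain-join
+        # settings; the interactive sysprep prompts in main.py populate these same keys.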
is_domain: + type: boolean + domain: + type: string + dns_ip: + type: string + dns_search_path: + type: string + type: + type: string + domain_credential_reference: # Review after CALM-15575 is resolved + type: [object, "null"] + properties: + uuid: + type: string + kind: + type: string + default: subnet + name: + type: string + type: + type: string + +{%- endmacro %} + + +{% macro nutanixSnapshotSchedule() -%} + +title: Nutanix Snapshot Schedule +type: object +properties: + type: + type: string + is_suspended: + type: boolean + interval_multiple: + type: integer + duration_secs: + type: integer + end_time: + type: integer + interval_type: + type: string + +{%- endmacro %} + + +{% macro nutanixSnapshotScheduleInfo() -%} + +title: Nutanix Snapshot Schedule Information +type: object +properties: + type: + type: string + remote_retention_quantity: + type: integer + snapshot_type: + type: string + local_retention_quantity: + type: integer + schedule: + {{ nutanixSnapshotSchedule() | indent(4) }} + +{%- endmacro %} + + +{% macro nutanixSnapshotPolicy() -%} + +title: Nutanix Snapshot Policy +type: object +properties: + type: + type: string + replication_target: + type: object + properties: + cluster_reference: + type: [object, "null"] + properties: + uuid: + type: string + kind: + type: string + default: cluster + name: + type: string + type: + type: string + availability_zone_reference: + type: [object, "null"] + properties: + uuid: + type: string + kind: + type: string + default: availability_zone + name: + type: string + type: + type: string + snapshot_schedule_list: + type: array + items: + {{ nutanixSnapshotScheduleInfo() | indent(6) }} + +{%- endmacro %} + + +{% macro nutanixBackupPolicy() -%} + +title: Nutanix Backup Policy +type: [object, "null"] +properties: + type: + type: string + default_snapshot_type: + type: string + consistency_group_identifier: + type: string + snapshot_policy_list: + type: array + items: + {{ nutanixSnapshotPolicy() | indent(6) }} + +{%- endmacro %} + + +{% macro ahvGPU() -%} + +title: AHV GPU +type: object +properties: + vendor: + type: string + mode: + type: string + device_id: + type: integer + type: + type: string + +{%- endmacro %} + + +{% macro ahvSerialPort() -%} + +title: AHV Serial Port +type: object +properties: + index: + type: integer + is_connected: + type: boolean + default: False + type: + type: string + +{%- endmacro %} + + +{% macro ahvGuestTools() -%} + +title: AHV Guest Tools +type: [object, "null"] +properties: + type: + type: string + nutanix_guest_tools: + type: object + properties: + state: + type: string + version: + type: string + ngt_state: + type: string + iso_mount_state: + type: string + type: + type: string + credentials: + type: object + properties: + username: + type: string + password: + type: string + type: + type: string + enabled_capability_list: + type: array + items: + type: string + +{%- endmacro %} + + +{% macro ahvResources() -%} + +title: AHV Resources +type: object +properties: + account_uuid: + type: string + type: + type: string + nic_list: + type: array + items: + {{ ahvNIC() | indent(6) }} + num_vcpus_per_socket: + type: integer + num_sockets: + type: integer + memory_size_mib: + type: integer + cluster_uuid: + type: string + power_state: + type: string + enum: [ON, OFF] + gpu_list: + type: array + items: + {{ ahvGPU() | indent(6) }} + hardware_clock_timezone: + type: string + parent_reference: + type: [object, "null"] + properties: + uuid: + type: string + kind: + type: string + name: + type: string + type: + type: 
string + machine_type: + type: string + guest_tools: + {{ ahvGuestTools() | indent(4) }} + disk_list: + type: array + items: + {{ ahvDisk() | indent(6) }} + boot_config: + {{ ahvBootConfig() | indent(4) }} + guest_customization: + anyOf: + - type: ["null"] + - $ref: '#/components/schemas/cloud_init_script' + - $ref: '#/components/schemas/sys_prep_script' + serial_port_list: + type: array + items: + {{ ahvSerialPort() | indent(6) }} + +{%- endmacro %} + + +{% macro ahvCreateSpec() -%} + +title: AHV CreateSpec +type: object +properties: + name: + type: string + categories: + type: [object, string] + resources: + {{ ahvResources() | indent(4) }} + type: + type: string + cluster_reference: + type: [object, "null"] + properties: + uuid: + type: string + kind: + type: string + default: cluster + name: + type: string + type: + type: string + availability_zone_reference: + type: [object, "null"] + properties: + uuid: + type: string + kind: + type: string + default: availability_zone + name: + type: string + type: + type: string + backup_policy: + {{ nutanixBackupPolicy() | indent(4) }} + +{%- endmacro %} + +info: + title: AHV_VM + description: AHV VM spec payload using v3 API + version: 3.0.1 # TODO add right version of ahv schema + +components: + schemas: + provider_spec: + {{ ahvCreateSpec() | indent(6) }} + cloud_init_script: + {{ ahvCloudInitScript() | indent(6) }} + sys_prep_script: + {{ ahvSysPrepScript() | indent(6) }} diff --git a/framework/calm/dsl/providers/plugins/ahv_vm/constants.py b/framework/calm/dsl/providers/plugins/ahv_vm/constants.py new file mode 100644 index 0000000..7a3fa29 --- /dev/null +++ b/framework/calm/dsl/providers/plugins/ahv_vm/constants.py @@ -0,0 +1,18 @@ +class AHV: + DEVICE_TYPES = {"CD_ROM": "CDROM", "DISK": "DISK"} + + DEVICE_BUS = { + "CDROM": {"SATA": "SATA", "IDE": "IDE"}, + "DISK": {"SCSI": "SCSI", "PCI": "PCI"}, + } + IMAGE_TYPES = {"DISK": "DISK_IMAGE", "CDROM": "ISO_IMAGE"} + + GUEST_CUSTOMIZATION_SCRIPT_TYPES = ["cloud_init", "sysprep"] + + SYS_PREP_INSTALL_TYPES = ["FRESH", "PREPARED"] + BOOT_TYPES = {"Legacy BIOS": "LEGACY", "UEFI": "UEFI"} + OPERATION_TYPES = { + "DISK": ["CLONE_FROM_IMAGE", "ALLOCATE_STORAGE_CONTAINER"], + "CDROM": ["CLONE_FROM_IMAGE", "EMPTY_CDROM"], + } + OPERATING_SYSTEM = {"LINUX": "Linux", "WINODWS": "Windows"} diff --git a/framework/calm/dsl/providers/plugins/ahv_vm/main.py b/framework/calm/dsl/providers/plugins/ahv_vm/main.py new file mode 100644 index 0000000..bc7ee22 --- /dev/null +++ b/framework/calm/dsl/providers/plugins/ahv_vm/main.py @@ -0,0 +1,1836 @@ +import click +import re +import sys +import json +import copy + +from ruamel import yaml +from distutils.version import LooseVersion as LV +from collections import OrderedDict + +from calm.dsl.api import get_resource_api, get_api_client +from calm.dsl.providers import get_provider_interface +from calm.dsl.tools import StrictDraft7Validator +from calm.dsl.log import get_logging_handle + +from .constants import AHV as AhvConstants + +LOG = get_logging_handle(__name__) +Provider = get_provider_interface() + + +# Implements Provider interface for AHV_VM +class AhvVmProvider(Provider): + + provider_type = "AHV_VM" + package_name = __name__ + spec_template_file = "ahv_vm_provider_spec.yaml.jinja2" + + @classmethod + def create_spec(cls): + client = get_api_client() + create_spec(client) + + @classmethod + def update_vm_image_config(cls, spec, disk_packages={}): + """Ex: disk_packages = {disk_index: vmImageClass}""" + disk_list = spec["resources"].get("disk_list", []) + + for 
disk_ind, img_cls in disk_packages.items(): + if disk_ind > len(disk_list): + raise ValueError("invalid disk address ({})".format(disk_ind)) + + disk = disk_list[disk_ind - 1] + if "data_source_reference" not in disk: + raise ValueError( + "unable to set downloadable image in disk {}".format(disk_ind) + ) + + pkg = img_cls.compile() + vm_image_type = pkg["options"]["resources"]["image_type"] + disk_img_type = AhvConstants.IMAGE_TYPES[ + disk["device_properties"]["device_type"] + ] + + if vm_image_type != disk_img_type: + raise TypeError("image type mismatch in disk {}".format(disk_ind)) + + # Set the reference of this disk + disk["data_source_reference"] = img_cls.get_ref().compile() + + @classmethod + def get_runtime_editables( + cls, sub_editable_spec, project_id, substrate_spec, vm_img_map={} + ): + """Fetch runtime editables at runtime""" + + client = get_api_client() + Obj = cls.get_api_obj() + + res, err = client.project.read(project_id) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + project = res.json() + subnets_list = [] + cluster_list = [] + vpc_list = [] + for subnet in project["status"]["resources"]["subnet_reference_list"]: + subnets_list.append(subnet["uuid"]) + + # Extending external subnet's list from remote account + for subnet in project["status"]["resources"].get("external_network_list"): + subnets_list.append(subnet["uuid"]) + + for cluster in project["status"]["resources"].get("cluster_reference_list", []): + cluster_list.append(cluster["uuid"]) + + for vpc in project["status"]["resources"].get("vpc_reference_list", []): + vpc_list.append(vpc["uuid"]) + + accounts = project["status"]["resources"]["account_reference_list"] + + reg_accounts = [] + for account in accounts: + reg_accounts.append(account["uuid"]) + + # As account_uuid is required for versions>2.9.0 + account_uuid = "" + is_host_pc = True + payload = {"length": 250, "filter": "type==nutanix_pc"} + res, err = client.account.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + for entity in res["entities"]: + entity_id = entity["metadata"]["uuid"] + if entity_id in reg_accounts: + account_uuid = entity_id + break + + # TODO Host PC dependency for categories call due to bug https://jira.nutanix.com/browse/CALM-17213 + if account_uuid: + payload = {"length": 250, "filter": "_entity_id_=={}".format(account_uuid)} + res, err = client.account.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + provider_data = res["entities"][0]["status"]["resources"]["data"] + is_host_pc = provider_data["host_pc"] + + # Getting the readiness probe details + runtime_readiness_probe = sub_editable_spec["value"].get("readiness_probe", {}) + runtime_spec = sub_editable_spec["value"].get("spec", {}) + runtime_cluster_spec = runtime_spec.get("cluster_reference", {}) + if ( + runtime_spec.get("resources") + or runtime_spec.get("categories") + or runtime_readiness_probe + or runtime_cluster_spec + ): + click.secho( + "\n-- Substrate {} --\n".format( + highlight_text( + sub_editable_spec["context"] + "." 
+ sub_editable_spec["name"] + ) + ) + ) + + else: + # Nothing to get input + return + + for k, v in runtime_readiness_probe.items(): + new_val = click.prompt( + "Value for {} [{}]".format(k, highlight_text(v)), + default=v, + show_default=False, + ) + runtime_readiness_probe[k] = new_val + + sub_create_spec = substrate_spec["create_spec"] + downloadable_images = list(vm_img_map.keys()) + vm_os = substrate_spec["os_type"] + + # Cluster + runtime_cluster = {} + if runtime_cluster_spec: + filter_query = "(_entity_id_=={})".format( + ",_entity_id_==".join(cluster_list) + ) + clusters = Obj.clusters( + account_uuid=account_uuid, filter_query=filter_query + ) + clusters = clusters["entities"] + click.echo("Choose from below clusters:") + i = 1 + cfg_cluster_idx = 1 + LOG.debug("Clusters: {}".format(clusters)) + for cluster in clusters: + click.echo( + "{}. {}, UUID:{}".format( + i, + cluster.get("spec", {}).get("name"), + cluster.get("metadata", {}).get("uuid"), + ) + ) + if cluster.get("metadata", {}).get("uuid") == runtime_cluster_spec.get( + "uuid" + ): + cluster_name = cluster.get("spec", {}).get("name") + cfg_cluster_idx = i + i += 1 + + cluster_idx = click.prompt( + "Enter index for cluster[{}]".format(highlight_text(cluster_name)), + type=click.IntRange(1, len(cluster_list) + 1), + default=cfg_cluster_idx, + ) + if cluster_idx != cfg_cluster_idx: + runtime_cluster_spec["uuid"] = ( + clusters[cluster_idx - 1].get("metadata", {}).get("uuid") + ) + runtime_cluster_spec["name"] = ( + clusters[cluster_idx - 1].get("spec", {}).get("name") + ) + runtime_cluster = runtime_cluster_spec + + # NAME + vm_name = runtime_spec.get("name", None) + if vm_name: + vm_name = click.prompt( + "\nName of vm [{}]".format(highlight_text(vm_name)), + default=vm_name, + show_default=False, + ) + runtime_spec["name"] = vm_name + + # Check for categories + if "categories" in runtime_spec.keys(): + avl_categories = {} + if runtime_spec["categories"]: + avl_categories = json.loads(runtime_spec["categories"]) + + click.echo("\n\t", nl=False) + click.secho("Categories\n", underline=True) + + click.echo("Available categories:") + for name, value in avl_categories.items(): + click.echo( + "\t {}:{}".format(highlight_text(name), highlight_text(value)) + ) + + choice = click.prompt( + "\nWant to edit categories (y/n) [{}]".format(highlight_text("n")), + default="n", + show_default=False, + type=click.Choice(["y", "n"]), + show_choices=False, + ) + if choice == "y": + choice = click.prompt( + "Want to delete any category (y/n) [{}]".format( + highlight_text("n") + ), + default="n", + show_default=False, + type=click.Choice(["y", "n"]), + show_choices=False, + ) + while choice == "y": + if not avl_categories: + click.echo("No existing categories available") + break + + family = click.prompt( + "Enter the family of category (leave empty to skip)", + default="", + show_default=False, + ) + if not family: + break + + if family not in avl_categories.keys(): + click.echo("Invalid family") + + else: + avl_categories.pop(family) + choice = click.prompt( + "\nWant to delete other category (y/n) [{}]".format( + highlight_text("n") + ), + default="n", + show_default=False, + type=click.Choice(["y", "n"]), + show_choices=False, + ) + + choice = click.prompt( + "\nWant to add any category (y/n) [{}]".format(highlight_text("n")), + default="n", + show_default=False, + type=click.Choice(["y", "n"]), + show_choices=False, + ) + if choice == "y": + categories = Obj.categories( + host_pc=is_host_pc, account_uuid=account_uuid + ) + 
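+                # The categories fetched above are key:value pairs from the groups API; the prompts
+                # below let the user pick entries by index and attach them to the VM's categories map.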
click.echo("Choose from given categories:") + for ind, group in enumerate(categories): + category = "{}:{}".format(group["key"], group["value"]) + click.echo( + "\t {}. {} ".format(str(ind + 1), highlight_text(category)) + ) + + while choice == "y": + index = click.prompt( + "Enter the index of category (0 to skip)", default=0 + ) + if not index: + break + + if (index > len(categories)) or (index <= 0): + click.echo("Invalid index !!! ") + else: + group = categories[index - 1] + key = group["key"] + + if avl_categories.get("key"): + click.echo( + "Category corresponding to key {} already exists ".format( + key + ) + ) + else: + avl_categories[key] = group["value"] + + choice = click.prompt( + "\nWant to add other category (y/n) [{}]".format( + highlight_text("n") + ), + default="n", + show_default=False, + type=click.Choice(["y", "n"]), + show_choices=False, + ) + + runtime_spec["categories"] = json.dumps(avl_categories) + + resources = runtime_spec.get("resources", {}) + # NICS + nic_list = resources.get("nic_list", {}) + # Normal Nic for now + if nic_list: + click.echo("\n\t", nl=False) + click.secho("NICS data\n", underline=True) + + filter_query = "_entity_id_=in={}".format("|".join(subnets_list)) + nics = Obj.subnets(account_uuid=account_uuid, filter_query=filter_query) + nics = nics["entities"] + + vpcs = [] + if vpc_list: + vpc_filter_query = "(_entity_id_=={})".format( + ",_entity_id_==".join(vpc_list) + ) + + vpcs = Obj.vpcs( + account_uuid=account_uuid, filter_query=vpc_filter_query + ) + vpcs = vpcs["entities"] + + vpc_id_name_map = {} + for vpc in vpcs: + LOG.debug("VPC information:{}".format(vpc)) + vpc_id = vpc.get("metadata", {}).get("uuid") + vpc_name = vpc.get("spec", {}).get("name") + if not vpc_id_name_map.get(vpc_id, None): + vpc_id_name_map[vpc_id] = vpc_name + + nic_cluster_data = {} + subnet_id_name_map = {} + + for nic in nics: + nic_name = nic["status"]["name"] + cluster_name = nic["status"].get("cluster_reference", {}).get("name") + vpc_uuid = ( + nic["status"] + .get("resources", {}) + .get("vpc_reference", {}) + .get("uuid") + ) + nic_uuid = nic["metadata"]["uuid"] + subnet_id_name_map[nic_uuid] = nic_name + + if nic_cluster_data.get(nic_name): + nic_cluster_data[nic_name].append( + (cluster_name, nic_uuid, vpc_uuid) + ) + else: + nic_cluster_data[nic_name] = [(cluster_name, nic_uuid, vpc_uuid)] + + selected_cluster_name = "" + selected_vpc_uuid = "" + for nic_index, nic_data in nic_list.items(): + click.echo("\n--Nic {} -- ".format(nic_index)) + nic_uuid = (nic_data.get("subnet_reference") or {}).get("uuid") + if not nic_uuid: + continue + + click.echo("Choose from given subnets:") + for ind, name in enumerate(nic_cluster_data.keys()): + if len(nic_cluster_data[name]) == 1: + vpc_uuid = nic_cluster_data[name][0][2] + cluster_name = nic_cluster_data[name][0][0] + if cluster_name: + if ( + runtime_cluster + and runtime_cluster.get("name") + and runtime_cluster.get("name") != cluster_name + ): + # Skip this subnet as cluster is already selected. So choosing this VLAN subnet will lead to validation errors + LOG.debug( + "Skipping this subnet as cluster is already pre-decided:{}".format( + cluster_name + ) + ) + continue + if ( + selected_cluster_name + and selected_cluster_name != cluster_name + ): + # Skip this subnet as cluster is already selected. So choosing this VLAN subnet will lead to validation errors + LOG.debug( + "Skipping this subnet as cluster is already pre-decided:{}".format( + cluster_name + ) + ) + continue + + click.echo( + "\t {}. 
{}, Cluster: {}".format( + str(ind + 1), name, cluster_name + ) + ) + else: + if selected_vpc_uuid and selected_vpc_uuid != vpc_uuid: + # Skip this subnet as VPC is already selected. So choosing this Overlay subnet will lead to validation errors + LOG.debug( + "Skipping this VPC option because VPC is pre-decided: {}".format( + vpc_uuid + ) + ) + continue + click.echo( + "\t {}. {}, VPC: {}".format( + str(ind + 1), name, vpc_id_name_map.get(vpc_uuid) + ) + ) + else: + click.echo("\t {}. {} (expand)".format(str(ind + 1), name)) + + nic_name = subnet_id_name_map.get(nic_uuid, "") + + if nic_uuid.startswith("@@") and nic_uuid.endswith("@@"): + # It will be macro + var_name = nic_uuid[3:-8] + nic_name = "@@{" + var_name + "}@@" + + new_val = click.prompt( + "Subnet for nic {} [{}]".format( + nic_index, highlight_text(nic_name) + ), + default=nic_name, + show_default=False, + ) + if new_val.startswith("@@") and new_val.endswith("@@"): + # Macro case + var_name = new_val[3:-3] + nic_uuid = "@@{" + var_name + ".uuid}@@" + nic_data["subnet_reference"].update({"uuid": nic_uuid, "name": ""}) + + else: + if not nic_cluster_data.get(new_val): + LOG.error("Invalid nic name") + sys.exit(-1) + + nc_list = nic_cluster_data[new_val] + if len(nc_list) == 1: + nic_data["subnet_reference"].update( + {"name": new_val, "uuid": nc_list[0][1]} + ) + vpc_uuid = nc_list[0][2] if nc_list[0][2] else None + if vpc_uuid: + # This is an Overlay Subnet, in case of Overlay subnets if subnet is runtime then vpc is also runtime. + vpc_name = vpc_id_name_map.get(vpc_uuid, "") + nic_data["vpc_reference"] = { + "name": vpc_name, + "uuid": vpc_uuid, + "kind": "vpc", + } + + else: + # Getting the cluster for supplied nic + click.echo( + "Multiple subnet with same name exists. Select from given clusters/VPCs:" + ) + for nc_ind, cluster_data in enumerate(nc_list): + cluster_name = cluster_data[0] + vpc_uuid = cluster_data[2] + if cluster_name: + click.echo( + "\t {}. {}".format(str(nc_ind + 1), cluster_name) + ) + elif vpc_uuid: + click.echo( + "\t {}. 
{}".format( + str(nc_ind + 1), + vpc_id_name_map.get(vpc_uuid, ""), + ) + ) + + ind = click.prompt( + "Enter index for cluster/VPC", + type=click.IntRange(1, len(nc_list)), + default=1, + ) + nic_data["subnet_reference"].update( + {"name": new_val, "uuid": nc_list[ind - 1][1]} + ) + + new_selected_cluster_name = nic_list[ind - 1][0] + new_selected_vpc_uuid = nic_list[ind - 1][2] + + if ( + selected_cluster_name + and new_selected_cluster_name + and selected_cluster_name != new_selected_cluster_name + ): + LOG.error( + "Change of cluster not supported in multiple NICs, previous: {}, now: {}".format( + selected_cluster_name, new_selected_cluster_name + ) + ) + sys.exit(-1) + elif ( + selected_vpc_uuid + and new_selected_vpc_uuid + and selected_vpc_uuid != new_selected_vpc_uuid + ): + LOG.error( + "Change of VPC not supported in multiple NICs, previous: {}, now: {}".format( + vpc_id_name_map.get(selected_vpc_uuid), + vpc_id_name_map.get(new_selected_vpc_uuid), + ) + ) + sys.exit(-1) + elif ( + selected_vpc_uuid + and new_selected_cluster_name + or selected_cluster_name + and new_selected_vpc_uuid + ): + LOG.error( + "Change of subnet type from OVERLAY to VLAN and vice versa is not supported" + ) + sys.exit(-1) + + if vpc_uuid: + nic_data["vpc_reference"] = { + "name": vpc_id_name_map.get(vpc_uuid), + "uuid": vpc_uuid, + "kind": "vpc", + } + + # DISKS + disk_list = resources.get("disk_list", {}) + bp_disks = sub_create_spec["resources"]["disk_list"] + if disk_list: + click.echo("\n\t", nl=False) + click.secho("DISKS data", underline=True) + for disk_ind, disk_data in disk_list.items(): + click.echo("\n-- Data Disk {} --".format(disk_ind)) + bp_disk_data = bp_disks[int(disk_ind)] + + if "device_properties" in disk_data.keys(): + device_prop = disk_data.get("device_properties", {}) + device_type = device_prop.get("device_type", None) + + if device_type: + click.echo("\nChoose from given Device Types :") + device_types = list(AhvConstants.DEVICE_TYPES.keys()) + for ind, dt in enumerate(device_types): + click.echo("\t{}. {}".format(ind + 1, dt)) + + new_val = click.prompt( + "Device Type name for data disk {} [{}]".format( + disk_ind, highlight_text(device_type) + ), + type=click.Choice(device_types), + show_choices=False, + default=device_type, + show_default=False, + ) + + device_type = AhvConstants.DEVICE_TYPES[new_val] + # Change the data dict + device_prop["device_type"] = device_type + + else: + device_type = bp_disk_data["device_properties"]["device_type"] + + disk_address = device_prop.get("disk_address", {}) + device_bus = disk_address.get("adapter_type", None) + + if device_bus: + device_bus_list = list( + AhvConstants.DEVICE_BUS[device_type].keys() + ) + if device_bus not in device_bus_list: + device_bus = device_bus_list[0] + + click.echo("\nChoose from given Device Buses :") + for ind, db in enumerate(device_bus_list): + click.echo("\t{}. 
{}".format(ind + 1, db)) + + new_val = click.prompt( + "Device Bus for data disk {} [{}]".format( + disk_ind, highlight_text(device_bus) + ), + type=click.Choice(device_bus_list), + show_choices=False, + default=device_bus, + show_default=False, + ) + + device_bus = new_val if new_val else device_bus + device_prop["disk_address"][ + "adapter_type" + ] = AhvConstants.DEVICE_BUS[device_type][device_bus] + + else: + device_type = bp_disk_data["device_properties"]["device_type"] + + is_data_ref_present = "data_source_reference" in disk_data.keys() + is_size_present = "disk_size_mib" in disk_data.keys() + + if is_data_ref_present and is_size_present: + # Check for the operation + operation_list = AhvConstants.OPERATION_TYPES[device_type] + click.echo("\nChoose from given Operation:") + for ind, op in enumerate(operation_list): + click.echo("\t{}. {}".format(ind + 1, op)) + + # Choose default operation + op = "CLONE_FROM_IMAGE" + if (disk_data["data_source_reference"] is None) and ( + disk_data["disk_size_mib"] == 0 + ): + op = "EMPTY_CDROM" + + elif disk_data["disk_size_mib"]: + op = "ALLOCATE_STORAGE_CONTAINER" + + op = click.prompt( + "Enter the operation type for data disk {} [{}]".format( + disk_ind, highlight_text(op) + ), + default=op, + type=click.Choice(operation_list), + show_choices=False, + show_default=False, + ) + + elif is_data_ref_present: + op = "CLONE_FROM_IMAGE" + + elif is_size_present: + op = "ALLOCATE_STORAGE_CONTAINER" + + else: + op = None + + if op == "CLONE_FROM_IMAGE": + data_source_ref = ( + disk_data["data_source_reference"] + if disk_data.get("data_source_reference") + else {} + ) + + res = Obj.images(account_uuid=account_uuid) + imagesNameUUIDMap = {} + for entity in res.get("entities", []): + img_type = entity["status"]["resources"].get("image_type", None) + + # Ignoring images, if they don't have any image type(Ex: Karbon Image) + if not img_type: + continue + + if img_type == AhvConstants.IMAGE_TYPES[device_type]: + imagesNameUUIDMap[entity["status"]["name"]] = entity[ + "metadata" + ]["uuid"] + + images = list(imagesNameUUIDMap.keys()) + if not (images or downloadable_images): + LOG.error( + "No images found for device type: {}".format(device_type) + ) + sys.exit(-1) + + img_name = data_source_ref.get("name", images[0]) + if (img_name not in images) and ( + img_name not in downloadable_images + ): + img_name = images[0] if images else downloadable_images[0] + + click.echo("\nChoose from given images:") + if images: + click.secho("Disk Images", bold=True) + for ind, name in enumerate(images): + click.echo("\t {}. {}".format(str(ind + 1), name)) + + if downloadable_images: + click.secho("Downloadable Images", bold=True) + for ind, name in enumerate(downloadable_images): + click.echo("\t {}. 
{}".format(str(ind + 1), name)) + + all_images = images + downloadable_images + img_name = click.prompt( + "\nImage for data disk {} [{}]".format( + disk_ind, highlight_text(img_name) + ), + default=img_name, + type=click.Choice(all_images), + show_choices=False, + show_default=False, + ) + + is_normal_image = "y" + if (img_name in images) and (img_name in downloadable_images): + is_normal_image = click.prompt( + "Is it Disk Image (y/n) [{}]".format(highlight_text("y")), + default="y", + show_default=False, + type=click.Choice(["y", "n"]), + show_choices=False, + ) + + elif img_name in images: + is_normal_image = "y" + + else: + is_normal_image = "n" + + if is_normal_image == "y": + disk_data["data_source_reference"] = { + "kind": "image", + "name": img_name, + "uuid": imagesNameUUIDMap[img_name], + } + + else: + disk_data["data_source_reference"] = { + "kind": "app_package", + "name": img_name, + "uuid": vm_img_map[img_name], + } + + elif op == "ALLOCATE_STORAGE_CONTAINER": + size = disk_data.get("disk_size_mib", 0) + size = int(size / 1024) + + size = click.prompt( + "\nSize of disk {} (GiB) [{}]".format( + disk_ind, highlight_text(size) + ), + default=size, + show_default=False, + ) + disk_data["disk_size_mib"] = size * 1024 + + elif op == "EMPTY_CDROM": + disk_data["data_source_reference"] = None + disk_data["disk_size_mib"] = 0 + + # num_sockets + vCPUs = resources.get("num_sockets", None) + if vCPUs: + vCPUS = click.prompt( + "\nvCPUS for the vm [{}]".format(highlight_text(vCPUs)), + default=vCPUs, + show_default=False, + ) + resources["num_sockets"] = vCPUS + + # num_vcpu_per_socket + cores_per_vcpu = resources.get("num_vcpus_per_socket", None) + if cores_per_vcpu: + cores_per_vcpu = click.prompt( + "\nCores per vCPU for the vm [{}]".format( + highlight_text(cores_per_vcpu) + ), + default=cores_per_vcpu, + show_default=False, + ) + resources["num_vcpus_per_socket"] = cores_per_vcpu + + # memory + memory_size_mib = resources.get("memory_size_mib", None) + if memory_size_mib: + memory_size_mib = int(memory_size_mib / 1024) + memory_size_mib = click.prompt( + "\nMemory(GiB) for the vm [{}]".format(highlight_text(memory_size_mib)), + default=memory_size_mib, + show_default=False, + ) + resources["memory_size_mib"] = memory_size_mib * 1024 + + # serial ports + serial_ports = resources.get("serial_port_list", {}) + if serial_ports: + click.echo("\n\t", nl=False) + click.secho("Serial Ports data", underline=True) + for ind, port_data in serial_ports.items(): + is_connected = port_data["is_connected"] + new_val = "y" if is_connected else "n" + + new_val = click.prompt( + "\nConnection status for serial port {} (y/n) [{}]".format( + ind, highlight_text(new_val) + ), + default=new_val, + show_default=False, + type=click.Choice(["y", "n"]), + show_choices=False, + ) + + if new_val == "y": + port_data["is_connected"] = True + + else: + port_data["is_connected"] = False + + # Guest Customization + if "guest_customization" in resources.keys(): + click.echo("\n\t", nl=False) + click.secho("Guest Customization", underline=True) + guest_cus = ( + resources["guest_customization"] + if resources.get("guest_customization") + else {} + ) + + choice = click.prompt( + "\nEdit Guest Customization (y/n) [{}]".format(highlight_text("n")), + default="n", + show_default=False, + type=click.Choice(["y", "n"]), + show_choices=False, + ) + + if choice == "y": + if vm_os == AhvConstants.OPERATING_SYSTEM["LINUX"]: + cloud_init = ( + guest_cus["cloud_init"] if guest_cus.get("cloud_init") else {} + ) + + user_data = 
cloud_init.get("user_data", "") + user_data = click.prompt( + "\nUser data for guest customization for VM [{}]".format( + highlight_text(user_data) + ), + default=user_data, + show_default=False, + ) + guest_cus.update({"cloud_init": {"user_data": user_data}}) + + else: + sysprep = guest_cus["sysprep"] if guest_cus.get("sysprep") else {} + choice = click.prompt( + "Want to enter sysprep data (y/n) [{}]".format( + highlight_text("n") + ), + default="n", + show_default=False, + type=click.Choice(["y", "n"]), + show_choices=False, + ) + + if choice[0] == "y": + install_types = AhvConstants.SYS_PREP_INSTALL_TYPES + install_type = sysprep.get("install_type", install_types[0]) + + click.echo("\nChoose from given install types ") + for index, value in enumerate(install_types): + click.echo("\t {}. {}".format(str(index + 1), value)) + + install_type = click.prompt( + "Install type [{}]".format(highlight_text(install_type)), + default=install_type, + show_default=False, + type=click.Choice(install_types), + show_choices=False, + ) + sysprep["install_type"] = install_type + + unattend_xml = sysprep.get("unattend_xml", "") + unattend_xml = click.prompt( + "\nUnattend XML [{}]".format(highlight_text(unattend_xml)), + default=unattend_xml, + show_default=False, + ) + sysprep["unattend_xml"] = unattend_xml + + is_domain = "y" if sysprep.get("is_domain", False) else "n" + is_domain = click.prompt( + "\nJoin a domain (y/n) [{}]".format( + highlight_text(is_domain) + ), + default=is_domain, + show_default=False, + type=click.Choice(["y", "n"]), + show_choices=False, + ) + is_domain = True if is_domain[0] == "y" else False + sysprep["is_domain"] = is_domain + + if is_domain: + domain = sysprep.get("domain", "") + domain = click.prompt( + "\nDomain name [{}]".format(highlight_text(domain)), + default=domain, + show_default=False, + ) + sysprep["domain"] = domain + + dns_ip = sysprep.get("dns_ip", "") + dns_ip = click.prompt( + "\nDNS IP [{}]".format(highlight_text(dns_ip)), + default=dns_ip, + show_default=False, + ) + sysprep["dns_ip"] = dns_ip + + dns_search_path = sysprep.get("dns_search_path", "") + dns_search_path = click.prompt( + "\nDNS Search Path [{}]".format( + highlight_text(dns_search_path) + ), + default=dns_search_path, + show_default=False, + ) + sysprep["dns_search_path"] = dns_search_path + # TODO add support for credential too + guest_cus["sysprep"] = sysprep + resources["guest_customization"] = guest_cus + + @classmethod + def get_api_obj(cls): + """returns object to call ahv provider specific apis""" + + client = get_api_client() + # TODO remove this mess + from calm.dsl.store.version import Version + + calm_version = Version.get_version("Calm") + api_handlers = AhvBase.api_handlers + + # Return min version that is greater or equal to user calm version + supported_versions = [] + for k in api_handlers.keys(): + if LV(k) <= LV(calm_version): + supported_versions.append(k) + + latest_version = max(supported_versions, key=lambda x: LV(x)) + api_handler = api_handlers[latest_version] + return api_handler(client.connection) + + +class AhvBase: + """Base class for ahv provider specific apis""" + + api_handlers = OrderedDict() + __api_version__ = None + + def __init_subclass__(cls, **kwargs): + super().__init_subclass__(**kwargs) + + version = getattr(cls, "__api_version__") + if version: + cls.api_handlers[version] = cls + + @classmethod + def get_version(cls): + return getattr(cls, "__api_version__") + + def images(self, *args, **kwargs): + raise NotImplementedError("images call not 
implemented") + + def subnets(self, *args, **kwargs): + raise NotImplementedError("subnets call not implemented") + + def categories(self, *args, **kwargs): + raise NotImplementedError("categories call not implemented") + + +class AhvNew(AhvBase): + """ahv api object for calm_version >= 2.9.0""" + + __api_version__ = "2.9.0" + SUBNETS = "nutanix/v1/subnets" + IMAGES = "nutanix/v1/images" + CLUSTERS = "nutanix/v1/clusters" + VPCS = "nutanix/v1/vpcs" + GROUPS = "nutanix/v1/groups" + CATEGORIES_PAYLOAD = { + "entity_type": "category", + "filter_criteria": "name!=CalmApplication;name!=CalmDeployment;name!=CalmService;name!=CalmPackage;name!=CalmProject;name!=CalmUser;name!=CalmVmUniqueIdentifier;name!=CalmClusterUuid", + "grouping_attribute": "abac_category_key", + "group_sort_attribute": "name", + "group_count": 60, + "group_attributes": [ + {"attribute": "name", "ancestor_entity_type": "abac_category_key"} + ], + "group_member_count": 1000, + "group_member_offset": 0, + "group_member_sort_attribute": "value", + "group_member_attributes": [{"attribute": "value"}], + "query_name": "prism:CategoriesQueryModel", + } + + def __init__(self, connection): + self.connection = connection + + def images(self, *args, **kwargs): + Obj = get_resource_api(self.IMAGES, self.connection) + limit = kwargs.get("length", 250) + offset = kwargs.get("offset", 0) + filter_query = kwargs.get("filter_query", "") + + account_uuid = kwargs.get("account_uuid", None) + if account_uuid: + filter_query = filter_query + ";account_uuid=={}".format(account_uuid) + + if filter_query.startswith(";"): + filter_query = filter_query[1:] + + params = {"length": limit, "offset": offset, "filter": filter_query} + res, err = Obj.list(params, ignore_error=True) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + return res + + def subnets(self, *args, **kwargs): + Obj = get_resource_api(self.SUBNETS, self.connection) + limit = kwargs.get("length", 250) + offset = kwargs.get("offset", 0) + filter_query = kwargs.get("filter_query", "") + + account_uuid = kwargs.get("account_uuid", None) + if account_uuid: + filter_query = filter_query + ";account_uuid=={}".format(account_uuid) + + if filter_query.startswith(";"): + filter_query = filter_query[1:] + + params = {"length": limit, "offset": offset, "filter": filter_query} + res, err = Obj.list_all(base_params=params, ignore_error=True) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + return {"entities": res} + + def categories(self, *args, **kwargs): + payload = copy.deepcopy(self.CATEGORIES_PAYLOAD) + host_pc = kwargs.get("host_pc", False) + + # Note: Api response is bugged due to CALM-17213 + # TODO: Remove dependecy for host_pc after bug is resolved + if host_pc: + Obj = get_resource_api("groups", self.connection) + res, err = Obj.create(payload) + + else: + Obj = get_resource_api(self.GROUPS, self.connection) + account_uuid = kwargs.get("account_uuid", None) + if account_uuid: + payload["filter_criteria"] = payload[ + "filter_criteria" + ] + ";account_uuid=={}".format(account_uuid) + + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + categories = [] + for group in res["group_results"]: + key = group["group_summaries"]["sum:name"]["values"][0]["values"][0] + for entity in group["entity_results"]: + value = entity["data"][0]["values"][0]["values"][0] + categories.append({"key": key, "value": value}) + + return categories + + def 
clusters(self, *args, **kwargs): + Obj = get_resource_api(self.CLUSTERS, self.connection) + limit = kwargs.get("length", 250) + offset = kwargs.get("offset", 0) + filter_query = kwargs.get("filter_query", "") + + account_uuid = kwargs.get("account_uuid", None) + if account_uuid: + filter_query = filter_query + ";account_uuid=={}".format(account_uuid) + + if filter_query.startswith(";"): + filter_query = filter_query[1:] + + params = {"length": limit, "offset": offset, "filter": filter_query} + res, err = Obj.list(params, ignore_error=True) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + return res + + def vpcs(self, *args, **kwargs): + Obj = get_resource_api(self.VPCS, self.connection) + limit = kwargs.get("length", 500) + offset = kwargs.get("offset", 0) + ignore_failures = kwargs.get("ignore_failures", False) + + filter_query = kwargs.get("filter_query", "") + + account_uuid = kwargs.get("account_uuid", None) + if account_uuid: + filter_query = filter_query + ";account_uuid=={}".format(account_uuid) + + if filter_query.startswith(";"): + filter_query = filter_query[1:] + + params = {"length": limit, "offset": offset, "filter": filter_query} + LOG.debug(params) + res, err = Obj.list(params, ignore_error=True) + if err: + if ignore_failures: + LOG.warning("Failed to query VPCs due to: {}, ignoring".format(err)) + return None + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + return res + + +class Ahv(AhvBase): + """ahv api object for calm_version < 2.9.0""" + + # TODO Replace with proper api version + __api_version__ = "0" + SUBNETS = "subnets" + IMAGES = "images" + GROUPS = "groups" + CATEGORIES_PAYLOAD = { + "entity_type": "category", + "filter_criteria": "name!=CalmApplication;name!=CalmDeployment;name!=CalmService;name!=CalmPackage;name!=CalmProject;name!=CalmUser;name!=CalmVmUniqueIdentifier;name!=CalmClusterUuid", + "grouping_attribute": "abac_category_key", + "group_sort_attribute": "name", + "group_count": 60, + "group_attributes": [ + {"attribute": "name", "ancestor_entity_type": "abac_category_key"} + ], + "group_member_count": 1000, + "group_member_offset": 0, + "group_member_sort_attribute": "value", + "group_member_attributes": [{"attribute": "value"}], + "query_name": "prism:CategoriesQueryModel", + } + + def __init__(self, connection): + self.connection = connection + + def images(self, *args, **kwargs): + Obj = get_resource_api(self.IMAGES, self.connection) + limit = kwargs.get("length", 250) + offset = kwargs.get("offset", 0) + filter_query = kwargs.get("filter_query", "") + + params = {"length": limit, "offset": offset, "filter": filter_query} + res, err = Obj.list(params, ignore_error=True) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + return res + + def subnets(self, *args, **kwargs): + Obj = get_resource_api(self.SUBNETS, self.connection) + limit = kwargs.get("length", 250) + offset = kwargs.get("offset", 0) + filter_query = kwargs.get("filter_query", "") + + params = {"length": limit, "offset": offset, "filter": filter_query} + res, err = Obj.list_all(base_params=params, ignore_error=True) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + return {"entities": res} + + def categories(self, *args, **kwargs): + Obj = get_resource_api(self.GROUPS, self.connection) + payload = self.CATEGORIES_PAYLOAD + + res, err = Obj.create(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) 
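+        # The groups API response nests data under group_results -> group_summaries / entity_results;
+        # the loop below flattens it into a list of {"key": ..., "value": ...} category dicts.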
+ + res = res.json() + categories = [] + for group in res["group_results"]: + key = group["group_summaries"]["sum:name"]["values"][0]["values"][0] + for entity in group["entity_results"]: + value = entity["data"][0]["values"][0]["values"][0] + categories.append({"key": key, "value": value}) + + return categories + + +def highlight_text(text, **kwargs): + """Highlight text in our standard format""" + return click.style("{}".format(text), fg="blue", bold=False, **kwargs) + + +def create_spec(client): + + spec = {} + AhvObj = AhvVmProvider.get_api_obj() + + schema = AhvVmProvider.get_provider_spec() + path = [] # Path to the key + option = [] # Any option occured during finding key + + # VM Configuration + projects = client.project.get_name_uuid_map({"length": 250, "offset": 0}) + project_list = list(projects.keys()) + + if not project_list: + LOG.error("No projects found! Please add one.") + sys.exit(-1) + + click.echo("Choose from given projects:") + for ind, name in enumerate(project_list): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + project_name = "" + project_id = "" + while True: + ind = click.prompt("\nEnter the index of project", default=1) + if (ind > len(project_list)) or (ind <= 0): + click.echo("Invalid index !!! ") + + else: + project_name = project_list[ind - 1] + project_id = projects[project_list[ind - 1]] + click.echo("{} selected".format(highlight_text(project_name))) + break + + res, err = client.project.read(project_id) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + project = res.json() + subnets_list = [] + for subnet in project["status"]["resources"]["subnet_reference_list"]: + subnets_list.append(subnet["uuid"]) + + # Extending external subnet's list from remote account + for subnet in project["status"]["resources"].get("external_network_list", []): + subnets_list.append(subnet["uuid"]) + + accounts = project["status"]["resources"]["account_reference_list"] + + reg_accounts = [] + for account in accounts: + reg_accounts.append(account["uuid"]) + + # As account_uuid is required for versions>2.9.0 + account_uuid = "" + is_host_pc = True + payload = {"length": 250, "filter": "type==nutanix_pc"} + res, err = client.account.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + for entity in res["entities"]: + entity_id = entity["metadata"]["uuid"] + if entity_id in reg_accounts: + account_uuid = entity_id + break + + # TODO Host PC dependency for categories call due to bug https://jira.nutanix.com/browse/CALM-17213 + if account_uuid: + payload = {"length": 250, "filter": "_entity_id_=={}".format(account_uuid)} + res, err = client.account.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + provider_data = res["entities"][0]["status"]["resources"]["data"] + is_host_pc = provider_data["host_pc"] + + click.echo("") + path.append("name") + spec["name"] = get_field( + schema, + path, + option, + default="vm_@@{calm_application_name}@@-@@{calm_array_index}@@", + ) + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add some categories")), default="n" + ) + if choice[0] == "y": + # TODO Remove dependecy for host_pc after bug CALM-17213 is resolved + categories = AhvObj.categories(host_pc=is_host_pc, account_uuid=account_uuid) + if not categories: + click.echo("\n{}\n".format(highlight_text("No Category present"))) + + else: + click.echo("\n Choose from given categories: \n") + for ind, 
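The `categories()` method above posts `CATEGORIES_PAYLOAD` to the `groups` endpoint and flattens the grouped response into key/value pairs. A rough sketch of the response shape that parsing assumes, using made-up sample data:

```python
# Hypothetical sample of the nested "groups" response that categories() walks.
sample = {
    "group_results": [
        {
            "group_summaries": {"sum:name": {"values": [{"values": ["AppFamily"]}]}},
            "entity_results": [
                {"data": [{"values": [{"values": ["Databases"]}]}]},
                {"data": [{"values": [{"values": ["WebServer"]}]}]},
            ],
        }
    ]
}

categories = []
for group in sample["group_results"]:
    # The group summary carries the category key; each entity result carries one of its values.
    key = group["group_summaries"]["sum:name"]["values"][0]["values"][0]
    for entity in group["entity_results"]:
        value = entity["data"][0]["values"][0]["values"][0]
        categories.append({"key": key, "value": value})

print(categories)
# [{'key': 'AppFamily', 'value': 'Databases'}, {'key': 'AppFamily', 'value': 'WebServer'}]
```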
group in enumerate(categories): + category = "{}:{}".format(group["key"], group["value"]) + click.echo("\t {}. {} ".format(str(ind + 1), highlight_text(category))) + + result = {} + while True: + + while True: + index = click.prompt("\nEnter the index of category", default=1) + if (index > len(categories)) or (index <= 0): + click.echo("Invalid index !!! ") + + else: + break + + group = categories[index - 1] + key = group["key"] + if result.get(key) is not None: + click.echo( + "Category corresponding to key {} already exists ".format(key) + ) + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to replace old one")), + default="n", + ) + if choice[0] == "y": + result[key] = group["value"] + click.echo( + highlight_text( + "category with (key = {}) updated".format(key) + ) + ) + + else: + category = "{}:{}".format(group["key"], group["value"]) + click.echo("{} selected".format(highlight_text(category))) + result[key] = group["value"] + + choice = click.prompt( + "\n{}(y/n)".format( + highlight_text("Want to add more categories(y/n)") + ), + default="n", + ) + if choice[0] == "n": + break + + spec["categories"] = result + + spec["resources"] = {} + path[-1] = "resources" + + path.append("num_sockets") + click.echo("") + spec["resources"]["num_sockets"] = get_field( + schema, path, option, default=1, msg="Enter vCPUs count" + ) + + path[-1] = "num_vcpus_per_socket" + click.echo("") + spec["resources"]["num_vcpus_per_socket"] = get_field( + schema, path, option, default=1, msg="Enter Cores per vCPU count" + ) + + path[-1] = "memory_size_mib" + click.echo("") + spec["resources"]["memory_size_mib"] = ( + get_field(schema, path, option, default=1, msg="Enter Memory(GiB)") * 1024 + ) + + click.secho("\nAdd some disks:", fg="blue", bold=True) + + spec["resources"]["disk_list"] = [] + spec["resources"]["boot_config"] = {} + path[-1] = "disk_list" + option.append("AHV Disk") + + adapter_name_index_map = {} + image_index = 0 + while True: + image = {} + image_index += 1 + click.secho( + "\nImage Device {}".format(str(image_index)), bold=True, underline=True + ) + + click.echo("\nChoose from given Device Types :") + device_types = list(AhvConstants.DEVICE_TYPES.keys()) + for index, device_type in enumerate(device_types): + click.echo("\t{}. {}".format(index + 1, highlight_text(device_type))) + + while True: + res = click.prompt("\nEnter the index for Device Type", default=1) + if (res > len(device_types)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + image["device_type"] = AhvConstants.DEVICE_TYPES[device_types[res - 1]] + click.echo("{} selected".format(highlight_text(image["device_type"]))) + break + + click.echo("\nChoose from given Device Bus :") + device_bus_list = list(AhvConstants.DEVICE_BUS[image["device_type"]].keys()) + for index, device_bus in enumerate(device_bus_list): + click.echo("\t{}. {}".format(index + 1, highlight_text(device_bus))) + + while True: + res = click.prompt("\nEnter the index for Device Bus", default=1) + if (res > len(device_bus_list)) or (res <= 0): + click.echo("Invalid index !!! 
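The interactive `create_spec` flow repeats the same indexed-menu loop (print numbered options, prompt until the index is in range) for projects, categories, device types, device buses, images and subnets. A compact helper that captures the recurring pattern; `choose_from` is an illustrative refactoring sketch, not part of this change:

```python
import click


def choose_from(options, label, default=1):
    """Print a 1-based menu and keep prompting until a valid index is entered."""
    for ind, name in enumerate(options):
        click.echo("\t{}. {}".format(ind + 1, name))
    while True:
        res = click.prompt("\nEnter the index of {}".format(label), default=default)
        if 0 < res <= len(options):
            click.echo("{} selected".format(options[res - 1]))
            return options[res - 1]
        click.echo("Invalid index !!!")


# e.g. device_type = choose_from(list(AhvConstants.DEVICE_TYPES.keys()), "Device Type")
```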
") + + else: + image["adapter_type"] = AhvConstants.DEVICE_BUS[image["device_type"]][ + device_bus_list[res - 1] + ] + click.echo("{} selected".format(highlight_text(image["adapter_type"]))) + break + + # Add image details + res = AhvObj.images(account_uuid=account_uuid) + img_name_uuid_map = {} + for entity in res.get("entities", []): + img_type = entity["status"]["resources"].get("image_type", None) + + # Ignoring images, if they don't have any image type(Ex: Karbon Image) + if not img_type: + continue + + if img_type == AhvConstants.IMAGE_TYPES[image["device_type"]]: + img_name_uuid_map[entity["status"]["name"]] = entity["metadata"]["uuid"] + + images = list(img_name_uuid_map.keys()) + while True: + if not images: + click.echo("\n{}".format(highlight_text("No image present"))) + image["name"] = "" + break + + click.echo("\nChoose from given images:") + for ind, name in enumerate(images): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + res = click.prompt("\nEnter the index of image", default=1) + if (res > len(images)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + image["name"] = images[res - 1] + click.echo("{} selected".format(highlight_text(image["name"]))) + break + + image["bootable"] = click.prompt("\nIs it bootable(y/n)", default="y") + + if not adapter_name_index_map.get(image["adapter_type"]): + adapter_name_index_map[image["adapter_type"]] = 0 + + disk = { + "data_source_reference": {}, + "device_properties": { + "device_type": image["device_type"], + "disk_address": { + "device_index": adapter_name_index_map[image["adapter_type"]], + "adapter_type": image["adapter_type"], + }, + }, + } + + # If image exists, then update data_source_reference + if image["name"]: + disk["data_source_reference"] = { + "name": image["name"], + "kind": "image", + "uuid": img_name_uuid_map.get(image["name"], ""), + } + + if image["bootable"]: + spec["resources"]["boot_config"] = { + "boot_device": { + "disk_address": { + "device_index": adapter_name_index_map[image["adapter_type"]], + "adapter_type": image["adapter_type"], + } + } + } + + adapter_name_index_map[image["adapter_type"]] += 1 + spec["resources"]["disk_list"].append(disk) + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add more disks")), default="n" + ) + if choice[0] == "n": + break + + click.echo("\nChoose from given Boot Type :") + boot_types = list(AhvConstants.BOOT_TYPES.keys()) + for index, boot_type in enumerate(boot_types): + click.echo("\t{}. {}".format(index + 1, highlight_text(boot_type))) + + while True: + res = click.prompt("\nEnter the index for Boot Type", default=1) + if (res > len(boot_types)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + boot_type = AhvConstants.BOOT_TYPES[boot_types[res - 1]] + if boot_type == AhvConstants.BOOT_TYPES["UEFI"]: + spec["resources"]["boot_config"]["boot_type"] = boot_type + click.echo("{} selected".format(highlight_text(boot_type))) + break + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want any virtual disks")), default="n" + ) + if choice[0] == "y": + option[-1] = "AHV VDisk" + + while True: + vdisk = {} + + click.echo("\nChoose from given Device Types: ") + device_types = list(AhvConstants.DEVICE_TYPES.keys()) + for index, device_type in enumerate(device_types): + click.echo("\t{}. 
{}".format(index + 1, highlight_text(device_type))) + + while True: + res = click.prompt("\nEnter the index for Device Type", default=1) + if (res > len(device_types)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + vdisk["device_type"] = AhvConstants.DEVICE_TYPES[ + device_types[res - 1] + ] + click.echo( + "{} selected".format(highlight_text(vdisk["device_type"])) + ) + break + + click.echo("\nChoose from given Device Bus :") + device_bus_list = list(AhvConstants.DEVICE_BUS[vdisk["device_type"]].keys()) + for index, device_bus in enumerate(device_bus_list): + click.echo("\t{}. {}".format(index + 1, highlight_text(device_bus))) + + while True: + res = click.prompt("\nEnter the index for Device Bus: ", default=1) + if (res > len(device_bus_list)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + vdisk["adapter_type"] = AhvConstants.DEVICE_BUS[ + vdisk["device_type"] + ][device_bus_list[res - 1]] + click.echo( + "{} selected".format(highlight_text(vdisk["adapter_type"])) + ) + break + + path.append("disk_size_mib") + + if vdisk["device_type"] == AhvConstants.DEVICE_TYPES["DISK"]: + click.echo("") + msg = "Enter disk size(GB)" + vdisk["size"] = get_field(schema, path, option, default=8, msg=msg) + vdisk["size"] = vdisk["size"] * 1024 + else: + vdisk["size"] = 0 + + if not adapter_name_index_map.get(vdisk["adapter_type"]): + adapter_name_index_map[vdisk["adapter_type"]] = 0 + + disk = { + "device_properties": { + "device_type": vdisk["device_type"], + "disk_address": { + "device_index": adapter_name_index_map[vdisk["adapter_type"]], + "adapter_type": vdisk["adapter_type"], + }, + }, + "disk_size_mib": vdisk["size"], + } + + spec["resources"]["disk_list"].append(disk) + path = path[:-1] + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add more virtual disks")), + default="n", + ) + if choice[0] == "n": + break + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want any network adapters")), default="n" + ) + if choice[0] == "y": + if not subnets_list: + click.echo( + "\n{}".format( + highlight_text("No subnets found registered with the project") + ) + ) + + else: + nics = [] + filter_query = "_entity_id_=in={}".format("|".join(subnets_list)) + nics = AhvObj.subnets(account_uuid=account_uuid, filter_query=filter_query) + nics = nics["entities"] + click.echo("\nChoose from given subnets:") + for ind, nic in enumerate(nics): + click.echo( + "\t {}. 
{} ({})".format( + str(ind + 1), + highlight_text(nic["status"]["name"]), + highlight_text(nic["status"]["cluster_reference"]["name"]), + ) + ) + + spec["resources"]["nic_list"] = [] + while True: + nic_config = {} + while True: + res = click.prompt("\nEnter the index of subnet's name", default=1) + if (res > len(nics)) or (res <= 0): + click.echo("Invalid index !!!") + + else: + nic_config = nics[res - 1] + click.echo( + "{} selected".format( + highlight_text(nic_config["status"]["name"]) + ) + ) + break + + # Check for static vlan + nic = {} + if nic_config["status"]["resources"].get("ip_config"): + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Use static Ip")), default="n" + ) + if choice[0] == "y": + ip = click.prompt("\nEnter Ip") + nic = {"ip_endpoint_list": [{"ip": ip}]} + + nic.update( + { + "subnet_reference": { + "kind": "subnet", + "name": nic_config["status"]["name"], + "uuid": nic_config["metadata"]["uuid"], + } + } + ) + + spec["resources"]["nic_list"].append(nic) + choice = click.prompt( + "\n{}(y/n)".format( + highlight_text("Want to add more network adpaters") + ), + default="n", + ) + if choice[0] == "n": + break + + path = ["resources"] + option = [] + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add Customization script")), + default="n", + ) + if choice[0] == "y": + path.append("guest_customization") + script_types = AhvConstants.GUEST_CUSTOMIZATION_SCRIPT_TYPES + + click.echo("\nChoose from given script types ") + for index, scriptType in enumerate(script_types): + click.echo("\t {}. {}".format(str(index + 1), highlight_text(scriptType))) + + while True: + index = click.prompt("\nEnter the index for type of script", default=1) + if (index > len(script_types)) or (index <= 0): + click.echo("Invalid index !!!") + else: + script_type = script_types[index - 1] + click.echo("{} selected".format(highlight_text(script_type))) + break + + if script_type == "cloud_init": + option.append("AHV CLOUD INIT Script") + path = path + ["cloud_init", "user_data"] + + click.echo("") + user_data = get_field(schema, path, option, default="") + spec["resources"]["guest_customization"] = { + "cloud_init": {"user_data": user_data} + } + + elif script_type == "sysprep": + option.append("AHV Sys Prep Script") + path.append("sysprep") + script = {} + + install_types = AhvConstants.SYS_PREP_INSTALL_TYPES + click.echo("\nChoose from given install types ") + for index, value in enumerate(install_types): + click.echo("\t {}. 
{}".format(str(index + 1), highlight_text(value))) + + while True: + index = click.prompt( + "\nEnter the index for type of installing script", default=1 + ) + if (index > len(install_types)) or (index <= 0): + click.echo("Invalid index !!!") + else: + install_type = install_types[index - 1] + click.echo("{} selected\n".format(highlight_text(install_type))) + break + + script["install_type"] = install_type + + path.append("unattend_xml") + script["unattend_xml"] = get_field(schema, path, option, default="") + + sysprep_dict = { + "unattend_xml": script["unattend_xml"], + "install_type": script["install_type"], + } + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to join a domain")), default="n" + ) + + if choice[0] == "y": + domain = click.prompt("\nEnter Domain Name", default="") + dns_ip = click.prompt("\nEnter DNS IP", default="") + dns_search_path = click.prompt("\nEnter DNS Search Path", default="") + credential = click.prompt("\nEnter Credential", default="") + + sysprep_dict.update( + { + "is_domain": True, + "domain": domain, + "dns_ip": dns_ip, + "dns_search_path": dns_search_path, + } + ) + + if credential: # Review after CALM-15575 is resolved + sysprep_dict["domain_credential_reference"] = { + "kind": "app_credential", + "name": credential, + } + + spec["resources"]["guest_customization"] = {"sysprep": sysprep_dict} + + AhvVmProvider.validate_spec(spec) # Final validation (Insert some default's value) + click.echo("\nCreate spec for your AHV VM:\n") + click.echo(highlight_text(yaml.dump(spec, default_flow_style=False))) + + +def find_schema(schema, path, option): + if len(path) == 0: + return {} + + indPath = 0 + indOpt = 0 + + pathLength = len(path) + + while indPath < pathLength: + + if schema.get("anyOf") is not None: + + resDict = None + for optionDict in schema["anyOf"]: + if optionDict["title"] == option[indOpt]: + resDict = optionDict + break + + if not resDict: + print("Not a valid key") + else: + schema = resDict + indOpt = indOpt + 1 + + elif schema["type"] == "array": + schema = schema["items"] + + else: + schema = schema["properties"] + schema = schema[path[indPath]] + indPath = indPath + 1 + + return schema + + +def validate_field(schema, path, options, spec): + + keySchema = find_schema(schema, path, options) + return StrictDraft7Validator(keySchema).is_valid(spec) + + +def get_field(schema, path, options, type=str, default=None, msg=None): + + field = path[-1] + field = field.replace("_", " ") + field = re.sub(r"(?<=\w)([A-Z])", r" \1", field) + field = field.capitalize() + + if msg is None: + msg = "Enter {}".format(field) + + data = "" + while True: + if not default: + data = click.prompt(msg, type=type) + + else: + data = click.prompt(msg, default=default) + + if not validate_field(schema, path, options, data): + click.echo("data incorrect. 
Enter again") + + else: + break + + return data diff --git a/framework/calm/dsl/providers/plugins/aws_vm/__init__.py b/framework/calm/dsl/providers/plugins/aws_vm/__init__.py new file mode 100644 index 0000000..09b615d --- /dev/null +++ b/framework/calm/dsl/providers/plugins/aws_vm/__init__.py @@ -0,0 +1,4 @@ +from .main import AwsVmProvider + + +__all__ = ["AwsVmProvider"] diff --git a/framework/calm/dsl/providers/plugins/aws_vm/aws_vm_provider_spec.yaml.jinja2 b/framework/calm/dsl/providers/plugins/aws_vm/aws_vm_provider_spec.yaml.jinja2 new file mode 100644 index 0000000..d437be8 --- /dev/null +++ b/framework/calm/dsl/providers/plugins/aws_vm/aws_vm_provider_spec.yaml.jinja2 @@ -0,0 +1,256 @@ +{% macro awsDisks() -%} + +title: AWS Disks +type: object +properties: + device_name: + type: string + size_gb: + type: integer + volume_type: + type: string + enum: + - IO1 + - STANDARD + - SC1 + - ST1 + - GP2 + delete_on_termination: + type: boolean + iops: + type: integer + snapshot_id: + type: string + type: + type: string + +{%- endmacro %} + + +{% macro awsResource() -%} + +title: AWS Resource +type: object +properties: + account_uuid: + type: string + name: + type: string + instance_type: + type: string + key_name: + type: string + instance_profile_name: + type: string + availability_zone: + type: string + vpc_id: + type: string + associate_public_ip_address: + type: boolean + region: + type: string + private_ip_address: + type: string + type: + type: string + security_group_list: + type: array + items: + type: object + properties: + security_group_id: + type: string + type: + type: string + subnet_id: + type: string + instance_initiated_shutdown_behavior: # TODO add enums + type: string + image_id: + type: string + user_data: + type: string + maxLength: 16000 + state: # TODO add enums(POWER_STATE) + type: string + tag_list: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + type: + type: string + block_device_map: + type: object + properties: + root_disk: + {{ awsDisks() | indent(8) }} + type: + type: string + data_disk_list: + type: array + items: + {{ awsDisks() | indent(10) }} + +{%- endmacro %} + + +{% macro nutanixSnapshotSchedule() -%} + +title: Nutanix Snapshot Schedule +type: object +properties: + is_suspended: + type: boolean + interval_multiple: + type: integer + duration_secs: + type: integer + end_time: + type: integer + interval_type: + type: string + type: + type: string + +{%- endmacro %} + + +{% macro nutanixSnapshotScheduleInfo() -%} + +title: Nutanix Snapshot Schedule Information +type: object +properties: + remote_retention_quantity: + type: integer + snapshot_type: + type: string + local_retention_quantity: + type: integer + type: + type: string + schedule: + {{ nutanixSnapshotSchedule() | indent(4) }} + +{%- endmacro %} + + +{% macro nutanixSnapshotPolicy() -%} + +title: Nutanix Snapshot Policy +type: object +properties: + type: + type: string + replication_target: + type: object + properties: + cluster_reference: + type: [object, "null"] + properties: + uuid: + type: string + kind: + type: string + default: cluster + name: + type: string + type: + type: string + availability_zone_reference: + type: [object, "null"] + properties: + uuid: + type: string + kind: + type: string + default: availability_zone + name: + type: string + type: + type: string + snapshot_schedule_list: + type: array + items: + {{ nutanixSnapshotScheduleInfo() | indent(6) }} + +{%- endmacro %} + + +{% macro nutanixBackupPolicy() -%} + +title: Nutanix 
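The `get_field()` helper above keeps prompting until the entered value passes `StrictDraft7Validator` against the schema node that `find_schema()` resolves from `path` and `option`. The validator class itself comes from the DSL, but the underlying check is ordinary JSON Schema Draft 7 validation; a minimal sketch with the plain `jsonschema` package and a made-up schema node:

```python
from jsonschema import Draft7Validator

# Schema node comparable to what find_schema() might return for a field like "num_sockets".
key_schema = {"type": "integer", "minimum": 1}


def is_valid(value):
    return Draft7Validator(key_schema).is_valid(value)


print(is_valid(2))      # True
print(is_valid(0))      # False: violates "minimum"
print(is_valid("two"))  # False: wrong type
```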
Backup Policy +type: [object, "null"] +properties: + default_snapshot_type: + type: string + consistency_group_identifier: + type: string + type: + type: string + snapshot_policy_list: + type: array + items: + {{ nutanixSnapshotPolicy() | indent(6) }} + +{%- endmacro %} + + +{% macro awsCreateSpec() -%} + +title: AWS CreateSpec +type: object +properties: + name: + type: string + type: + type: string + enum: [PROVISION_AWS_VM, ''] + default: PROVISION_AWS_VM + resources: + {{ awsResource() | indent(4) }} + cluster_reference: + type: [object, "null"] + properties: + uuid: + type: string + kind: + type: string + default: cluster + name: + type: string + type: + type: string + availability_zone_reference: + type: [object, "null"] + properties: + uuid: + type: string + kind: + type: string + default: availability_zone + name: + type: string + type: + type: string + backup_policy: + {{ nutanixBackupPolicy() | indent(4) }} + +{%- endmacro %} + + +info: + title: AWS_VM + description: AWS VM spec payload using v3 API + version: 3.0.1 # TODO add right version of ahv schema + + +components: + schemas: + provider_spec: + {{ awsCreateSpec() | indent(6) }} diff --git a/framework/calm/dsl/providers/plugins/aws_vm/constants.py b/framework/calm/dsl/providers/plugins/aws_vm/constants.py new file mode 100644 index 0000000..bd89735 --- /dev/null +++ b/framework/calm/dsl/providers/plugins/aws_vm/constants.py @@ -0,0 +1,39 @@ +class AWS: + MACHINE_TYPES = "aws/machine_types" + VOLUME_TYPES = "aws/volume_types" + AVAILABILTY_ZONES = "aws/availability_zones" + MIXED_IMAGES = "aws/mixed_images" + ROLES = "aws/roles" + KEY_PAIRS = "aws/key_pairs" + VPCS = "aws/vpcs" + SECURITY_GROUPS = "aws/security_groups" + SUBNETS = "aws/subnets" + POWER_STATE = { + "RUNNING": "RUNNING", + "REBOOTING": "REBOOTING", + "STOPPED": "STOPPED", + "ON": "ON", + "OFF": "OFF", + } + + VOLUME_TYPE_MAP = { + "Provisioned IOPS SSD": "IO1", + "EBS Magnetic HDD": "STANDARD", + "Cold HDD": "SC1", + "Throughput Optimized HDD": "ST1", + "General Purpose SSD": "GP2", + } + + DeviceMountPoints = { # Constants from calm-ui repoitory + "/dev/sdb": "/dev/sdb", + "/dev/sdc": "/dev/sdc", + "/dev/sdd": "/dev/sdd", + "/dev/sde": "/dev/sde", + "/dev/sdf": "/dev/sdf", + "/dev/sdg": "/dev/sdg", + "/dev/sdh": "/dev/sdh", + "/dev/sdi": "/dev/sdi", + "/dev/sdj": "/dev/sdj", + "/dev/sdk": "/dev/sdk", + "/dev/sdl": "/dev/sdl", + } diff --git a/framework/calm/dsl/providers/plugins/aws_vm/main.py b/framework/calm/dsl/providers/plugins/aws_vm/main.py new file mode 100644 index 0000000..d9a205a --- /dev/null +++ b/framework/calm/dsl/providers/plugins/aws_vm/main.py @@ -0,0 +1,1061 @@ +import click +import uuid + +from collections import OrderedDict +from ruamel import yaml +from distutils.version import LooseVersion as LV + +from calm.dsl.providers import get_provider_interface +from calm.dsl.api import get_resource_api, get_api_client +from calm.dsl.store.version import Version + +from .constants import AWS as aws + +Provider = get_provider_interface() + + +class AwsVmProvider(Provider): + + provider_type = "AWS_VM" + package_name = __name__ + spec_template_file = "aws_vm_provider_spec.yaml.jinja2" + calm_version = Version.get_version("Calm") + + @classmethod + def create_spec(cls): + client = get_api_client() + create_spec(client) + + @classmethod + def get_api_obj(cls): + """returns object to call ahv provider specific apis""" + + client = get_api_client() + api_handlers = AWSBase.api_handlers + latest_version = "0" + for version in api_handlers.keys(): + if 
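The `*_provider_spec.yaml.jinja2` files introduced here are YAML schemas assembled from Jinja2 macros, with the `indent` filter keeping the nesting valid when one macro is embedded inside another. A toy rendering of that mechanism (the macro and field names below are invented, and PyYAML stands in for the ruamel loader the DSL uses):

```python
import yaml                      # PyYAML here; the DSL itself loads YAML via ruamel
from jinja2 import Template

source = """
{% macro disk() -%}
type: object
properties:
  size_gb:
    type: integer
{%- endmacro %}

provider_spec:
  type: object
  properties:
    root_disk:
      {{ disk() | indent(6) }}
"""

# The indent filter shifts every line after the first, so the embedded fragment
# lines up under the key where the macro is expanded.
schema = yaml.safe_load(Template(source).render())
print(schema["provider_spec"]["properties"]["root_disk"]["properties"]["size_gb"])
# {'type': 'integer'}
```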
LV(version) <= LV(AwsVmProvider.calm_version) and LV( + latest_version + ) < LV(version): + latest_version = version + + api_handler = api_handlers[latest_version] + return api_handler(client.connection) + + +class AWSBase: + "Base class for AWS provider specific apis" + + api_handlers = OrderedDict() + __api_version__ = None + + def __init_subclass__(cls, **kwargs) -> None: + super().__init_subclass__(**kwargs) + + version = getattr(cls, "__api_version__") + if version: + AWSBase.api_handlers[version] = cls + + @classmethod + def get_version(cls): + return getattr(cls, "__api_version__") + + def regions(self, *args, **kwargs): + raise NotImplementedError("regions call not implemented") + + def machine_types(self, *args, **kwargs): + raise NotImplementedError("machine_types call not implemented") + + def volume_types(self, *args, **kwargs): + raise NotImplementedError("volume_types call not implemented") + + def availibility_zones(self, *args, **kwargs): + raise NotImplementedError("availibility_zones call not implemented") + + def mixed_images(self, *args, **kwargs): + raise NotImplementedError("mixed_images call not implemented") + + def roles(self, *args, **kwargs): + raise NotImplementedError("roles call not implemented") + + def VPCs(self, *args, **kwargs): + raise NotImplementedError("VPCs call not implemented") + + def key_pairs(self, *args, **kwargs): + raise NotImplementedError("key_pairs call not implemented") + + def security_groups(self, *args, **kwargs): + raise NotImplementedError("security_groups call not implemented") + + def subnets(self, *args, **kwargs): + raise NotImplementedError("subnets call not implemented") + + +class AWSV0(AWSBase): + """aws api object for calm version < 3.2.0""" + + __api_version__ = "0" + MACHINE_TYPES = "aws/machine_types" + VOLUME_TYPES = "aws/volume_types" + AVAILABILTY_ZONES = "aws/availability_zones" + MIXED_IMAGES = "aws/mixed_images" + ROLES = "aws/roles" + KEY_PAIRS = "aws/key_pairs" + VPCS = "aws/vpcs" + SECURITY_GROUPS = "aws/security_groups" + SUBNETS = "aws/subnets" + + def __init__(self, connection): + self.connection = connection + + def regions(self, account_id): + Obj = get_resource_api("accounts", self.connection) + res, err = Obj.read(account_id) # TODO remove it from here + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + region_list = [] + res = res.json() + entities = res["spec"]["resources"]["data"]["regions"] + + for entity in entities: + region_list.append(entity["name"]) + + return region_list + + def machine_types(self, account_uuid, region_name): + if LV(AwsVmProvider.calm_version) >= LV("3.7.0"): + payload = { + "filter": "account_uuid=={};region-name=={}".format( + account_uuid, region_name + ) + } + else: + payload = {} + Obj = get_resource_api(self.MACHINE_TYPES, self.connection) + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + entity_list = [] + res = res.json() + for entity in res["entities"]: + entity_list.append(entity["metadata"]["name"]) + entity_list.sort() + return entity_list + + def volume_types(self): + Obj = get_resource_api(self.VOLUME_TYPES, self.connection) + res, err = Obj.list() + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + entity_list = [] + res = res.json() + for entity in res["entities"]: + entity_list.append(entity["metadata"]["name"]) + + return entity_list + + def availability_zones(self, account_id, region_name): + + payload = { + "filter": 
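`AWSBase.__init_subclass__` registers every concrete handler in `api_handlers` keyed by its `__api_version__`, and `get_api_obj()` then selects the highest registered version that does not exceed the running Calm version. A condensed, self-contained sketch of that dispatch pattern (class names below are illustrative):

```python
from collections import OrderedDict
from distutils.version import LooseVersion as LV  # same import the source uses


class Base:
    api_handlers = OrderedDict()
    __api_version__ = None

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        # Every subclass that declares a version registers itself at class-creation time.
        if cls.__api_version__:
            Base.api_handlers[cls.__api_version__] = cls


class V0(Base):
    __api_version__ = "0"        # fallback handler


class V1(Base):
    __api_version__ = "3.2.0"    # used once the product version is at least 3.2.0


def get_handler(calm_version):
    """Return the handler with the highest version not newer than calm_version."""
    latest = "0"
    for version in Base.api_handlers:
        if LV(version) <= LV(calm_version) and LV(latest) < LV(version):
            latest = version
    return Base.api_handlers[latest]


print(get_handler("3.0.0"))  # <class '__main__.V0'>
print(get_handler("3.5.1"))  # <class '__main__.V1'>
```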
"account_uuid=={};region=={}".format(account_id, region_name) + } + Obj = get_resource_api(self.AVAILABILTY_ZONES, self.connection) + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + entity_list = [] + res = res.json() + for entity in res["entities"]: + entity_list.append(entity["metadata"]["name"]) + + return entity_list + + def mixed_images(self, account_id, region_name): + """Returns a map + m[key] = (tupVal1, tupVal2) + tupVal1 = id of the image + tupVal2 = root_device_name of the image + """ + + payload = { + "filter": "account_uuid=={};region=={}".format(account_id, region_name) + } + Obj = get_resource_api(self.MIXED_IMAGES, self.connection) + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + result = {} + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + image_id = entity["status"]["resources"]["id"] + root_device_name = entity["status"]["resources"]["root_device_name"] + + result[name] = (image_id, root_device_name) + + return result + + def roles(self, account_id, region_name): + + payload = { + "filter": "account_uuid=={};region=={}".format(account_id, region_name) + } + Obj = get_resource_api(self.ROLES, self.connection) + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + entity_list = [] + res = res.json() + for entity in res["entities"]: + entity_list.append(entity["metadata"]["name"]) + + return entity_list + + def key_pairs(self, account_id, region_name): + + payload = { + "filter": "account_uuid=={};region=={}".format(account_id, region_name) + } + Obj = get_resource_api(self.KEY_PAIRS, self.connection) + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + entity_list = [] + res = res.json() + for entity in res["entities"]: + entity_list.append(entity["metadata"]["name"]) + + return entity_list + + def VPCs(self, account_id, region_name): + + payload = { + "filter": "account_uuid=={};region=={}".format(account_id, region_name) + } + Obj = get_resource_api(self.VPCS, self.connection) + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + vpc_cidr_id_map = {} + for entity in res["entities"]: + ip_blk = entity["status"]["resources"]["cidr_block"] + vpc_id = entity["status"]["resources"]["id"] + vpc_cidr_id_map[ip_blk] = vpc_id + + return vpc_cidr_id_map + + def security_groups(self, account_id, region_name, vpc_id, inc_classic_sg=False): + + inc_classic_sg = "true" if inc_classic_sg else "false" + payload = { + "filter": "account_uuid=={};region=={};vpc_id=={};include_classic_sg=={}".format( + account_id, region_name, vpc_id, inc_classic_sg + ) + } + + Obj = get_resource_api(self.SECURITY_GROUPS, self.connection) + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + res = res.json() + + sg_name_id_map = {} + for entity in res["entities"]: + sg_id = entity["status"]["resources"]["id"] + name = entity["status"]["name"] + sg_name_id_map[name] = sg_id + + return sg_name_id_map + + def subnets(self, account_id, region_name, vpc_id, availability_zone): + + payload = { + "filter": "account_uuid=={};region=={};vpc_id=={};availability_zone=={}".format( + account_id, region_name, vpc_id, availability_zone + ) + } + + subnet_list = [] + Obj = get_resource_api(self.SUBNETS, 
self.connection) + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + res = res.json() + + for entity in res["entities"]: + subnet_id = entity["status"]["resources"]["id"] + subnet_list.append(subnet_id) + + return subnet_list + + +class AWSV1(AWSBase): + """aws api object for calm version >= 3.2.0""" + + __api_version__ = "3.2.0" + MACHINE_TYPES = "aws/v1/machine_types" + VOLUME_TYPES = "aws/v1/volume_types" + AVAILABILTY_ZONES = "aws/v1/availability_zones" + MIXED_IMAGES = "aws/v1/mixed_images" + ROLES = "aws/v1/roles" + KEY_PAIRS = "aws/v1/key_pairs" + VPCS = "aws/v1/vpcs" + SECURITY_GROUPS = "aws/v1/security_groups" + SUBNETS = "aws/v1/subnets" + calm_api = True # to enable v3.0 resource api + + def __init__(self, connection): + self.connection = connection + + def regions(self, account_id): + Obj = get_resource_api("accounts", self.connection, calm_api=self.calm_api) + res, err = Obj.read(account_id) # TODO remove it from here + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + region_list = [] + res = res.json() + entities = res["spec"]["resources"]["data"]["regions"] + + for entity in entities: + region_list.append(entity["name"]) + + return region_list + + def machine_types(self, account_uuid, region_name): + if LV(AwsVmProvider.calm_version) >= LV("3.7.0"): + payload = { + "filter": "account_uuid=={};region-name=={}".format( + account_uuid, region_name + ) + } + else: + payload = {} + Obj = get_resource_api( + self.MACHINE_TYPES, self.connection, calm_api=self.calm_api + ) + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + entity_list = [] + res = res.json() + for entity in res["entities"]: + entity_list.append(entity["metadata"]["name"]) + entity_list.sort() + return entity_list + + def volume_types(self): + Obj = get_resource_api( + self.VOLUME_TYPES, self.connection, calm_api=self.calm_api + ) + res, err = Obj.list() + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + entity_list = [] + res = res.json() + for entity in res["entities"]: + entity_list.append(entity["metadata"]["name"]) + + return entity_list + + def availability_zones(self, account_id, region_name): + + payload = { + "filter": "account_uuid=={};region-name=={}".format(account_id, region_name) + } + Obj = get_resource_api( + self.AVAILABILTY_ZONES, self.connection, calm_api=self.calm_api + ) + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + entity_list = [] + res = res.json() + for entity in res["entities"]: + entity_list.append(entity["metadata"]["name"]) + + return entity_list + + def mixed_images(self, account_id, region_name): + """Returns a map + m[key] = (tupVal1, tupVal2) + tupVal1 = id of the image + tupVal2 = root_device_name of the image + """ + + payload = { + "filter": "account_uuid=={};region-name=={}".format( + account_id, region_name, calm_api=self.calm_api + ) + } + Obj = get_resource_api( + self.MIXED_IMAGES, self.connection, calm_api=self.calm_api + ) + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + result = {} + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + image_id = entity["status"]["resources"]["id"] + root_device_name = entity["status"]["resources"]["root_device_name"] + + result[name] = (image_id, root_device_name) + + return result + + def roles(self, 
account_id, region_name): + + payload = { + "filter": "account_uuid=={};region-name=={}".format(account_id, region_name) + } + Obj = get_resource_api(self.ROLES, self.connection, calm_api=self.calm_api) + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + entity_list = [] + res = res.json() + for entity in res["entities"]: + entity_list.append(entity["metadata"]["name"]) + + return entity_list + + def key_pairs(self, account_id, region_name): + + payload = { + "filter": "account_uuid=={};region-name=={}".format(account_id, region_name) + } + Obj = get_resource_api(self.KEY_PAIRS, self.connection, calm_api=self.calm_api) + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + entity_list = [] + res = res.json() + for entity in res["entities"]: + entity_list.append(entity["metadata"]["name"]) + + return entity_list + + def VPCs(self, account_id, region_name): + + payload = { + "filter": "account_uuid=={};region-name=={}".format(account_id, region_name) + } + Obj = get_resource_api(self.VPCS, self.connection, calm_api=self.calm_api) + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + vpc_cidr_id_map = {} + for entity in res["entities"]: + ip_blk = entity["status"]["resources"]["cidr_block"] + vpc_id = entity["status"]["resources"]["id"] + vpc_cidr_id_map[ip_blk] = vpc_id + + return vpc_cidr_id_map + + def security_groups(self, account_id, region_name, vpc_id, inc_classic_sg=False): + + inc_classic_sg = "true" if inc_classic_sg else "false" + payload = { + "filter": "account_uuid=={};region-name=={};vpc-id=={};include_classic_sg=={}".format( + account_id, region_name, vpc_id, inc_classic_sg + ) + } + + Obj = get_resource_api( + self.SECURITY_GROUPS, self.connection, calm_api=self.calm_api + ) + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + res = res.json() + + sg_name_id_map = {} + for entity in res["entities"]: + sg_id = entity["status"]["resources"]["id"] + name = entity["status"]["name"] + sg_name_id_map[name] = sg_id + + return sg_name_id_map + + def subnets(self, account_id, region_name, vpc_id, availability_zone): + + payload = { + "filter": "account_uuid=={};region-name=={};vpc-id=={};availability-zone=={}".format( + account_id, region_name, vpc_id, availability_zone + ) + } + + subnet_list = [] + Obj = get_resource_api(self.SUBNETS, self.connection, calm_api=self.calm_api) + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + res = res.json() + + for entity in res["entities"]: + subnet_id = entity["status"]["resources"]["id"] + subnet_list.append(subnet_id) + + return subnet_list + + +def highlight_text(text, **kwargs): + """Highlight text in our standard format""" + return click.style("{}".format(text), fg="blue", bold=False, **kwargs) + + +def create_spec(client): + + spec = {} + Obj = AwsVmProvider.get_api_obj() + + vpc_id = None + region_name = None + account_id = None + root_device_name = None + + # VM Configuration + + projects = client.project.get_name_uuid_map() + project_list = list(projects.keys()) + + if not project_list: + click.echo(highlight_text("No projects found!!!")) + click.echo(highlight_text("Please add first")) + return + + click.echo("\nChoose from given projects:") + for ind, name in enumerate(project_list): + click.echo("\t {}. 
{}".format(str(ind + 1), highlight_text(name))) + + project_id = "" + while True: + ind = click.prompt("\nEnter the index of project", default=1) + if (ind > len(project_list)) or (ind <= 0): + click.echo("Invalid index !!! ") + + else: + project_id = projects[project_list[ind - 1]] + click.echo("{} selected".format(highlight_text(project_list[ind - 1]))) + break + + res, err = client.project.read(project_id) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + project = res.json() + accounts = project["status"]["resources"]["account_reference_list"] + + reg_accounts = [] + for account in accounts: + reg_accounts.append(account["uuid"]) + + payload = {"filter": "type==aws"} + res, err = client.account.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + aws_accounts = {} + + for entity in res["entities"]: + entity_name = entity["metadata"]["name"] + entity_id = entity["metadata"]["uuid"] + if entity_id in reg_accounts: + aws_accounts[entity_name] = entity_id + + if not aws_accounts: + click.echo( + highlight_text("No aws account found registered in this project !!!") + ) + click.echo("Please add one !!!") + return + + accounts = list(aws_accounts.keys()) + spec["resources"] = {} + + click.echo("\nChoose from given AWS accounts") + for ind, name in enumerate(accounts): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of account to be used", default=1) + if (res > len(accounts)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + account_name = accounts[res - 1] + account_id = aws_accounts[account_name] # TO BE USED + + spec["resources"]["account_uuid"] = account_id + click.echo("{} selected".format(highlight_text(account_name))) + break + + spec["name"] = "vm_{}".format(str(uuid.uuid4())[-10:]) + spec["name"] = click.prompt("\nEnter instance name", default=spec["name"]) + + choice = click.prompt("\nEnable Associate Public Ip Address(y/n)", default="y") + if choice[0] == "y": + spec["resources"]["associate_public_ip_address"] = True + else: + spec["resources"]["associate_public_ip_address"] = False + click.echo( + highlight_text( + "Calm and AWS should be in the same private network for scripts to run" + ) + ) + + choice = ( + click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add a region")), default="n" + ) + if account_id + else "n" + ) + + regions = Obj.regions(account_id) if choice[0] == "y" else None + if (not regions) and (choice[0] == "y"): + click.echo("\n{}".format(highlight_text("No region present"))) + + elif regions: + click.echo("\nChoose from given regions") + for ind, name in enumerate(regions): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of region", default=1) + if (res > len(regions)) or (res <= 0): + click.echo("Invalid index !!! 
") + + else: + region_name = regions[res - 1] # TO BE USED + spec["resources"]["region"] = region_name + click.echo("{} selected".format(highlight_text(region_name))) + break + + choice = ( + click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add an instance type")), + default="n", + ) + if region_name + else "n" + ) + + ins_types = Obj.machine_types(account_id, region_name) if choice[0] == "y" else None + if (not ins_types) and (choice[0] == "y"): + click.echo("\n{}".format(highlight_text("No Instance Profile present"))) + + elif ins_types: + click.echo("\nChoose from given instance types") + for ind, name in enumerate(ins_types): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of instance type", default=1) + if (res > len(ins_types)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + instance_type = ins_types[res - 1] + spec["resources"]["instance_type"] = instance_type + click.echo("{} selected".format(highlight_text(instance_type))) + break + + choice = ( + click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add a availability zone")), + default="n", + ) + if region_name + else "n" + ) + + avl_zones = ( + Obj.availability_zones(account_id, region_name) if choice[0] == "y" else None + ) + if (not avl_zones) and (choice[0] == "y"): + click.echo("\n{}".format(highlight_text("No availabilty zone present"))) + + elif avl_zones: + click.echo("\nChoose from given availabilty zones") + for ind, name in enumerate(avl_zones): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of availability zone", default=1) + if (res > len(avl_zones)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + availability_zone = avl_zones[res - 1] + spec["resources"]["availability_zone"] = availability_zone + click.echo("{} selected".format(highlight_text(availability_zone))) + break + + choice = ( + click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add a machine image")), + default="n", + ) + if region_name + else "n" + ) + + mixed_images = Obj.mixed_images(account_id, region_name) if choice[0] == "y" else {} + image_names = list(mixed_images.keys()) + image_names.sort(key=lambda y: y.lower()) + if (not image_names) and (choice[0] == "y"): + click.echo("\n{}".format(highlight_text("No machine image present"))) + + elif image_names: + click.echo("\nChoose from given Machine images") + for ind, name in enumerate(image_names): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of machine image", default=1) + if (res > len(image_names)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + image_name = image_names[res - 1] + res_tuple = mixed_images[image_name] + + image_id = res_tuple[0] + root_device_name = res_tuple[1] # TO BE USED + spec["resources"]["image_id"] = image_id + click.echo("{} selected".format(highlight_text(image_name))) + break + + choice = ( + click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add an IAM Role")), default="n" + ) + if region_name + else "n" + ) + + ins_pfl_names = Obj.roles(account_id, region_name) if choice[0] == "y" else None + if (not ins_pfl_names) and (choice[0] == "y"): + click.echo("\n{}".format(highlight_text("No instance profile present"))) + + elif ins_pfl_names: + click.echo("\nChoose from given IAM roles") + for ind, name in enumerate(ins_pfl_names): + click.echo("\t {}. 
{}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of IAM role", default=1) + if (res > len(ins_pfl_names)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + role = ins_pfl_names[res - 1] + spec["resources"]["instance_profile_name"] = role + click.echo("{} selected".format(highlight_text(role))) + break + + choice = ( + click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add any Key Pair")), default="n" + ) + if region_name + else "n" + ) + + key_pairs = Obj.key_pairs(account_id, region_name) if choice[0] == "y" else None + if (not key_pairs) and (choice[0] == "y"): + click.echo("\n{}".format(highlight_text("No key pair present"))) + + elif key_pairs: + click.echo("\nChoose from given Key Pairs") + for ind, name in enumerate(key_pairs): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of Key-Pair", default=1) + if (res > len(key_pairs)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + key_name = key_pairs[res - 1] + spec["resources"]["key_name"] = key_name + click.echo("{} selected".format(highlight_text(key_name))) + break + + choice = ( + click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add any VPC")), default="n" + ) + if region_name + else "n" + ) + + vpc_map = Obj.VPCs(account_id, region_name) if choice[0] == "y" else {} + cidr_names = list(vpc_map.keys()) + + if (not cidr_names) and (choice[0] == "y"): + click.echo("\n{}".format(highlight_text("No VPC present"))) + + elif cidr_names: + click.echo("\nChoose from given VPC") + for ind, name in enumerate(cidr_names): + dis_name = name + " | " + vpc_map[name] + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(dis_name))) + + while True: + res = click.prompt("\nEnter the index of VPC", default=1) + if (res > len(cidr_names)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + cidr_name = cidr_names[res - 1] + vpc_id = vpc_map[cidr_name] # TO BE USED + spec["resources"]["vpc_id"] = vpc_id + dis_name = cidr_name + " | " + vpc_id + click.echo("{} selected".format(highlight_text(dis_name))) + break + + choice = ( + click.prompt( + "\n{}(y/n)".format(highlight_text("Want to include security groups")), + default="n", + ) + if vpc_id + else "n" + ) + + if choice[0] == "y": + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Include Classic Security Groups")), + default="n", + ) + if choice[0] == "y": + sg_map = Obj.security_groups( + account_id, region_name, vpc_id, inc_classic_sg=True + ) + else: + sg_map = Obj.security_groups(account_id, region_name, vpc_id) + + spec["resources"]["security_group_list"] = [] + sg_names = list(sg_map.keys()) + + while True: + if not sg_names: + click.echo(highlight_text("\nNo security group available!!!")) + break + + else: + click.echo("\nChoose from given security groups: ") + for ind, name in enumerate(sg_names): + dis_name = sg_map[name] + " | " + name + click.echo( + "\t {}. {}".format(str(ind + 1), highlight_text(dis_name)) + ) + + while True: + res = click.prompt("\nEnter the index of security group", default=1) + if (res > len(sg_names)) or (res <= 0): + click.echo("Invalid index !!! 
") + + else: + sg_name = sg_names[res - 1] + sg_id = sg_map[sg_name] + dis_name = sg_id + " | " + sg_name + + security_group = {"security_group_id": sg_id} + + spec["resources"]["security_group_list"].append(security_group) + click.echo("{} selected".format(highlight_text(dis_name))) + sg_names.pop(res - 1) + break + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add more security_groups")), + default="n", + ) + if choice[0] == "n": + break + + choice = ( + click.prompt( + "\n{}(y/n)".format(highlight_text("Want to include subnets")), default="n" + ) + if (vpc_id and availability_zone) + else "n" + ) + + if choice[0] == "y": + + subnets = Obj.subnets(account_id, region_name, vpc_id, availability_zone) + if not subnets: + click.echo(highlight_text("\nNo subnet available!!!")) + + else: + click.echo("\nChoose from given subnets") + for ind, name in enumerate(subnets): + dis_name = name + " | " + vpc_id + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(dis_name))) + + while True: + res = click.prompt("\nEnter the index of subnet", default=1) + if (res > len(subnets)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + subnet_name = subnets[res - 1] + spec["resources"]["subnet_id"] = subnet_name + dis_name = subnet_name + " | " + vpc_id + click.echo("{} selected".format(highlight_text(dis_name))) + break + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to enter user data")), default="n" + ) + if choice[0] == "y": + user_data = click.prompt("\n\tEnter data", type=str) + spec["resources"]["user_data"] = user_data + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add any tags")), default="n" + ) + if choice[0] == "y": + tags = [] + while True: + key = click.prompt("\n\tKey") + value = click.prompt("\tValue") + + tag = {"key": key, "value": value} + tags.append(tag) + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add more tags")), default="n" + ) + if choice[0] == "n": + spec["resources"]["tag_list"] = tags + break + + click.echo("\n\t\t", nl=False) + click.secho("STORAGE DATA\n", bold=True, underline=True) + click.secho("\tRoot Disk", bold=True) + + spec["resources"]["block_device_map"] = {} + root_disk = {} + + if not root_device_name: + click.echo( + "\nRoot device is dependent on the machine image. Select a machine image to complete root disk configuration" + ) + else: + root_disk["device_name"] = root_device_name + click.echo( + "\nDevice for the root disk: {}".format(highlight_text(root_device_name)) + ) + + root_disk["size_gb"] = click.prompt("\nEnter the size of disk(in gb)", default=8) + + volume_types = list(aws.VOLUME_TYPE_MAP.keys()) + click.echo("\nChoose from given volume types: ") + if not volume_types: + click.echo(highlight_text("\nNo volume type available!!!")) + + else: + for index, name in enumerate(volume_types): + click.echo("\t{}. {}".format(index + 1, highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index for Volume Type", default=1) + if (res > len(volume_types)) or (res <= 0): + click.echo("Invalid index !!! 
") + + else: + root_disk["volume_type"] = aws.VOLUME_TYPE_MAP[volume_types[res - 1]] + click.echo("{} selected".format(highlight_text(volume_types[res - 1]))) + break + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to delete disk on termination")), + default="y", + ) + root_disk["delete_on_termination"] = True if choice[0] == "y" else False + spec["resources"]["block_device_map"]["root_disk"] = root_disk + + click.secho("\n\tOther disks", bold=True) + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add more disks")), default="n" + ) + + avl_device_names = list(aws.DeviceMountPoints.keys()) + spec["resources"]["block_device_map"]["data_disk_list"] = [] + while choice[0] == "y": + disk = {} + if not avl_device_names: + click.echo(highlight_text("\nNo device name available!!!")) + break + + click.echo("\nChoose from given Device Names: ") + for index, name in enumerate(avl_device_names): + click.echo("\t{}. {}".format(index + 1, highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index for Device Name", default=1) + if (res > len(avl_device_names)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + disk["device_name"] = aws.DeviceMountPoints[avl_device_names[res - 1]] + click.echo( + "{} selected".format(highlight_text(avl_device_names[res - 1])) + ) + avl_device_names.pop(res - 1) + break + + click.echo("\nChoose from given volume types: ") + for index, name in enumerate(volume_types): + click.echo("\t{}. {}".format(index + 1, highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index for Volume Type", default=1) + if (res > len(volume_types)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + disk["volume_type"] = aws.VOLUME_TYPE_MAP[volume_types[res - 1]] + click.echo("{} selected".format(highlight_text(volume_types[res - 1]))) + break + + disk["size_gb"] = click.prompt("\nEnter the size of disk(in gb)", default=8) + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to delete disk on termination")), + default="y", + ) + disk["delete_on_termination"] = True if choice[0] == "y" else False + + spec["resources"]["block_device_map"]["data_disk_list"].append(disk) + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add more disks")), default="n" + ) + + AwsVmProvider.validate_spec(spec) + click.secho("\nCreate spec for your AWS VM:\n", underline=True) + click.echo(highlight_text(yaml.dump(spec, default_flow_style=False))) diff --git a/framework/calm/dsl/providers/plugins/azure_vm/__init__.py b/framework/calm/dsl/providers/plugins/azure_vm/__init__.py new file mode 100644 index 0000000..192993c --- /dev/null +++ b/framework/calm/dsl/providers/plugins/azure_vm/__init__.py @@ -0,0 +1,4 @@ +from .main import AzureVmProvider + + +__all__ = ["AzureVmProvider"] diff --git a/framework/calm/dsl/providers/plugins/azure_vm/azure_vm_provider_spec.yaml.jinja2 b/framework/calm/dsl/providers/plugins/azure_vm/azure_vm_provider_spec.yaml.jinja2 new file mode 100644 index 0000000..30d110b --- /dev/null +++ b/framework/calm/dsl/providers/plugins/azure_vm/azure_vm_provider_spec.yaml.jinja2 @@ -0,0 +1,422 @@ + +{% macro azureVaultCertificate() -%} + +title: Azure Vault Certificate +type: object +properties: + certificate_url: + type: string + certificate_store: + type: string + type: + type: string + +{%- endmacro %} + + +{% macro azureSecrets() -%} + +title: Azure Secrets +type: object +properties: + source_vault_id: + type: string + type: + type: string + vault_certificates: + 
type: array + items: + {{ azureVaultCertificate() | indent(6) }} + +{%- endmacro %} + + +{% macro azureLinuxOSConfig() -%} + +title: Azure Linux OS Configuration +type: [object, "null"] +properties: + custom_data: + type: string + disable_password_auth: + type: boolean + default: False + public_keys: + type: array + items: + type: string + type: + type: string + +{%- endmacro %} + + +{% macro azureWinRMListener() -%} + +title: Azure Window RM Listener +type: object +properties: + protocol: + type: string + certificate_url: + type: string + type: + type: string + +{%- endmacro %} + + +{% macro azureAdditionalUnattendContent() -%} + +title: Azure Additional Unattend Content +type: object +properties: + setting_name: + type: string + xml_content: + type: string + type: + type: string + +{%- endmacro %} + + +{% macro azureWindowsOSConfig() -%} + +title: Azure Windows OS Configuration +type: [object, "null"] +properties: + winrm_listeners: + type: array + items: + {{ azureWinRMListener() | indent(6) }} + time_zone: + type: string + additional_unattend_content: + type: array + items: + {{ azureAdditionalUnattendContent() | indent(6) }} + provision_vm_agent: + type: boolean + default: True + auto_updates: + type: boolean + default: False + type: + type: string + +{%- endmacro %} + + +{% macro azureOSProfile() -%} + +title: AZURE OS Profile +type: object +properties: + windows_config: + {{ azureWindowsOSConfig() | indent(4) }} + linux_config: + {{ azureLinuxOSConfig() | indent(4) }} + secrets: + type: array + items: + {{ azureSecrets() | indent(6) }} + type: + type: string + +{%- endmacro %} + + +{% macro azureDisk() -%} + +title: AZURE Disk +type: object +properties: + name: + type: string + storage_name: + type: string + storage_type: + type: string + caching_type: + type: string + create_option: + type: string + default: Empty + size_in_gb: + type: integer + minimum: 1 + lun: + type: integer + minimum: 0 + type: + type: string + +{%- endmacro %} + + +{% macro azureOSDisk() -%} + +title: AZURE OS Disk +type: object +properties: + name: + type: string + storage_name: + type: string + storage_type: + type: string + caching_type: + type: string + create_option: + type: string + size_in_gb: + type: integer + default: -1 + lun: + type: integer + default: -1 + type: + type: string + +{%- endmacro %} + + +{% macro azureVMImage() -%} + +title: AZURE VM Image +type: object +properties: + sku: + type: string + publisher: + type: string + offer: + type: string + source_image_id: + type: string + use_custom_image: + type: boolean + version: + type: string + type: + type: string + source_image_type: + type: string + +{%- endmacro %} + + +{% macro azureStorageProfile() -%} + +title: AZURE Storage Profile +type: object +properties: + is_managed: + type: boolean + data_disk_list: + type: array + items: + {{ azureDisk() | indent(6) }} + type: + type: string + os_disk_details: + {{ azureOSDisk() | indent(4) }} + image_details: + {{ azureVMImage() | indent(4) }} + +{%- endmacro %} + + +{% macro azurePrivateIpInfo() -%} + +title: Azure Private Ip Information +type: object +properties: + ip_allocation_method: + type: string + ip_address: + type: string + type: + type: string + +{%- endmacro %} + + +{% macro azurePublicIpInfo() -%} + +title: Azure Public Ip Information +type: [object, "null"] +properties: + ip_allocation_method: + type: string + enum: ["Dynamic", "Static"] + ip_name: + type: string + dns_label: + type: string + type: + type: string + +{%- endmacro %} + + +{% macro azureNIC() -%} + +title: Azure NIC 
+type: object +properties: + nic_name: + type: string + vnet_name: + type: string + nsg_name: + type: string + subnet_name: + type: string + vnet_id: + type: string + nsg_id: + type: string + subnet_id: + type: string + private_ip_info: + {{ azurePrivateIpInfo() | indent(4) }} + public_ip_info: + {{ azurePublicIpInfo() | indent(4) }} + type: + type: string + +{%- endmacro %} + + +{% macro azureNWProfile() -%} + +title: Azure NW Profile +type: object +properties: + nic_list: + type: array + items: + {{ azureNIC() | indent(6)}} + type: + type: string + primary_nic: + type: integer + default: -1 + +{%- endmacro %} + + +{% macro azureHWProfile() -%} + +title: Azure HW Profile +type: object +properties: + vm_size: + type: string + max_data_disk_count: + type: integer + type: + type: string + +{%- endmacro %} + + +{% macro azureTag() -%} + +title: Azure Tag +type: object +properties: + key: + type: string + value: + type: string + type: + type: string + +{%- endmacro %} + + +{% macro azureResources() -%} + +title: AZURE Resources +type: object +properties: + vm_name: + type: string + resource_group: + type: string + rg_details: + {{ azureRgDetails() | indent(4) }} + rg_operation: + type: string + account_uuid: + type: string + type: + type: string + location: + type: string + availability_set_id: + type: string + availability_option: + type: string + availability_zone: + type: string + tag_list: + type: array + items: + {{ azureTag() | indent(6) }} + hw_profile: + {{ azureHWProfile() | indent(4) }} + storage_profile: + {{ azureStorageProfile() | indent(4) }} + os_profile: + {{ azureOSProfile() | indent(4) }} + nw_profile: + {{ azureNWProfile() | indent(4) }} + +{%- endmacro %} + + +{% macro azureCreateSpec() -%} + +title: AZURE CreateSpec +type: object +properties: + name: + type: string + type: + type: string + enum: [PROVISION_AZURE_VM, ''] + default: PROVISION_AZURE_VM + resources: + {{ azureResources() | indent(4) }} + +{%- endmacro %} + +{% macro azureRgDetails() -%} + +title: AZURE Resource group details +type: object +properties: + delete_rg_if_empty: + type: boolean + rg_location: + type: string + rg_tag_list: + type: array + type: + type: string +{%- endmacro %} + + +info: + title: AZURE_VM + description: AZURE VM spec payload using v3 API + version: 3.0.1 + +components: + schemas: + provider_spec: + {{ azureCreateSpec() | indent(6) }} diff --git a/framework/calm/dsl/providers/plugins/azure_vm/constants.py b/framework/calm/dsl/providers/plugins/azure_vm/constants.py new file mode 100644 index 0000000..5ba1cc1 --- /dev/null +++ b/framework/calm/dsl/providers/plugins/azure_vm/constants.py @@ -0,0 +1,30 @@ +class AZURE: + + VERSION = "v1" + URL = "azure_rm/{}".format(VERSION) + AVAILABILTY_SETS = "{}/availability_sets".format(URL) + AVAILABILITY_ZONES = "{}/availability_zones".format(URL) + SECURITY_GROUPS = "{}/security_groups".format(URL) + VIRTUAL_NETWORKS = "{}/virtual_networks".format(URL) + SUBNETS = "{}/subnets".format(URL) + RESOURCE_GROUPS = "{}/resource_groups".format(URL) + LOCATIONS = "{}/locations".format(URL) + VM_SIZES = "{}/vm_sizes".format(URL) + IMAGE_PUBLISHERS = "{}/image_publishers".format(URL) + IMAGE_OFFERS = "{}/image_offers".format(URL) + IMAGE_SKUS = "{}/image_skus".format(URL) + IMAGE_VERSIONS = "{}/image_versions".format(URL) + SUBSCRIPTION_IMAGES = "{}/subscription_images".format(URL) + IMAGES = "{}/images".format(URL) + + UNATTENDED_SETTINGS = ["FirstLogonCommands", "AutoLogon"] + PROTOCOLS = {"HTTP": "Http", "HTTPS": "Https"} + OPERATING_SYSTEMS = ["Linux", 
"Windows"] + CACHE_TYPES = {"None": "None", "Read Write": "ReadWrite", "Write Only": "WriteOnly"} + STORAGE_TYPES = {"Standard": "Standard_LRS", "Premium": "Premium_LRS"} + DISK_CREATE_OPTIONS = { + "ATTACH": "Attach", + "EMPTY": "Empty", + "FROMIMAGE": "FromImage", + } + ALLOCATION_METHODS = ["Dynamic", "Static"] diff --git a/framework/calm/dsl/providers/plugins/azure_vm/main.py b/framework/calm/dsl/providers/plugins/azure_vm/main.py new file mode 100644 index 0000000..f7ff0b9 --- /dev/null +++ b/framework/calm/dsl/providers/plugins/azure_vm/main.py @@ -0,0 +1,1314 @@ +import click +from ruamel import yaml +from distutils.version import LooseVersion as LV + +from calm.dsl.api import get_resource_api, get_api_client +from calm.dsl.providers import get_provider_interface +from calm.dsl.store import Version +from .constants import AZURE as azure + + +Provider = get_provider_interface() + + +class AzureVmProvider(Provider): + + provider_type = "AZURE_VM" + package_name = __name__ + spec_template_file = "azure_vm_provider_spec.yaml.jinja2" + + @classmethod + def create_spec(cls): + client = get_api_client() + create_spec(client) + + @classmethod + def get_api_obj(cls): + """returns object to call azure provider specific apis""" + + client = get_api_client() + return Azure(client.connection) + + +class Azure: + def __init__(self, connection): + self.connection = connection + + def resource_groups(self, account_id): + Obj = get_resource_api(azure.RESOURCE_GROUPS, self.connection) + payload = {"filter": "account_uuid=={};".format(account_id)} + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res_groups = [] + res = res.json() + for entity in res["entities"]: + res_groups.append(entity["status"]["name"]) + + return res_groups + + def availability_sets(self, account_id, resource_group): + Obj = get_resource_api(azure.AVAILABILTY_SETS, self.connection) + payload = { + "filter": "account_uuid=={};resource_group=={}".format( + account_id, resource_group + ) + } + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + name_id_map = {} + res = res.json() + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + entity_uuid = entity["status"]["resources"]["id"] + name_id_map[name] = entity_uuid + + return name_id_map + + def locations(self, account_id): + Obj = get_resource_api(azure.LOCATIONS, self.connection) + payload = {"filter": "account_uuid=={};".format(account_id)} + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + name_value_map = {} + for entity in res["entities"]: + name = entity["status"]["resources"]["displayName"] + value = entity["status"]["resources"]["name"] + name_value_map[name] = value + + return name_value_map + + def availability_zones(self, account_id, resource_group, location): + Obj = get_resource_api(azure.AVAILABILITY_ZONES, self.connection) + payload = { + "filter": "account_uuid=={};resource_group=={};location=={}".format( + account_id, resource_group, location + ) + } + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + name_value_map = dict() + for entity in res["entities"]: + if "zones" in entity["status"]["resources"]: + zones = entity["status"]["resources"]["zones"] + for zone in zones: + name_value_map[zone["name"]] = zone["value"] + + return name_value_map + + def 
hardware_profiles(self, account_id, location): + Obj = get_resource_api(azure.VM_SIZES, self.connection) + payload = { + "filter": "account_uuid=={};location=={}".format(account_id, location) + } + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + hwprofiles = {} + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + max_disk_count = entity["status"]["resources"]["maxDataDiskCount"] + hwprofiles[name] = max_disk_count + + return hwprofiles + + def custom_images(self, account_id, location): + Obj = get_resource_api(azure.SUBSCRIPTION_IMAGES, self.connection) + payload = { + "filter": "account_uuid=={};location=={}".format(account_id, location) + } + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + name_id_map = {} + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + id = entity["status"]["resources"]["id"] + name_id_map[name] = id + + return name_id_map + + def image_publishers(self, account_id, location): + Obj = get_resource_api(azure.IMAGE_PUBLISHERS, self.connection) + payload = { + "filter": "account_uuid=={};location=={}".format(account_id, location) + } + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + entity_list = [] + for entity in res["entities"]: + name = entity["status"]["name"] + entity_list.append(name) + + return entity_list + + def image_offers(self, account_id, location, publisher): + Obj = get_resource_api(azure.IMAGE_OFFERS, self.connection) + payload = { + "filter": "account_uuid=={};location=={};publisher=={}".format( + account_id, location, publisher + ) + } + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + entity_list = [] + for entity in res["entities"]: + name = entity["status"]["name"] + entity_list.append(name) + + return entity_list + + def image_skus(self, account_id, location, publisher, offer): + Obj = get_resource_api(azure.IMAGE_SKUS, self.connection) + payload = { + "filter": "account_uuid=={};location=={};publisher=={};offer=={}".format( + account_id, location, publisher, offer + ) + } + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + entity_list = [] + for entity in res["entities"]: + name = entity["status"]["name"] + entity_list.append(name) + + return entity_list + + def image_versions(self, account_id, location, publisher, offer, sku): + Obj = get_resource_api(azure.IMAGE_VERSIONS, self.connection) + payload = { + "filter": "account_uuid=={};location=={};publisher=={};offer=={};sku=={}".format( + account_id, location, publisher, offer, sku + ) + } + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + entity_list = [] + for entity in res["entities"]: + name = entity["status"]["name"] + entity_list.append(name) + + return entity_list + + def security_groups(self, account_id, resource_group, location): + Obj = get_resource_api(azure.SECURITY_GROUPS, self.connection) + payload = { + "filter": "account_uuid=={};location=={};resource_group=={}".format( + account_id, location, resource_group + ) + } + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + 
entity_list = [] + for entity in res["entities"]: + name = entity["status"]["name"] + entity_list.append(name) + + return entity_list + + def virtual_networks(self, account_id, resource_group, location): + Obj = get_resource_api(azure.VIRTUAL_NETWORKS, self.connection) + payload = { + "filter": "account_uuid=={};location=={};resource_group=={}".format( + account_id, location, resource_group + ) + } + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + entity_list = [] + for entity in res["entities"]: + name = entity["status"]["name"] + entity_list.append(name) + + return entity_list + + def subnets(self, account_id, resource_group, virtual_network): + Obj = get_resource_api(azure.SUBNETS, self.connection) + payload = { + "filter": "account_uuid=={};virtual_network=={};resource_group=={}".format( + account_id, virtual_network, resource_group + ) + } + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + entity_list = [] + for entity in res["entities"]: + name = entity["status"]["name"] + entity_list.append(name) + + return entity_list + + +def highlight_text(text, **kwargs): + """Highlight text in our standard format""" + return click.style("{}".format(text), fg="blue", bold=False, **kwargs) + + +def create_spec(client): + + CALM_VERSION = Version.get_version("Calm") + spec = {} + Obj = Azure(client.connection) + + account_id = "" + resource_group = "" + location = "" + vm_os = "" + + # VM Configuration + + projects = client.project.get_name_uuid_map() + project_list = list(projects.keys()) + + if not project_list: + click.echo(highlight_text("No projects found!!!")) + click.echo(highlight_text("Please add first")) + return + + click.echo("\nChoose from given projects:") + for ind, name in enumerate(project_list): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + project_id = "" + while True: + ind = click.prompt("\nEnter the index of project", default=1) + if (ind > len(project_list)) or (ind <= 0): + click.echo("Invalid index !!! ") + + else: + project_id = projects[project_list[ind - 1]] + click.echo("{} selected".format(highlight_text(project_list[ind - 1]))) + break + + res, err = client.project.read(project_id) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + project = res.json() + accounts = project["status"]["resources"]["account_reference_list"] + + reg_accounts = [] + for account in accounts: + reg_accounts.append(account["uuid"]) + + payload = {"filter": "type==azure"} + res, err = client.account.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + azure_accounts = {} + + for entity in res["entities"]: + entity_name = entity["metadata"]["name"] + entity_id = entity["metadata"]["uuid"] + if entity_id in reg_accounts: + azure_accounts[entity_name] = entity_id + + accounts = list(azure_accounts.keys()) + spec["resources"] = {} + + click.echo("\nChoose from given AZURE accounts") + for ind, name in enumerate(accounts): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of account to be used", default=1) + if (res > len(accounts)) or (res <= 0): + click.echo("Invalid index !!! 
") + + else: + account_name = accounts[res - 1] + account_id = azure_accounts[account_name] # TO BE USED + + spec["resources"]["account_uuid"] = account_id + click.echo("{} selected".format(highlight_text(account_name))) + break + + if not account_id: + click.echo( + highlight_text("No azure account found registered in this project !!!") + ) + click.echo("Please add one !!!") + return + + click.echo("\nChoose from given Operating System types:") + os_types = azure.OPERATING_SYSTEMS + + for ind, name in enumerate(os_types): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of operating system", default=1) + if (ind > len(os_types)) or (ind <= 0): + click.echo("Invalid index !!! ") + + else: + vm_os = os_types[ind - 1] + click.echo("{} selected".format(highlight_text(vm_os))) + break + + click.echo("\n\t\t", nl=False) + click.secho("VM Configuration", bold=True, underline=True) + + vm_name = "vm-@@{calm_unique_hash}@@-@@{calm_array_index}@@" + spec["resources"]["vm_name"] = click.prompt( + "\nEnter instance name", default=vm_name + ) + + # Add resource group + resource_groups = Obj.resource_groups(account_id) + if not resource_groups: + click.echo("\n{}".format(highlight_text("No resource group present"))) + + else: + click.echo("\nChoose from given resource groups") + for ind, name in enumerate(resource_groups): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of resource group", default=1) + if (res > len(resource_groups)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + resource_group = resource_groups[res - 1] # TO BE USED + spec["resources"]["resource_group"] = resource_group + click.echo("{} selected".format(highlight_text(resource_group))) + break + + # Add location + locations = Obj.locations(account_id) + if not locations: + click.echo("\n{}".format(highlight_text("No location group present"))) + + else: + click.echo("\nChoose from given locations") + location_names = list(locations.keys()) + for ind, name in enumerate(location_names): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of resource group", default=1) + if (res > len(location_names)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + location = location_names[res - 1] + click.echo("{} selected".format(highlight_text(location))) + location = locations[location] + spec["resources"]["location"] = location + break + + if LV(CALM_VERSION) < LV("3.2.0"): + # Add availabililty set + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add a availabilty set")), + default="n", + ) + if choice[0] == "y": + availability_sets = Obj.availability_sets(account_id, resource_group) + avl_set_list = list(availability_sets.keys()) + + if not avl_set_list: + click.echo("\n{}".format(highlight_text("No availability_set present"))) + + else: + click.echo("\nChoose from given availabilty set") + for ind, name in enumerate(avl_set_list): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt( + "\nEnter the index of availabilty set", default=1 + ) + if (res > len(avl_set_list)) or (res <= 0): + click.echo("Invalid index !!! 
") + + else: + avl_set = avl_set_list[res - 1] + spec["resources"]["availability_set_id"] = availability_sets[ + avl_set + ] + click.echo("{} selected".format(highlight_text(avl_set))) + break + + else: + # Add availability option + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to select availability options")), + default="n", + ) + if choice[0] == "y": + availability_options = ["Availability Sets", "Availability Zones"] + click.echo("\nChoose from given availability options") + for ind, name in enumerate(availability_options): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of option", default=1) + if (res > len(availability_options)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + spec["resources"]["availability_option"] = availability_options[ + res - 1 + ].replace(" ", "") + click.echo( + "{} selected".format( + highlight_text(availability_options[res - 1]) + ) + ) + if res == 1: + availability_sets = Obj.availability_sets( + account_id, spec["resources"]["resource_group"] + ) + avl_set_list = list(availability_sets.keys()) + + if not avl_set_list: + click.echo( + "\n{}".format( + highlight_text("No availability_set present") + ) + ) + + else: + click.echo("\nChoose from given availabilty set") + for ind, name in enumerate(avl_set_list): + click.echo( + "\t {}. {}".format( + str(ind + 1), highlight_text(name) + ) + ) + + while True: + res = click.prompt( + "\nEnter the index of availabilty set", default=1 + ) + if (res > len(avl_set_list)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + avl_set = avl_set_list[res - 1] + spec["resources"][ + "availability_set_id" + ] = availability_sets[avl_set] + click.echo( + "{} selected".format(highlight_text(avl_set)) + ) + break + + else: + availability_zones = Obj.availability_zones( + account_id, + spec["resources"]["resource_group"], + spec["resources"]["location"], + ) + if not availability_zones: + click.echo( + "\n{}".format( + highlight_text( + "Selected location does not support Availability Zones" + ) + ) + ) + else: + click.echo("\nChoose from the given zones") + zones = list(availability_zones.keys()) + for ind, name in enumerate(zones): + click.echo( + "\t {}. {}".format( + str(ind + 1), highlight_text(name) + ) + ) + + while True: + res = click.prompt( + "\nEnter the index of zone", default=1 + ) + if (res > len(availability_zones)) or (res <= 0): + click.echo("Invalid index !!! ") + else: + click.echo( + "{} selected".format( + highlight_text(zones[res - 1]) + ) + ) + spec["resources"][ + "availability_zone" + ] = availability_zones[zones[res - 1]] + break + break + + hardware_profiles = Obj.hardware_profiles(account_id, location) + if not hardware_profiles: + click.echo("\n{}".format(highlight_text("No hardware profile present"))) + + else: + click.echo("\nChoose from given Hardware Profiles") + hw_profile_names = list(hardware_profiles.keys()) + + for ind, name in enumerate(hw_profile_names): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of Hardware Profile", default=1) + if (res > len(hw_profile_names)) or (res <= 0): + click.echo("Invalid index !!! 
") + + else: + hw_profile = hw_profile_names[res - 1] + click.echo("{} selected".format(highlight_text(hw_profile))) + spec["resources"]["hw_profile"] = { + "vm_size": hw_profile, + "max_data_disk_count": hardware_profiles[hw_profile], + } + break + + # OS Profile + spec["resources"]["os_profile"] = get_os_profile(vm_os) + + # Storage Profile + spec["resources"]["storage_profile"] = get_storage_profile( + Obj, account_id, location + ) + + # Network Profile + spec["resources"]["nw_profile"] = {} + spec["resources"]["nw_profile"]["nic_list"] = get_nw_profile( + Obj, account_id, resource_group, location + ) + + # Add tags + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add any tags")), default="n" + ) + if choice[0] == "y": + tags = [] + while True: + key = click.prompt("\n\tKey") + value = click.prompt("\tValue") + + tag = {"key": key, "value": value} + tags.append(tag) + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add more tags")), default="n" + ) + if choice[0] == "n": + spec["resources"]["tag_list"] = tags + break + + AzureVmProvider.validate_spec(spec) + click.secho("\nCreate spec for your AZURE VM:\n", underline=True) + click.echo(highlight_text(yaml.dump(spec, default_flow_style=False))) + + +def get_os_profile(os_type): + + click.echo("\n\t\t", nl=False) + click.secho("OS PROFILE DETAILS", bold=True, underline=True) + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add secrets")), default="n" + ) + + res = {} + res["secrets"] = [] + certificate_list = [] + while choice[0] == "y": + vault_id = click.prompt("\n\tEnter Vault ID ", default="") + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Add Vault Certificate Details")), + default="n", + ) + + vault_certificates = [] + while choice[0] == "y": + certificate_store = "" + certificate_url = click.prompt("\n\tEnter Certificate URL", default="URL") + if os_type == "Windows": + certificate_store = click.prompt( + "\n\tEnter Certificate Store", default="Store" + ) + + vault_certificates.append( + { + "certificate_url": certificate_url, + "certificate_store": certificate_store, + } + ) + + if certificate_url: + certificate_list.append(certificate_url) + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Add more certificates")), default="n" + ) + + res["secrets"].append( + {"source_vault_id": vault_id, "vault_certificates": vault_certificates} + ) + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Add more secrets")), default="n" + ) + + if os_type == "Linux": + res["linux_config"] = get_linux_config() + + else: + res["windows_config"] = get_windows_config(certificate_list) + + return res + + +def get_linux_config(): + + custom_data = click.prompt("\nEnter Cloud Init Script", default="") + return {"custom_data": custom_data} + + +def get_windows_config(certificate_list): + + provision_vm_agent = click.prompt( + "\n{}(y/n)".format(highlight_text("Enable Provision Windows Guest Agent")), + default="n", + ) + provision_vm_agent = True if provision_vm_agent[0] == "y" else False + auto_updates = click.prompt( + "\n{}(y/n)".format(highlight_text("Enable Automatic OS Upgrades")), default="n" + ) + auto_updates = True if auto_updates[0] == "y" else False + + unattend_content = [] + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add ADDITIONAL UNATTENDED CONTENT")), + default="n", + ) + settings = azure.UNATTENDED_SETTINGS + while (choice[0] == "y") and settings: + click.echo("\nChoose from given Setting Names") + setting = "" + 
for ind, name in enumerate(settings): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of Setting", default=1) + if (res > len(settings)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + setting = settings[res - 1] + settings.pop(res - 1) + click.echo("{} selected".format(highlight_text(setting))) + break + + xml_content = click.prompt( + "\nEnter XML Content(Please use <{}> as the root element)".format(setting), + default="", + ) + unattend_content.append({"setting_name": setting, "xml_content": xml_content}) + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add more Unattended content")), + default="n", + ) + + winrm_listensers = [] + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add WINRM LISTENERS")), default="n" + ) + protocols = list(azure.PROTOCOLS.keys()) + while (choice[0] == "y") and protocols: + click.echo("\nChoose from given Protocols") + protocol = "" + for ind, name in enumerate(protocols): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of protocol", default=1) + if (res > len(protocols)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + protocol = protocols[res - 1] + protocols.pop(res - 1) + click.echo("{} selected".format(highlight_text(protocol))) + break + + if protocol == "HTTPS": + cert_url = "" + click.echo("Choose from given certificate URLs") + for ind, name in enumerate(certificate_list): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of certificate URL", default=1) + if (res > len(certificate_list)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + cert_url = certificate_list[res - 1] + click.echo("{} selected".format(highlight_text(cert_url))) + break + + winrm_listensers.append( + {"protocol": azure.PROTOCOLS[protocol], "certificate_url": cert_url} + ) + + else: + winrm_listensers.append({"protocol": azure.PROTOCOLS[protocol]}) + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add more Winrm Listeners")), + default="n", + ) + + return { + "winrm_listeners": winrm_listensers, + "additional_unattend_content": unattend_content, + "provision_vm_agent": provision_vm_agent, + "auto_updates": auto_updates, + } + + +def get_storage_profile(azure_obj, account_id, location): + + click.echo("\n\t\t", nl=False) + click.secho("STORAGE PROFILE DETAILS", bold=True, underline=True) + + click.secho("\n1. VM Image Details", underline=True) + vm_image = {} + + use_custom_image = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to use custom image")), default="n" + ) + use_custom_image = True if use_custom_image[0] == "y" else False + + if use_custom_image: + vm_image = get_custom_vm_image(azure_obj, account_id, location) + + else: + vm_image = get_non_custom_vm_image(azure_obj, account_id, location) + + click.secho("\n2. OS Disk Details", underline=True) + os_disk = get_os_disk(use_custom_image) + + click.secho("\n3. 
Data Disk Details", underline=True) + data_disks = get_data_disks() + + return { + "is_managed": True, # Hardcoded in UI + "os_disk_details": os_disk, + "data_disk_list": data_disks, + "image_details": vm_image, + } + + +def get_data_disks(): + + disks = [] + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add data disks")), default="n" + ) + + disk_index = 0 + while choice[0] == "y": + click.echo("\n\t\t", nl=False) + click.secho("Data-Disk {}".format(disk_index + 1), underline=True) + + storage_type = "" + disk_name = "data-disk-@@{calm_unique_hash}@@-@@{calm_array_index}@@-" + str( + disk_index + ) + disk_name = click.prompt("\nEnter data disk name", default=disk_name) + + # Add storage type + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add storage type to disk")), + default="n", + ) + if choice[0] == "y": + storage_types = azure.STORAGE_TYPES + display_names = list(storage_types.keys()) + click.echo("\nChoose from given storage types") + for ind, name in enumerate(display_names): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of storage type", default=1) + if (res > len(display_names)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + storage_type = display_names[res - 1] + click.echo("{} selected".format(highlight_text(storage_type))) + storage_type = storage_types[storage_type] + break + + # Add cache type + cache_types = azure.CACHE_TYPES + display_names = list(cache_types.keys()) + click.echo("\nChoose from given cache types") + for ind, name in enumerate(display_names): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of cache type", default=1) + if (res > len(display_names)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + cache_type = display_names[res - 1] + click.echo("{} selected".format(highlight_text(cache_type))) + cache_type = cache_types[cache_type] + break + + # Add disk size + disk_size = click.prompt("\nEnter the size for disk(in GiB)", default=1) + + # Add disk lun + disk_lun = click.prompt("\nEnter the Disk LUN", default=0) + + disks.append( + { + "size_in_gb": disk_size, + "name": disk_name, + "storage_type": storage_type, + "caching_type": cache_type, + "lun": disk_lun, + } + ) + + disk_index += 1 + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add more data disks")), + default="n", + ) + + return disks + + +def get_os_disk(use_custom_image): + + disk_create_option = "" + cache_type = "" + storage_type = "" + + disk_name = "os-@@{calm_unique_hash}@@-@@{calm_array_index}@@-disk" + disk_name = click.prompt("\nEnter os disk name", default=disk_name) + + # Add storage type + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add storage type to os disk")), + default="n", + ) + if choice[0] == "y": + storage_types = azure.STORAGE_TYPES + display_names = list(storage_types.keys()) + click.echo("\nChoose from given storage types") + for ind, name in enumerate(display_names): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of storage type", default=1) + if (res > len(display_names)) or (res <= 0): + click.echo("Invalid index !!! 
") + + else: + storage_type = display_names[res - 1] + click.echo("{} selected".format(highlight_text(storage_type))) + storage_type = storage_types[storage_type] + break + + # Add cache type + cache_types = azure.CACHE_TYPES + display_names = list(cache_types.keys()) + click.echo("\nChoose from given cache types") + for ind, name in enumerate(display_names): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of cache type", default=1) + if (res > len(display_names)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + cache_type = display_names[res - 1] + click.echo("{} selected".format(highlight_text(cache_type))) + cache_type = cache_types[cache_type] + break + + # Add Disk Create Option + if use_custom_image: + disk_create_option = azure.DISK_CREATE_OPTIONS["FROMIMAGE"] + click.secho( + "\nNote: In case of custom vm image, Os Disk Create Option : {}".format( + disk_create_option + ) + ) + + else: + disk_create_options = azure.DISK_CREATE_OPTIONS + display_names = list(disk_create_options.keys()) + click.echo("\nChoose from given disk create option") + for ind, name in enumerate(display_names): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of disk create option", default=1) + if (res > len(display_names)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + disk_create_option = display_names[res - 1] + click.echo("{} selected".format(highlight_text(disk_create_option))) + disk_create_option = disk_create_options[disk_create_option] + break + + return { + "name": disk_name, + "storage_type": storage_type, + "caching_type": cache_type, + "create_option": disk_create_option, + } + + +def get_non_custom_vm_image(azure_obj, account_id, location): + + image_publisher = "" + image_offer = "" + image_sku = "" + image_version = "" + + # Add image publisher + publishers = azure_obj.image_publishers(account_id, location) + if not publishers: + click.echo("\n{}".format(highlight_text("No image publisher present"))) + + else: + click.echo("\nChoose from given image publisher") + for ind, name in enumerate(publishers): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of image publisher", default=1) + if (res > len(publishers)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + image_publisher = publishers[res - 1] + click.echo("{} selected".format(highlight_text(image_publisher))) + break + + # Add image offer + image_offers = azure_obj.image_offers(account_id, location, image_publisher) + if not image_offers: + click.echo("\n{}".format(highlight_text("No image offer present"))) + + else: + click.echo("\nChoose from given image offer") + for ind, name in enumerate(image_offers): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of image offer", default=1) + if (res > len(image_offers)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + image_offer = image_offers[res - 1] + click.echo("{} selected".format(highlight_text(image_offer))) + break + + # Add Image SKU + image_skus = azure_obj.image_skus( + account_id, location, image_publisher, image_offer + ) + if not image_skus: + click.echo("\n{}".format(highlight_text("No image sku present"))) + + else: + click.echo("\nChoose from given image sku") + for ind, name in enumerate(image_skus): + click.echo("\t {}. 
{}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of image sku", default=1) + if (res > len(image_skus)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + image_sku = image_skus[res - 1] + click.echo("{} selected".format(highlight_text(image_sku))) + break + + # Add Image Version + image_versions = azure_obj.image_versions( + account_id, location, image_publisher, image_offer, image_sku + ) + if not image_versions: + click.echo("\n{}".format(highlight_text("No image version present"))) + + else: + click.echo("\nChoose from given image version") + for ind, name in enumerate(image_versions): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of image version", default=1) + if (res > len(image_versions)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + image_version = image_versions[res - 1] + click.echo("{} selected".format(highlight_text(image_version))) + break + + return { + "sku": image_sku, + "publisher": image_publisher, + "offer": image_offer, + "version": image_version, + "use_custom_image": False, + } + + +def get_custom_vm_image(azure_obj, account_id, location): + custom_image_id = "" + custom_images = azure_obj.custom_images(account_id, location) + custom_image_names = list(custom_images.keys()) + + if not custom_image_names: + click.echo("\n{}".format(highlight_text("No custom image present"))) + + else: + click.echo("\nChoose from given custom images") + for ind, name in enumerate(custom_image_names): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of custom image", default=1) + if (res > len(custom_image_names)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + custom_image = custom_image_names[res - 1] + custom_image_id = custom_images[custom_image] + click.echo("{} selected".format(highlight_text(custom_image))) + break + + return {"source_image_id": custom_image_id, "use_custom_image": True} + + +def get_nw_profile(azure_obj, account_id, resource_grp, location): + + click.echo("\n\t\t", nl=False) + click.secho("NETWORK PROFILE DETAILS", bold=True, underline=True) + nics = [] + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add NICs")), default="n" + ) + + nic_index = 0 + while choice[0] == "y": + click.echo("\n\t\t", nl=False) + click.secho("Nic {}".format(nic_index + 1), underline=True) + + nic_name = "nic-@@{calm_unique_hash}@@-@@{calm_array_index}@@-" + str(nic_index) + nic_name = click.prompt("\nEnter nic name", default=nic_name) + + security_group = "" + virtual_network = "" + subnet = "" + + # Add security group + security_groups = azure_obj.security_groups(account_id, resource_grp, location) + if not security_groups: + click.echo("\n{}".format(highlight_text("No security group present"))) + + else: + click.echo("\nChoose from given security groups") + for ind, name in enumerate(security_groups): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of security group", default=1) + if (res > len(security_groups)) or (res <= 0): + click.echo("Invalid index !!! 
") + + else: + security_group = security_groups[res - 1] + click.echo("{} selected".format(highlight_text(security_group))) + break + + # Add virtual network + virtual_networks = azure_obj.virtual_networks( + account_id, resource_grp, location + ) + if not virtual_networks: + click.echo("\n{}".format(highlight_text("No virtual network present"))) + + else: + click.echo("\nChoose from given virtual networtks") + for ind, name in enumerate(virtual_networks): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of virtual network", default=1) + if (res > len(virtual_networks)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + virtual_network = virtual_networks[res - 1] + click.echo("{} selected".format(highlight_text(virtual_network))) + break + + # Add subnet + subnets = azure_obj.subnets(account_id, resource_grp, virtual_network) + if not subnets: + click.echo("\n{}".format(highlight_text("No subnet present"))) + + else: + click.echo("\nChoose from given subnets") + for ind, name in enumerate(subnets): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of subnet", default=1) + if (res > len(subnets)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + subnet = subnets[res - 1] + click.echo("{} selected".format(highlight_text(subnet))) + break + + click.secho("\nPublic IP Config", underline=True) + public_ip_info = get_public_ip_info(nic_index) + + click.secho("\nPrivate IP Config", underline=True) + private_ip_info = get_private_ip_info() + + nics.append( + { + "nsg_name": security_group, + "vnet_name": virtual_network, + "private_ip_info": private_ip_info, + "nic_name": nic_name, + "subnet_name": subnet, + "public_ip_info": public_ip_info, + } + ) + + nic_index += 1 + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add more nics")), default="n" + ) + + return nics + + +def get_public_ip_info(nic_index=0): + + ip_name = "public-ip-@@{calm_unique_hash}@@-@@{calm_array_index}@@-" + str( + nic_index + ) + ip_name = click.prompt("\nEnter public ip name", default=ip_name) + + dns_label = "dns-@@{calm_unique_hash}@@-@@{calm_array_index}@@-" + str(nic_index) + dns_label = click.prompt("\nEnter DNS Label", default=dns_label) + + allocation_methods = azure.ALLOCATION_METHODS + click.echo("\nChoose from given ip allocation method") + for ind, name in enumerate(allocation_methods): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of allocation methods", default=1) + if (res > len(allocation_methods)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + allocation_method = allocation_methods[res - 1] + click.echo("{} selected".format(highlight_text(allocation_method))) + break + + return { + "ip_allocation_method": allocation_method, + "dns_label": dns_label, + "ip_name": ip_name, + } + + +def get_private_ip_info(): + + allocation_method = "" + ip_address = "" + + allocation_methods = azure.ALLOCATION_METHODS + click.echo("\nChoose from given ip allocation method") + for ind, name in enumerate(allocation_methods): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of allocation methods", default=1) + if (res > len(allocation_methods)) or (res <= 0): + click.echo("Invalid index !!! 
") + + else: + allocation_method = allocation_methods[res - 1] + click.echo("{} selected".format(highlight_text(allocation_method))) + break + + if allocation_method == "Static": + ip_address = click.prompt("\nEnter IP Address", default="") + + return {"ip_allocation_method": allocation_method, "ip_address": ip_address} diff --git a/framework/calm/dsl/providers/plugins/existing_vm/__init__.py b/framework/calm/dsl/providers/plugins/existing_vm/__init__.py new file mode 100644 index 0000000..aa68b18 --- /dev/null +++ b/framework/calm/dsl/providers/plugins/existing_vm/__init__.py @@ -0,0 +1,4 @@ +from .main import ExistingVmProvider + + +__all__ = ["ExistingVmProvider"] diff --git a/framework/calm/dsl/providers/plugins/existing_vm/existing_vm_provider_spec.yaml.jinja2 b/framework/calm/dsl/providers/plugins/existing_vm/existing_vm_provider_spec.yaml.jinja2 new file mode 100644 index 0000000..1d091ec --- /dev/null +++ b/framework/calm/dsl/providers/plugins/existing_vm/existing_vm_provider_spec.yaml.jinja2 @@ -0,0 +1,22 @@ +{% macro EMCreateSpec() -%} +title: Existing Machine CreateSpec +type: object +properties: + type: + type: string + enum: [PROVISION_EXISTING_MACHINE, ''] + default: PROVISION_EXISTING_MACHINE + address: + type: string + +{%- endmacro %} + +info: + title: EXISTING_VM + description: Existing machine + version: 1.0.0 + +components: + schemas: + provider_spec: + {{ EMCreateSpec() | indent(6) }} diff --git a/framework/calm/dsl/providers/plugins/existing_vm/main.py b/framework/calm/dsl/providers/plugins/existing_vm/main.py new file mode 100644 index 0000000..d9d39d9 --- /dev/null +++ b/framework/calm/dsl/providers/plugins/existing_vm/main.py @@ -0,0 +1,33 @@ +import click +from ruamel import yaml + +from calm.dsl.providers import get_provider_interface + + +Provider = get_provider_interface() + + +# Implements Provider interface for EXISTING_VM +class ExistingVmProvider(Provider): + + package_name = __name__ + provider_type = "EXISTING_VM" + spec_template_file = "existing_vm_provider_spec.yaml.jinja2" + + @classmethod + def create_spec(cls): + create_spec() + + +def highlight_text(text, **kwargs): + """Highlight text in our standard format""" + return click.style("{}".format(text), fg="blue", bold=False, **kwargs) + + +def create_spec(): + + addr = click.prompt("Enter the address :", default="") + spec = {"address": addr} + + click.secho("\nCreate spec for your Existing Machine VM:\n", underline=True) + click.echo(highlight_text(yaml.dump(spec, default_flow_style=False))) diff --git a/framework/calm/dsl/providers/plugins/gcp_vm/__init__.py b/framework/calm/dsl/providers/plugins/gcp_vm/__init__.py new file mode 100644 index 0000000..b364b5c --- /dev/null +++ b/framework/calm/dsl/providers/plugins/gcp_vm/__init__.py @@ -0,0 +1,4 @@ +from .main import GcpVmProvider + + +__all__ = ["GcpVmProvider"] diff --git a/framework/calm/dsl/providers/plugins/gcp_vm/constants.py b/framework/calm/dsl/providers/plugins/gcp_vm/constants.py new file mode 100644 index 0000000..404dcbb --- /dev/null +++ b/framework/calm/dsl/providers/plugins/gcp_vm/constants.py @@ -0,0 +1,39 @@ +class GCP: + + STORAGE_DISK_MAP = { + "local-ssd": "SCRATCH", + "pd-standard": "PERSISTENT", + "pd-ssd": "PERSISTENT", + } + STORAGE_TYPES = ["pd-standard", "pd-ssd"] + ADDITIONAL_DISK_STORAGE_TYPES = ["pd-standard", "pd-ssd", "local-ssd"] + DISK_INTERFACES = ["SCSI", "NVMe"] + OPERATING_SYSTEMS = ["Linux", "Windows"] + SCOPES = { + "Default Access": [ + "https://www.googleapis.com/auth/devstorage.read_only", + 
"https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + "https://www.googleapis.com/auth/servicecontrol", + "https://www.googleapis.com/auth/service.management.readonly", + "https://www.googleapis.com/auth/trace.append", + ], + "Full Access": ["https://www.googleapis.com/auth/cloud-platform"], + } + + NETWORK_CONFIG_MAP = {"ONE_TO_ONE_NAT": "ONE_TO_ONE_NAT"} + + VERSION = "v1" + RELATIVE_URL = "gcp/{}".format(VERSION) + ZONES = "{}/zones".format(RELATIVE_URL) + MACHINE_TYPES = "{}/machine_types".format(RELATIVE_URL) + PERSISTENT_DISKS = "{}/persistent_disks".format(RELATIVE_URL) + DISK_IMAGES = "{}/images".format(RELATIVE_URL) + NETWORKS = "{}/networks".format(RELATIVE_URL) + SUBNETWORKS = "{}/subnetworks".format(RELATIVE_URL) + FIREWALLS = "{}/firewalls".format(RELATIVE_URL) + SNAPSHOTS = "{}/snapshots".format(RELATIVE_URL) + + PROJECT_ID = "nucalm-devopos" + COMPUTE_URL = "https://www.googleapis.com/compute/v1" + PROJECT_URL = "{}/projects/{}".format(COMPUTE_URL, PROJECT_ID) diff --git a/framework/calm/dsl/providers/plugins/gcp_vm/gcp_vm_provider_spec.yaml.jinja2 b/framework/calm/dsl/providers/plugins/gcp_vm/gcp_vm_provider_spec.yaml.jinja2 new file mode 100644 index 0000000..c1a15bd --- /dev/null +++ b/framework/calm/dsl/providers/plugins/gcp_vm/gcp_vm_provider_spec.yaml.jinja2 @@ -0,0 +1,343 @@ +{% macro GcpServiceAccount() -%} + +title: GCP Service Accounts +type: object +properties: + email: + type: string + scopes: + type: array + items: + type: string + type: + type: string + +{%- endmacro %} + + +{% macro GcpEncryptionKey() -%} + +title: GCP Encryption Key +type: object +properties: + rawKey: + type: string + type: + type: string + +{%- endmacro %} + + +{% macro GcpDiskInitParams() -%} + +title: GCP Disk Initialize Params +type: object +properties: + diskName: + type: string + sourceImage: + type: string + diskSizeGb: + type: integer + default: -1 + diskType: + type: string + type: + type: string + sourceImageEncryptionKey: + {{ GcpEncryptionKey() | indent(4) }} + +{%- endmacro %} + + +{% macro GcpDisk() -%} + +title: GCP Disk +type: object +properties: + type: + type: string + disk_type: + type: string + mode: + type: string + deviceName: + type: string + source: + type: string + interface: + type: string + boot: + type: boolean + default: False + initializeParams: + {{ GcpDiskInitParams() | indent(4) }} + autoDelete: + type: boolean + default: False + diskEncryptionKey: + {{ GcpEncryptionKey() | indent(4) }} + +{%- endmacro %} + + +{% macro GcpBlankDisk() -%} + +title: GCP Blank Disk +type: object +properties: + disk_type: + type: string + autoDelete: + type: boolean + name: + type: string + sizeGb: + type: integer + default: -1 + type: + type: string + +{%- endmacro %} + + +{% macro GcpAccessConfig() -%} + +title: GCP Access Configuration +type: object +properties: + config_type: + type: string + name: + type: string + natIP: + type: string + type: + type: string + +{%- endmacro %} + + +{% macro GcpIpRange() -%} + +title: GCP IP Range +type: object +properties: + ipCidrRange: + type: string + subnetworkRangeName: + type: string + type: + type: string + +{%- endmacro %} + + +{% macro GcpNetworkInterface() -%} + +title: GCP Network Interface +type: object +properties: + type: + type: string + network: + type: string + subnetwork: + type: string + networkIP: + type: string + associatePublicIP: + type: boolean + default: True + accessConfigs: + type: array + items: + {{ GcpAccessConfig() | indent(6) }} + aliasIpRanges: + type: array 
+ items: + {{ GcpIpRange() | indent(6) }} + +{%- endmacro %} + + +{% macro GcpMetadataItem() -%} + +title: GCP Metadata Item +type: object +properties: + key: + type: string + value: + type: string + type: + type: string + +{%- endmacro %} + + +{% macro GcpMetadata() -%} + +title: GCP Metadata +type: object +properties: + type: + type: string + fingerprint: + type: string + items: + type: array + items: + {{ GcpMetadataItem() | indent(6) }} + +{%- endmacro %} + + +{% macro GcpTag() -%} + +title: GCP Tags +type: object +properties: + fingerprint: + type: string + items: + type: array + items: + type: string + type: + type: string + +{%- endmacro %} + + +{% macro GcpScheduling() -%} + +title: GCP Schedule +type: object +properties: + onHostMaintenance: + type: string + default: TERMINATE + automaticRestart: + type: boolean + preemptible: + type: boolean + type: + type: string + +{%- endmacro %} + + +{% macro GcpKeyValuePair() -%} + +title: GCP Key Value Pair +type: object +properties: + key: + type: string + value: + type: string + type: + type: string + +{%- endmacro %} + + +{% macro GcpGuestCustomization() -%} + +title: Gcp Guest Customization +type: object +properties: + startupScript: + type: string + sysprep: + type: string + type: + type: string + +{%- endmacro %} + + +{% macro GcpVMResource() -%} + +title: GCP VM Resource +type: object +properties: + name: + type: string + type: + type: string + description: + type: string + tags: + {{ GcpTag() | indent(4) }} + machineType: + type: string + zone: + type: string + account_uuid: + type: string + canIpForward: + type: boolean + networkInterfaces: + type: array + items: + {{ GcpNetworkInterface() | indent(6) }} + disks: + type: array + items: + {{ GcpDisk() | indent(6) }} + blankDisks: + type: array + items: + {{ GcpBlankDisk() | indent(6) }} + metadata: + {{ GcpMetadata() | indent(4) }} + serviceAccounts: + type: array + items: + {{ GcpServiceAccount() | indent(6) }} + scheduling: + {{ GcpScheduling() | indent(4) }} + labels: + type: array + items: + {{ GcpKeyValuePair() | indent(6) }} + labelFingerprint: + type: string + minCpuPlatform: + type: string + sshKeys: + type: array + items: + type: string + guestCustomization: + {{ GcpGuestCustomization() | indent(4) }} + +{%- endmacro %} + + +{% macro GcpCreateSpec() -%} + +title: GCP Create Spec +type: object +properties: + type: + type: string + enum: [PROVISION_GCP_VM, ''] + default: PROVISION_GCP_VM + resources: + {{ GcpVMResource() | indent(4) }} + +{%- endmacro %} + + +info: + title: GCP_VM + description: GCP VM spec payload using v3 API + + +components: + schemas: + provider_spec: + {{ GcpCreateSpec() | indent(6) }} diff --git a/framework/calm/dsl/providers/plugins/gcp_vm/main.py b/framework/calm/dsl/providers/plugins/gcp_vm/main.py new file mode 100644 index 0000000..5148873 --- /dev/null +++ b/framework/calm/dsl/providers/plugins/gcp_vm/main.py @@ -0,0 +1,951 @@ +import click +from ruamel import yaml + +from calm.dsl.api import get_resource_api, get_api_client +from calm.dsl.providers import get_provider_interface +from .constants import GCP as gcp + + +Provider = get_provider_interface() + + +class GcpVmProvider(Provider): + + provider_type = "GCP_VM" + package_name = __name__ + spec_template_file = "gcp_vm_provider_spec.yaml.jinja2" + + @classmethod + def create_spec(cls): + client = get_api_client() + create_spec(client) + + @classmethod + def get_api_obj(cls): + """returns object to call gcpprovider specific apis""" + + client = get_api_client() + return GCP(client.connection) + 
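+# Rough usage sketch for the GCP helper defined below (account_uuid and the
+# chosen zone are placeholders): the provider hands the helper out via
+# GcpVmProvider.get_api_obj(), and each method lists provider entities for the
+# given GCP account uuid (the interactive create_spec flow below picks this
+# uuid from the accounts registered in the selected project).
+#
+#   gcp_obj = GcpVmProvider.get_api_obj()
+#   zones = gcp_obj.zones(account_uuid)                  # list of zone names
+#   machine_types = gcp_obj.machine_types(account_uuid, zones[0])  # name -> selfLink
+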
+ +class GCP: + def __init__(self, connection): + self.connection = connection + + def zones(self, account_id, region="undefined"): + Obj = get_resource_api(gcp.ZONES, self.connection) + payload = {"filter": "account_uuid=={};region=={}".format(account_id, region)} + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + entity_list = [] + res = res.json() + for entity in res["entities"]: + entity_list.append(entity["status"]["name"]) + + return entity_list + + def machine_types(self, account_id, zone): + Obj = get_resource_api(gcp.MACHINE_TYPES, self.connection) + payload = {"filter": "account_uuid=={};zone=={}".format(account_id, zone)} + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + entity_map = {} + res = res.json() + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + selfLink = entity["status"]["resources"]["selfLink"] + entity_map[name] = selfLink + + return entity_map + + def persistent_disks(self, account_id, zone): + Obj = get_resource_api(gcp.PERSISTENT_DISKS, self.connection) + payload = { + "filter": "account_uuid=={};zone=={};unused==true;private_only==true".format( + account_id, zone + ) + } + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + entity_map = {} + res = res.json() + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + self_link = entity["status"]["resources"]["selfLink"] + entity_map[name] = self_link + + return entity_map + + def snapshots(self, account_id, zone): + Obj = get_resource_api(gcp.SNAPSHOTS, self.connection) + payload = { + "filter": "account_uuid=={};zone=={};unused==true;private_only==true".format( + account_id, zone + ) + } + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + entity_map = {} + res = res.json() + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + selfLink = entity["status"]["resources"]["selfLink"] + entity_map[name] = selfLink + + return entity_map + + def configured_public_images(self, account_id): + Obj = get_resource_api("accounts", self.connection) + res, err = Obj.read(account_id) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + public_images = res["status"]["resources"]["data"]["public_images"] + public_image_map = {} + for entity in public_images: + selfLink = entity["selfLink"] + name = selfLink[selfLink.rindex("/") + 1 :] # noqa + public_image_map[name] = selfLink + + return public_image_map + + def images(self, account_id, zone): + Obj = get_resource_api(gcp.DISK_IMAGES, self.connection) + payload = { + "filter": "account_uuid=={};zone=={};unused==true;private_only==true".format( + account_id, zone + ) + } + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + entity_map = {} + res = res.json() + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + selfLink = entity["status"]["resources"]["selfLink"] + entity_map[name] = selfLink + + return entity_map + + def disk_images(self, account_id, zone): + """ + Returns gcpImages + gcpSnapshots + configuredPublicImages + """ + + image_map = {} + image_map.update(self.configured_public_images(account_id)) + image_map.update(self.snapshots(account_id, zone)) + image_map.update(self.images(account_id, zone)) + + return image_map + + 
def networks(self, account_id, zone): + Obj = get_resource_api(gcp.NETWORKS, self.connection) + payload = { + "filter": "account_uuid=={};zone=={};unused==true;private_only==true".format( + account_id, zone + ) + } + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + entity_map = {} + res = res.json() + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + selfLink = entity["status"]["resources"]["selfLink"] + entity_map[name] = selfLink + + return entity_map + + def subnetworks(self, account_id, zone): + Obj = get_resource_api(gcp.SUBNETWORKS, self.connection) + payload = { + "filter": "account_uuid=={};zone=={};unused==true;private_only==true".format( + account_id, zone + ) + } + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + entity_map = {} + res = res.json() + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + selfLink = entity["status"]["resources"]["selfLink"] + entity_map[name] = selfLink + + return entity_map + + def network_tags(self, account_id): + Obj = get_resource_api(gcp.FIREWALLS, self.connection) + payload = {"filter": "account_uuid=={};".format(account_id)} + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + entity_list = [] + res = res.json() + for entity in res["entities"]: + targetTags = entity["status"]["resources"].get("targetTags") + if targetTags: + entity_list.extend(targetTags) + + return entity_list + + +def highlight_text(text, **kwargs): + """Highlight text in our standard format""" + return click.style("{}".format(text), fg="blue", bold=False, **kwargs) + + +def create_spec(client): + + spec = {} + Obj = GCP(client.connection) + + # Account Configuration + projects = client.project.get_name_uuid_map() + project_list = list(projects.keys()) + + if not project_list: + click.echo(highlight_text("No projects found!!!")) + click.echo(highlight_text("Please add first")) + return + + click.echo("\nChoose from given projects:") + for ind, name in enumerate(project_list): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + project_id = "" + while True: + ind = click.prompt("\nEnter the index of project", default=1) + if (ind > len(project_list)) or (ind <= 0): + click.echo("Invalid index !!! 
") + + else: + project_id = projects[project_list[ind - 1]] + click.echo("{} selected".format(highlight_text(project_list[ind - 1]))) + break + + res, err = client.project.read(project_id) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + project = res.json() + accounts = project["status"]["resources"]["account_reference_list"] + + reg_accounts = [] + for account in accounts: + reg_accounts.append(account["uuid"]) + + payload = {"filter": "type==gcp"} + res, err = client.account.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + gcp_accounts = {} + + for entity in res["entities"]: + entity_name = entity["metadata"]["name"] + entity_id = entity["metadata"]["uuid"] + if entity_id in reg_accounts: + gcp_accounts[entity_name] = entity_id + + if not gcp_accounts: + click.echo( + highlight_text("No gcp account found registered in this project !!!") + ) + click.echo("Please add one !!!") + return + + accounts = list(gcp_accounts.keys()) + spec["resources"] = {} + + click.echo("\nChoose from given GCP accounts") + for ind, name in enumerate(accounts): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of account to be used", default=1) + if (res > len(accounts)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + account_name = accounts[res - 1] + account_id = gcp_accounts[account_name] # TO BE USED + + spec["resources"]["account_uuid"] = account_id + click.echo("{} selected".format(highlight_text(account_name))) + break + + click.echo("\nChoose from given Operating System types:") + os_types = gcp.OPERATING_SYSTEMS + + for ind, name in enumerate(os_types): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of operating system", default=1) + if (ind > len(os_types)) or (ind <= 0): + click.echo("Invalid index !!! ") + + else: + vm_os = os_types[ind - 1] + click.echo("{} selected".format(highlight_text(vm_os))) + break + + # VM Configuration + vm_name = "vm-@@{calm_unique_hash}@@-@@{calm_array_index}@@" + spec["resources"]["name"] = click.prompt("\nEnter instance name", default=vm_name) + + zone_names = Obj.zones(account_id) + click.echo("\nChoose from given zones") + for ind, name in enumerate(zone_names): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of zone", default=1) + if ind > len(zone_names): + click.echo("Invalid index !!! ") + + else: + zone = zone_names[ind - 1] # TO BE USED + spec["resources"]["zone"] = zone + click.echo("{} selected".format(highlight_text(zone))) + break + + machine_type_map = Obj.machine_types(account_id, zone) + entity_names = list(machine_type_map.keys()) + click.echo("\nChoose from given machine types") + for ind, name in enumerate(entity_names): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of machine type", default=1) + if ind > len(entity_names): + click.echo("Invalid index !!! 
") + + else: + machine_type = entity_names[ind - 1] + click.echo("{} selected".format(highlight_text(machine_type))) + spec["resources"]["machineType"] = machine_type_map[machine_type] + break + + # Disk Details + spec["resources"]["disks"] = get_disks(Obj, account_id, zone) + + # Blank Disk details + spec["resources"]["blankDisks"] = get_blank_disks(zone) + + # Networks + spec["resources"]["networkInterfaces"] = get_networks(Obj, account_id, zone) + + # SSH keys + spec["resources"]["sshKeys"] = get_ssh_keys() + metadata = {} + metadata["items"] = [] + block_project_ssh_keys = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to block project-wide SSH keys")), + default="n", + ) + + if block_project_ssh_keys[0] == "y": + metadata["items"].append({"value": "true", "key": "block-project-ssh-keys"}) + + # Management + click.echo("\n\t\t", nl=False) + click.secho("Management (Optional)", bold=True, underline=True) + + # Guest Customization + guest_customization = {} + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add Customization script")), + default="n", + ) + + if choice[0] == "y": + if vm_os == "Linux": + startup_script = click.prompt("\nEnter Startup script", default="") + guest_customization = {"startupScript": startup_script} + + else: + sysprep = click.prompt("\nEnter Sysprep powershell script", default="") + guest_customization = {"sysprep": sysprep} + + spec["resources"]["guestCustomization"] = guest_customization + + # METADATA TAGS + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add key value pairs to metadata")), + default="n", + ) + while choice[0] == "y": + Key = click.prompt("\n\tKey", default="") + Value = click.prompt("\tValue", default="") + + metadata["items"].append({"key": Key, "value": Value}) + choice = choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add more key value pairs")), + default="n", + ) + + spec["resources"]["metadata"] = metadata + + # NETWORK TAGS + network_tags = [] + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add network tags")), default="n" + ) + if choice[0] == "y": + tag_list = Obj.network_tags(account_id) + + while choice[0] == "y": + click.echo("\nChoose from given network tags") + for ind, name in enumerate(tag_list): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of network tag", default=1) + if ind > len(tag_list): + click.echo("Invalid index !!! 
") + + else: + network_tag = tag_list[ind - 1] + tag_list.pop(ind - 1) + network_tags.append(network_tag) + click.echo("{} selected".format(highlight_text(network_tag))) + break + + choice = choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add more network tags")), + default="n", + ) + + spec["resources"]["tags"] = {} + if network_tags: + spec["resources"]["tags"] = {"items": network_tags} + + # LABELS + labels = [] + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add labels")), default="n" + ) + while choice[0] == "y": + Key = click.prompt("\n\tKey", default="") + Value = click.prompt("\n\tValue", default="") + + labels.append({"key": Key, "value": Value}) + choice = choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add more labels")), default="n" + ) + + spec["resources"]["labels"] = labels + + # API Access Configuration + click.echo("\n\t\t", nl=False) + click.secho("API Access", bold=True, underline=True) + + service_account_email = click.prompt("\nEnter the Service Account Email") + click.echo("\nChoose from given Scopes:") + scopes = list(gcp.SCOPES.keys()) + + for ind, name in enumerate(scopes): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of scope", default=1) + if (ind > len(os_types)) or (ind <= 0): + click.echo("Invalid index !!! ") + + else: + scope = scopes[ind - 1] + click.echo("{} selected".format(highlight_text(scope))) + break + + service_accounts = [] + # Right now only one account is possible through UI + service_accounts.append( + {"scopes": gcp.SCOPES[scope], "email": service_account_email} + ) + + spec["resources"]["serviceAccounts"] = service_accounts + + GcpVmProvider.validate_spec(spec) + click.secho("\nCreate spec for your GCP VM:\n", underline=True) + + # As it contains ssh keys, So use width=1000 for yaml.dump + click.echo(highlight_text(yaml.dump(spec, default_flow_style=False, width=1000))) + + +def get_disks(gcp_obj, account_id, zone): + + gcp_disks = [] + + # Boot Disk + click.echo("\n\t\t", nl=False) + click.secho("DISKS", bold=True, underline=True) + + click.secho("\n1. BOOT DISK", underline=True) + # Only persistent disks are allowed in boot disk + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to use existing disk")), default="n" + ) + choice = True if choice[0] == "y" else False + + # Same set of persistent disk will be used for additional disks too + persistent_disk_map = gcp_obj.persistent_disks(account_id, zone) + if choice: + entity_names = list(persistent_disk_map.keys()) + click.echo("\nChoose from given disks") + for ind, name in enumerate(entity_names): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of disk", default=1) + if ind > len(entity_names): + click.echo("Invalid index !!! ") + + else: + disk_name = entity_names[ind - 1] + click.echo("{} selected".format(highlight_text(disk_name))) + break + + init_params = {} + disk_data = {"source": persistent_disk_map[disk_name]} + persistent_disk_map.pop(disk_name) + + else: + storage_types = gcp.STORAGE_TYPES + click.echo("\nChoose from given storage types") + for ind, name in enumerate(storage_types): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of storage type", default=1) + if ind > len(storage_types): + click.echo("Invalid index !!! 
") + + else: + storage_type = storage_types[ind - 1] + click.echo("{} selected".format(highlight_text(storage_type))) + break + + source_image_map = gcp_obj.disk_images(account_id, zone) + image_names = list(source_image_map.keys()) + + click.echo("\nChoose from given source images") + for ind, name in enumerate(image_names): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of source images", default=1) + if ind > len(image_names): + click.echo("Invalid index !!! ") + + else: + source_image = image_names[ind - 1] + source_image_link = source_image_map[source_image] + click.echo("{} selected".format(highlight_text(source_image))) + break + + disk_size = click.prompt("\nEnter the size of disk in GB", default=-1) + disk_type_link = "{}/zones/{}/diskTypes/{}".format( + gcp.PROJECT_URL, zone, storage_type + ) + disk_data = {} + init_params = { + "diskType": disk_type_link, + "sourceImage": source_image_link, + "diskSizeGb": disk_size, + } + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to delete when instance is deleted")), + default="n", + ) + auto_delete = True if choice[0] == "y" else False + + disk_data.update( + { + "disk_type": "PERSISTENT", + "boot": True, + "autoDelete": auto_delete, + "initializeParams": init_params, + } + ) + gcp_disks.append(disk_data) + + # Additional disks + click.secho("\n2. ADDITIONAL DISK", underline=True) + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add additional disks")), default="n" + ) + + disk_ind = 0 + while choice[0] == "y": + click.echo("\n\t\t", nl=False) + click.secho("ADDITIONAL DISK - {}".format(disk_ind), underline=True) + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to use existing disk")), default="n" + ) + choice = True if choice[0] == "y" else False + + if choice: + entity_names = list(persistent_disk_map.keys()) + + click.echo("\nChoose from given disks") + for ind, name in enumerate(entity_names): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of disk", default=1) + if ind > len(entity_names): + click.echo("Invalid index !!! ") + + else: + disk_name = entity_names[ind - 1] + click.echo("{} selected".format(highlight_text(disk_name))) + break + + init_params = {} + disk_data = { + "source": persistent_disk_map[disk_name], + "disk_type": "PERSISTENT", + } + persistent_disk_map.pop(disk_name) # Pop used disk + + else: + storage_types = gcp.ADDITIONAL_DISK_STORAGE_TYPES + click.echo("\nChoose from given storage types") + for ind, name in enumerate(storage_types): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of storage type", default=1) + if ind > len(storage_types): + click.echo("Invalid index !!! ") + + else: + storage_type = storage_types[ind - 1] + click.echo("{} selected".format(highlight_text(storage_type))) + break + + disk_type_link = "{}/zones/{}/diskTypes/{}".format( + gcp.PROJECT_URL, zone, storage_type + ) + + if storage_type == "local-ssd": + interfaces = gcp.DISK_INTERFACES + click.echo("\nChoose from given interfaces") + for ind, name in enumerate(interfaces): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of disk interface", default=1) + if ind > len(interfaces): + click.echo("Invalid index !!! 
") + + else: + disk_interface = interfaces[ind - 1] + click.echo("{} selected".format(highlight_text(disk_interface))) + break + + if disk_interface == "SCSI": + disk_interface = "" + + disk_data = { + "interface": disk_interface, + "disk_type": gcp.STORAGE_DISK_MAP[storage_type], + } + init_params = {"diskType": disk_type_link} + + else: + source_image_map = gcp_obj.disk_images(account_id, zone) + image_names = list(source_image_map.keys()) + + click.echo("\nChoose from given source images") + for ind, name in enumerate(image_names): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of source images", default=1) + if ind > len(image_names): + click.echo("Invalid index !!! ") + + else: + source_image = image_names[ind - 1] + source_image_link = source_image_map[source_image] + click.echo("{} selected".format(highlight_text(source_image))) + break + + disk_size = click.prompt("\nEnter the size of disk in GB", default=-1) + disk_data = {"disk_type": gcp.STORAGE_DISK_MAP[storage_type]} + init_params = { + "diskType": disk_type_link, + "sourceImage": source_image_link, + "diskSizeGb": disk_size, + } + + choice = click.prompt( + "\n{}(y/n)".format( + highlight_text("Want to delete when instance is deleted") + ), + default="n", + ) + auto_delete = True if choice[0] == "y" else False + + disk_data.update( + {"boot": False, "autoDelete": auto_delete, "initializeParams": init_params} + ) + + gcp_disks.append(disk_data) + disk_ind += 1 + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add more additional disks")), + default="n", + ) + + return gcp_disks + + +def get_blank_disks(zone): + + click.echo("\n\t\t", nl=False) + click.secho("BLANK DISKS", bold=True, underline=True) + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add blank disks")), default="n" + ) + + blank_disks = [] + bdisk_ind = 0 + while choice[0] == "y": + click.echo("\n\t\t", nl=False) + click.secho("BLANK DISK - {}".format(bdisk_ind), underline=True) + + storage_types = gcp.STORAGE_TYPES + click.echo("\nChoose from given storage types") + for ind, name in enumerate(storage_types): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of storage type", default=1) + if ind > len(storage_types): + click.echo("Invalid index !!! 
") + + else: + storage_type = storage_types[ind - 1] + click.echo("{} selected".format(highlight_text(storage_type))) + break + + disk_type_url = "{}/zones/{}/diskTypes/{}".format( + gcp.PROJECT_URL, zone, storage_type + ) + disk_name = click.prompt( + "\nEnter Disk Name", + default="vm-@@{calm_array_index}@@-@@{calm_time}@@-blankdisk-" + + str(bdisk_ind + 1), + ) + disk_size = click.prompt("\nEnter the size of disk in GB", default=-1) + choice = click.prompt( + "\n{}(y/n)".format( + highlight_text("Want to delete when instance is deleted") + ), + default="n", + ) + auto_delete = True if choice[0] == "y" else False + + blank_disks.append( + { + "disk_type": disk_type_url, + "name": disk_name, + "sizeGb": disk_size, + "autoDelete": auto_delete, + } + ) + + bdisk_ind += 1 + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add more blank disks")), + default="n", + ) + + return blank_disks + + +def get_networks(gcp_obj, account_id, zone): + + networks = [] + click.echo("\n\t\t", nl=False) + click.secho("NETWORKS", bold=True, underline=True) + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add networks")), default="n" + ) + + if choice[0] == "y": + network_map = gcp_obj.networks(account_id, zone) + subnetwork_map = gcp_obj.subnetworks(account_id, zone) + + nic_index = 0 + while choice[0] == "y": + click.echo("\n\t\t", nl=False) + click.secho("Network {}".format(nic_index), underline=True) + + if not network_map: + click.secho(highlight_text("\nNo more networks found !!!")) + break + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to associate public ip address")), + default="n", + ) + associate_public_ip = True if choice[0] == "y" else False + + network_names = list(network_map.keys()) + click.echo("\nChoose from given networks") + for ind, name in enumerate(network_names): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of network", default=1) + if ind > len(network_names): + click.echo("Invalid index !!! ") + + else: + network = network_names[ind - 1] + click.echo("{} selected".format(highlight_text(network))) + break + + subnetwork_names = list(subnetwork_map.keys()) + click.echo("\nChoose from given subnetworks") + for ind, name in enumerate(subnetwork_names): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of subnetwork", default=1) + if ind > len(subnetwork_names): + click.echo("Invalid index !!! ") + + else: + subnetwork = subnetwork_names[ind - 1] + click.echo("{} selected".format(highlight_text(subnetwork))) + break + + if associate_public_ip: + nic_configs = list(gcp.NETWORK_CONFIG_MAP.keys()) + click.echo("\nChoose from given access configuration types") + for ind, name in enumerate(nic_configs): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt( + "\nEnter the index of access configuration type", default=1 + ) + if ind > len(nic_configs): + click.echo("Invalid index !!! 
") + + else: + access_config_type = nic_configs[ind - 1] + click.echo("{} selected".format(highlight_text(access_config_type))) + break + + config_name = click.prompt("\nEnter Access Configuration Name", default="") + networks.append( + { + "network": network_map[network], + "subnetwork": subnetwork_map[subnetwork], + "accessConfigs": [ + {"name": config_name, "config_type": access_config_type} + ], + "associatePublicIP": True, + } + ) + + else: + networks.append( + { + "network": network_map[network], + "subnetwork": subnetwork_map[subnetwork], + "associatePublicIP": False, + } + ) + network_map.pop(network) # Pop out used network + nic_index += 1 + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add more networks")), default="n" + ) + + return networks + + +def get_ssh_keys(): + def check_key_format(key): + arr = key.split(" ") + arr_len = len(arr) + + if arr_len != 3: + return False + + elif not arr[2].find("@"): + return False + + return True + + def format_key(key): + arr = key.split(" ") + username = arr[2].split("@")[0] + result = "{}:{}".format(username, key) + return result + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add ssh keys")), default="n" + ) + ssh_keys = [] + if choice[0] == "y": + click.echo( + highlight_text("\n\tFormat: ' '") + ) + + while choice[0] == "y": + key = click.prompt("\nEnter ssh key", default="") + if key: + if not check_key_format(key): + click.echo("Invalid key, look at the format") + continue + + formated_key = format_key(key) + ssh_keys.append(formated_key) + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add more ssh keys")), + default="n", + ) + else: + break + + return ssh_keys diff --git a/framework/calm/dsl/providers/plugins/k8s/__init__.py b/framework/calm/dsl/providers/plugins/k8s/__init__.py new file mode 100644 index 0000000..f89ab95 --- /dev/null +++ b/framework/calm/dsl/providers/plugins/k8s/__init__.py @@ -0,0 +1,4 @@ +from .main import K8sProvider + + +__all__ = ["K8sProvider"] diff --git a/framework/calm/dsl/providers/plugins/k8s/k8s_provider_spec.yaml.jinja2 b/framework/calm/dsl/providers/plugins/k8s/k8s_provider_spec.yaml.jinja2 new file mode 100644 index 0000000..075e241 --- /dev/null +++ b/framework/calm/dsl/providers/plugins/k8s/k8s_provider_spec.yaml.jinja2 @@ -0,0 +1,18 @@ +{% macro K8sCreateSpec() -%} +title: K8S CreateSpec +type: object +properties: + name: + type: string + +{%- endmacro %} + +info: + title: K8S_POD + description: Kubernete spec + version: #TODO + +components: + schemas: + provider_spec: + {{ K8sCreateSpec() | indent(6) }} diff --git a/framework/calm/dsl/providers/plugins/k8s/main.py b/framework/calm/dsl/providers/plugins/k8s/main.py new file mode 100644 index 0000000..0c3f2c8 --- /dev/null +++ b/framework/calm/dsl/providers/plugins/k8s/main.py @@ -0,0 +1,17 @@ +from calm.dsl.providers import get_provider_interface + + +Provider = get_provider_interface() + + +# Implements Provider interface for K8S_POD +class K8sProvider(Provider): + + provider_type = "K8S_POD" + package_name = __name__ + spec_template_file = "k8s_provider_spec.yaml.jinja2" + + @classmethod + def validate_spec(cls, spec): + # TODO - Add validation for K8S spec + pass diff --git a/framework/calm/dsl/providers/plugins/vmware_vm/__init__.py b/framework/calm/dsl/providers/plugins/vmware_vm/__init__.py new file mode 100644 index 0000000..4bc879a --- /dev/null +++ b/framework/calm/dsl/providers/plugins/vmware_vm/__init__.py @@ -0,0 +1,4 @@ +from .main import VCenterVmProvider + + 
+__all__ = ["VCenterVmProvider"] diff --git a/framework/calm/dsl/providers/plugins/vmware_vm/constants.py b/framework/calm/dsl/providers/plugins/vmware_vm/constants.py new file mode 100644 index 0000000..9f88852 --- /dev/null +++ b/framework/calm/dsl/providers/plugins/vmware_vm/constants.py @@ -0,0 +1,124 @@ +class VCENTER: + + VERSION = "v6" + DATACENTER = "vmware/{}/datacenter".format(VERSION) + TEMPLATE = "vmware/{}/template".format(VERSION) + CONTENT_LIBRARY = "vmware/{}/library".format(VERSION) + CONTENT_LIBRARY_TEMPLATE = "vmware/{}/library_items".format(VERSION) + DATASTORE = "vmware/{}/datastore".format(VERSION) + HOST = "vmware/{}/host".format(VERSION) + CLUSTER = "vmware/{}/cluster".format(VERSION) + STORAGE_POD = "vmware/{}/storage_pod".format(VERSION) + NETWORK = "vmware/{}/network".format(VERSION) + NETWORK_ADAPTER = "vmware/{}/network_adapter".format(VERSION) + CUSTOMIZATION = "vmware/{}/customization".format(VERSION) + TIMEZONE = "vmware/{}/timezone".format(VERSION) + ACCOUNTS = "vmware/{}/accounts".format(VERSION) + TEMPLATE_DEFS = ("vmware/{}/accounts/".format(VERSION)) + "{}/templates" + FILE_PATHS = "vmware/{}/file_paths".format(VERSION) + TAGS = "vmware/{}/vm_categories".format(VERSION) + POWER_STATE = { + "POWER_ON": "poweron", + "POWER_OFF": "poweroff", + "ON": "ON", + "OFF": "OFF", + } + + DISK_ADAPTER_TYPES = {"SCSI": "SCSI", "IDE": "IDE", "SATA": "SATA"} + + DISK_TYPES = {"DISK": "disk", "CD-ROM": "cdrom"} + + DISK_ADAPTERS = {"disk": ["SCSI", "SATA"], "cdrom": ["IDE"]} + + DISK_MODE = { + "Independent - Persistent": "independent_persistent", + "Dependent": "persistent", + "Independent - Nonpersistent": "independent_nonpersistent", + } + + CONTROLLER = { + "SCSI": { + "Lsi Logic Parallel": "VirtualLsiLogicController", + "Lsi Logic SAS": "VirtualLsiLogicSASController", + "VMware Paravirtual": "ParaVirtualSCSIController", + "Bus Logic Parallel": "VirtualBusLogicController", + }, + "SATA": {"Virtual SATA Controller": "VirtualAHCIController"}, + } + + SCSIControllerOptions = { + "VirtualLsiLogicController": "Lsi Logic Parallel", + "VirtualLsiLogicSASController": "Lsi Logic SAS", + "ParaVirtualSCSIController": "VMware Paravirtual", + "VirtualBusLogicController": "Bus Logic Parallel", + } + + SATAControllerOptions = {"VirtualAHCIController": "Virtual SATA Controller"} + + BUS_SHARING = { + "No Sharing": "noSharing", + "Virtual Sharing": "virtualSharing", + "Physical Sharing": "physicalSharing", + } + + KEY_BASE = { + "CONTROLLER": {"SCSI": 1000, "SATA": 15000, "IDE": 200}, + "NETWORK": 4000, + "DISK": 2000, + } + + ControllerLimit = {"SCSI": 4, "SATA": 4, "IDE": 2} + + OperatingSystem = {"Linux": "GUEST_OS_LINUX", "Windows": "GUEST_OS_WINDOWS"} + + GuestCustomizationModes = { + "Linux": ["Cloud Init", "Custom Spec", "Predefined Customization"], + "Windows": ["Predefined Customization", "Custom Spec"], + } + + VirtualControllerNameMap = { + "vim.vm.device.VirtualIDEController": "VirtualIDEController", + "vim.vm.device.VirtualLsiLogicSASController": "VirtualLsiLogicSASController", + "vim.vm.device.VirtualSCSIController": "VirtualSCSIController", + "vim.vm.device.VirtualSATAController": "VirtualSATAController", + "vim.vm.device.ParaVirtualSCSIController": "ParaVirtualSCSIController", + "vim.vm.device.VirtualAHCIController": "VirtualAHCIController", + "vim.vm.device.VirtualBusLogicController": "VirtualBusLogicController", + "vim.vm.device.VirtualLsiLogicController": "VirtualLsiLogicController", + } + + ControllerMap = { + "vim.vm.device.VirtualIDEController": "IDE", + 
"vim.vm.device.VirtualLsiLogicSASController": "SCSI", + "vim.vm.device.VirtualSCSIController": "SCSI", + "vim.vm.device.VirtualSATAController": "SATA", + "vim.vm.device.ParaVirtualSCSIController": "SCSI", + "vim.vm.device.VirtualAHCIController": "SATA", + "vim.vm.device.VirtualBusLogicController": "SCSI", + "vim.vm.device.VirtualLsiLogicController": "SCSI", + } + + ControllerDeviceSlotMap = { + "VirtualIDEController": 2, + "VirtualLsiLogicSASController": 16, + "VirtualSCSIController": 16, + "VirtualSATAController": 30, + "ParaVirtualSCSIController": 16, + "VirtualAHCIController": 30, + "VirtualBusLogicController": 16, + "VirtualLsiLogicController": 16, + } + + NetworkAdapterMap = { + "vim.vm.device.VirtualE1000": "e1000", + "vim.vm.device.VirtualE1000e": "e1000e", + "vim.vm.device.VirtualPCNet32": "pcnet32", + "vim.vm.device.VirtualVmxnet": "vmxnet", + "vim.vm.device.VirtualVmxnet2": "vmxnet2", + "vim.vm.device.VirtualVmxnet3": "vmxnet3", + } + + DiskMap = { + "vim.vm.device.VirtualDisk": "disk", + "vim.vm.device.VirtualCdrom": "cdrom", + } diff --git a/framework/calm/dsl/providers/plugins/vmware_vm/main.py b/framework/calm/dsl/providers/plugins/vmware_vm/main.py new file mode 100644 index 0000000..0f3d57f --- /dev/null +++ b/framework/calm/dsl/providers/plugins/vmware_vm/main.py @@ -0,0 +1,2311 @@ +import click +from collections import OrderedDict +from ruamel import yaml +from distutils.version import LooseVersion as LV + +from calm.dsl.api import get_resource_api, get_api_client +from calm.dsl.providers import get_provider_interface +from calm.dsl.store import Version +from .constants import VCENTER as vmw + + +Provider = get_provider_interface() + + +class VCenterVmProvider(Provider): + + provider_type = "VMWARE_VM" + package_name = __name__ + spec_template_file = "vmware_vm_provider_spec.yaml.jinja2" + + @classmethod + def create_spec(cls): + client = get_api_client() + create_spec(client) + + @classmethod + def update_vm_image_config(cls, spec, vm_template=None): + """vm_template is the downloadable class""" + if vm_template: + spec["template"] = vm_template.__name__ + + @classmethod + def get_api_obj(cls): + """returns object to call vmware provider specific apis""" + client = get_api_client() + # TODO remove this mess + from calm.dsl.store.version import Version + + calm_version = Version.get_version("Calm") + api_handlers = VCenterBase.api_handlers + + # Return min version that is greater or equal to user calm version + supported_versions = [] + for k in api_handlers.keys(): + if LV(k) <= LV(calm_version): + supported_versions.append(k) + + latest_version = max(supported_versions, key=lambda x: LV(x)) + api_handler = api_handlers[latest_version] + return api_handler(client.connection) + + +class VCenterBase: + """Base class for vmware provider specific apis""" + + api_handlers = OrderedDict() + __api_version__ = None + + def __init_subclass__(cls, **kwargs): + super().__init_subclass__(**kwargs) + + version = getattr(cls, "__api_version__") + if version: + cls.api_handlers[version] = cls + + @classmethod + def get_version(cls): + return getattr(cls, "__api_version__") + + def hosts(self, *args, **kwargs): + raise NotImplementedError("hosts call not implemented") + + def datastores(self, *args, **kwargs): + raise NotImplementedError("datastores call not implemented") + + def storage_pods(self, *args, **kwargs): + raise NotImplementedError("storage_pods call not implemented") + + def templates(self, *args, **kwargs): + raise NotImplementedError("templates call not implemented") + + 
def content_library(self, *args, **kwargs): + raise NotImplementedError("content_library call not implemented") + + def content_library_templates(self, *args, **kwargs): + raise NotImplementedError("content_library_templates call not implemented") + + def customizations(self, *args, **kwargs): + raise NotImplementedError("customizations call not implemented") + + def timezones(self, *args, **kwargs): + raise NotImplementedError("timezones call not implemented") + + def networks(self, *args, **kwargs): + raise NotImplementedError("networks call not implemented") + + def tags(self, *args, **kwargs): + raise NotImplementedError("tags call not implemented") + + def file_paths(self, *args, **kwargs): + raise NotImplementedError("file_paths call not implemented") + + def template_defaults(self, *args, **kwargs): + raise NotImplementedError("template_defaults call not implemented") + + +class VCenterV1(VCenterBase): + """vmware api object for calm_version >= 3.5.0""" + + __api_version__ = "3.5.0" + + def __init__(self, connection): + self.connection = connection + + def hosts(self, account_id): + Obj = get_resource_api(vmw.HOST, self.connection) + payload = {"filter": "account_uuid=={};".format(account_id)} + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + name_id_map = {} + res = res.json() + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + entity_uuid = entity["status"]["resources"]["summary"]["hardware"]["uuid"] + name_id_map[name] = entity_uuid + + return name_id_map + + def datastores(self, account_id, cluster_name=None, host_id=None): + Obj = get_resource_api(vmw.DATASTORE, self.connection) + payload = "" + if host_id: + payload = { + "filter": "account_uuid=={};host_id=={}".format(account_id, host_id) + } + + if cluster_name: + payload = { + "filter": "account_uuid=={};cluster_name=={}".format( + account_id, cluster_name + ) + } + + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + name_url_map = {} + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + url = entity["status"]["resources"]["summary"]["url"] + name_url_map[name] = url + + return name_url_map + + def clusters(self, account_id): + Obj = get_resource_api(vmw.CLUSTER, self.connection) + payload = {"filter": "account_uuid=={};".format(account_id)} + + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + cluster_list = [] + res = res.json() + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + cluster_list.append(name) + + return cluster_list + + def storage_pods(self, account_id): + Obj = get_resource_api(vmw.STORAGE_POD, self.connection) + payload = {"filter": "account_uuid=={};".format(account_id)} + + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + pod_list = [] + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + pod_list.append(name) + + return pod_list + + def templates(self, account_id): + Obj = get_resource_api(vmw.TEMPLATE, self.connection) + payload = {"filter": "account_uuid=={};".format(account_id)} + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + name_id_map = {} + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + temp_id = 
entity["status"]["resources"]["config"]["instanceUuid"] + name_id_map[name] = temp_id + + return name_id_map + + def content_library(self, account_id): + Obj = get_resource_api(vmw.CONTENT_LIBRARY, self.connection) + payload = {"filter": "account_uuid=={};".format(account_id)} + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + name_id_map = {} + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + temp_id = entity["status"]["resources"]["id"] + name_id_map[name] = temp_id + + return name_id_map + + def content_library_templates(self, account_id, library_id): + Obj = get_resource_api(vmw.CONTENT_LIBRARY_TEMPLATE, self.connection) + payload = { + "filter": "account_uuid=={};library_id=={}".format(account_id, library_id) + } + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + name_id_map = {} + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + id = entity["status"]["resources"]["id"] + type = entity["status"]["resources"]["type"] + name_id_map[name] = {"id": id, "type": type} + + return name_id_map + + def customizations(self, account_id, os): + + Obj = get_resource_api(vmw.CUSTOMIZATION, self.connection) + payload = {"filter": "account_uuid=={};".format(account_id)} + + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + cust_list = [] + for entity in res["entities"]: + if entity["status"]["resources"]["type"] == os: + cust_list.append(entity["status"]["resources"]["name"]) + + return cust_list + + def timezones(self, os): + + Obj = get_resource_api(vmw.TIMEZONE, self.connection) + payload = {"filter": "guest_os=={};".format(os)} + + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + name_ind_map = {} + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + ind = entity["status"]["resources"]["index"] + name_ind_map[name] = ind + + return name_ind_map + + def networks(self, account_id, host_id=None, cluster_name=None): + Obj = get_resource_api(vmw.NETWORK, self.connection) + payload = "" + if host_id: + payload = { + "filter": "account_uuid=={};host_id=={}".format(account_id, host_id) + } + + if cluster_name: + payload = { + "filter": "account_uuid=={};cluster_name=={}".format( + account_id, cluster_name + ) + } + + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + name_id_map = {} + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + entity_id = entity["status"]["resources"]["id"] + + name_id_map[name] = entity_id + + return name_id_map + + def tags(self, account_id): + obj = get_resource_api(vmw.TAGS, self.connection) + payload = {"filter": "account_uuid=={}".format(account_id)} + res, err = obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + name_tag_id_map = {} + name_cardinality_map = {} + for entity in res.get("entities"): + name = entity["status"]["resources"]["name"] + name_cardinality_map[name] = entity["status"]["resources"]["cardinality"] + for tag in entity["status"]["resources"]["tags"]: + key = str(entity["status"]["resources"]["name"] + ":" + tag["name"]) + name_tag_id_map[key] = { + "id": tag["id"], + "name": 
entity["status"]["resources"]["name"], + "tag_name": tag["name"], + } + + return {"tag_list": name_tag_id_map, "cardinality_list": name_cardinality_map} + + def file_paths( + self, + account_id, + datastore_url=None, + file_extension="iso", + host_id=None, + cluster_name=None, + ): + + Obj = get_resource_api(vmw.FILE_PATHS, self.connection) + payload = "" + if datastore_url: + payload = { + "filter": "account_uuid=={};file_extension=={};datastore_url=={}".format( + account_id, file_extension, datastore_url + ) + } + elif host_id: + payload = { + "filter": "account_uuid=={};file_extension=={};host_id=={}".format( + account_id, file_extension, host_id + ) + } + else: + payload = { + "filter": "account_uuid=={};file_extension=={};cluster_name=={}".format( + account_id, file_extension, cluster_name + ) + } + + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + fpaths = [] + for entity in res["entities"]: + fpaths.append(entity["status"]["resources"]) + + return fpaths + + def template_defaults( + self, account_id, template_id, is_library + ): # TODO improve this mess + payload = { + "filter": 'template_uuids==["{}"];is_library=={}'.format( + template_id, str(is_library).lower() + ) + } + Obj = get_resource_api(vmw.TEMPLATE_DEFS.format(account_id), self.connection) + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + tempControllers = {} + tempDisks = [] + tempNics = [] + free_device_slots = {} + controller_count = {"SCSI": 0, "SATA": 0, "IDE": 0} + controller_key_type_map = { + 1000: ("SCSI", None), + 15000: ("SATA", None), + 200: ("IDE", None), + } + controller_label_key_map = {"SCSI": {}, "SATA": {}, "IDE": {}} + + controllers = [] + disks = [] + networks = [] + + for entity in res["entities"]: + entity_config = entity["status"]["resources"]["config"] + entity_id = entity_config["instanceUuid"] if not is_library else "" + if entity_id == template_id or is_library: + controllers = entity_config["hardware"]["device"]["controller"] or [] + disks = entity_config["hardware"]["device"]["disk"] or [] + networks = entity_config["hardware"]["device"]["network"] or [] + break + + for controller in controllers: + contlr = {} + + label = controller["label"] + free_device_slots[label] = controller["freeDeviceSlots"] + type = controller["type"] + if vmw.ControllerMap.get(type): + controller_type = vmw.ControllerMap[type] + else: + controller_type = "IDE" # SCSI/SATA/IDE + + ctlr_type = vmw.VirtualControllerNameMap[type] + + if controller_type == "SCSI": + contlr["controller_type"] = vmw.SCSIControllerOptions[ctlr_type] + + elif controller_type == "SATA": + contlr["controller_type"] = vmw.SATAControllerOptions[ctlr_type] + + contlr["key"] = controller["key"] + controller_label_key_map[controller_type][label] = contlr["key"] + controller_key_type_map[contlr["key"]] = (controller_type, label) + + controller_count[controller_type] += 1 + if controller_type == "SCSI": + contlr["bus_sharing"] = controller["sharedBus"] + + if not tempControllers.get(controller_type): + tempControllers[controller_type] = [] + + tempControllers[controller_type].append(contlr) + + disk_mode_inv = {v: k for k, v in vmw.DISK_MODE.items()} + for disk in disks: + dsk = {} + + dsk["disk_type"] = vmw.DiskMap[disk["type"]] + dsk["key"] = disk["key"] + dsk["controller_key"] = ( + disk["controllerKey"] if "controllerKey" in disk.keys() else "" + ) + + if "controllerKey" in disk.keys() and 
controller_key_type_map.get( + disk["controllerKey"] + ): + dsk["adapter_type"] = controller_key_type_map.get( + disk["controllerKey"] + )[0] + else: + # Taken from VMwareTemplateDisks.jsx + dsk["adapter_type"] = "IDE" + + if dsk["disk_type"] == "disk": + dsk["size"] = disk["capacityInKB"] // 1024 + if "backing" in disk.keys(): + dsk["mode"] = disk_mode_inv[disk["backing"]["diskMode"]] + dsk["location"] = ( + disk["backing"]["datastore"]["url"], + disk["backing"]["datastore"]["name"], + ) + dsk["device_slot"] = disk["unitNumber"] + + tempDisks.append(dsk) + + for network in networks: + nic = {} + nic["key"] = network["key"] + nic["net_name"] = network["backing"]["network"]["name"] + nic["nic_type"] = vmw.NetworkAdapterMap.get(network["type"], "") + + tempNics.append(nic) + + response = { + "tempControllers": tempControllers, + "tempDisks": tempDisks, + "tempNics": tempNics, + "free_device_slots": free_device_slots, + "controller_count": controller_count, + "controller_key_type_map": controller_key_type_map, + "controller_label_key_map": controller_label_key_map, + } + + return response + + +class VCenterV0(VCenterBase): + """vmware api object for calm_version < 3.5.0""" + + __api_version__ = "0" + + def __init__(self, connection): + self.connection = connection + + def hosts(self, account_id): + Obj = get_resource_api(vmw.HOST, self.connection) + payload = {"filter": "account_uuid=={};".format(account_id)} + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + name_id_map = {} + res = res.json() + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + entity_uuid = entity["status"]["resources"]["summary"]["hardware"]["uuid"] + name_id_map[name] = entity_uuid + + return name_id_map + + def datastores(self, account_id, cluster_name=None, host_id=None): + Obj = get_resource_api(vmw.DATASTORE, self.connection) + payload = "" + if host_id: + payload = { + "filter": "account_uuid=={};host_id=={}".format(account_id, host_id) + } + + if cluster_name: + payload = { + "filter": "account_uuid=={};cluster_name=={}".format( + account_id, cluster_name + ) + } + + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + name_url_map = {} + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + url = entity["status"]["resources"]["summary"]["url"] + name_url_map[name] = url + + return name_url_map + + def clusters(self, account_id): + Obj = get_resource_api(vmw.CLUSTER, self.connection) + payload = {"filter": "account_uuid=={};".format(account_id)} + + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + cluster_list = [] + res = res.json() + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + cluster_list.append(name) + + return cluster_list + + def storage_pods(self, account_id): + Obj = get_resource_api(vmw.STORAGE_POD, self.connection) + payload = {"filter": "account_uuid=={};".format(account_id)} + + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + pod_list = [] + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + pod_list.append(name) + + return pod_list + + def templates(self, account_id): + Obj = get_resource_api(vmw.TEMPLATE, self.connection) + payload = {"filter": "account_uuid=={};".format(account_id)} + res, err = Obj.list(payload) + 
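+        # Same listing flow as VCenterV1.templates; duplicated so that each
+        # API-version handler stays self-contained.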
+ if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + name_id_map = {} + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + temp_id = entity["status"]["resources"]["config"]["instanceUuid"] + name_id_map[name] = temp_id + + return name_id_map + + def customizations(self, account_id, os): + + Obj = get_resource_api(vmw.CUSTOMIZATION, self.connection) + payload = {"filter": "account_uuid=={};".format(account_id)} + + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + cust_list = [] + for entity in res["entities"]: + if entity["status"]["resources"]["type"] == os: + cust_list.append(entity["status"]["resources"]["name"]) + + return cust_list + + def timezones(self, os): + + Obj = get_resource_api(vmw.TIMEZONE, self.connection) + payload = {"filter": "guest_os=={};".format(os)} + + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + name_ind_map = {} + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + ind = entity["status"]["resources"]["index"] + name_ind_map[name] = ind + + return name_ind_map + + def networks(self, account_id, host_id=None, cluster_name=None): + Obj = get_resource_api(vmw.NETWORK, self.connection) + payload = "" + if host_id: + payload = { + "filter": "account_uuid=={};host_id=={}".format(account_id, host_id) + } + + if cluster_name: + payload = { + "filter": "account_uuid=={};cluster_name=={}".format( + account_id, cluster_name + ) + } + + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + name_id_map = {} + for entity in res["entities"]: + name = entity["status"]["resources"]["name"] + entity_id = entity["status"]["resources"]["id"] + + name_id_map[name] = entity_id + + return name_id_map + + def tags(self, account_id): + obj = get_resource_api(vmw.TAGS, self.connection) + payload = {"filter": "account_uuid=={}".format(account_id)} + res, err = obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + name_tag_id_map = {} + name_cardinality_map = {} + for entity in res.get("entities"): + name = entity["status"]["resources"]["name"] + name_cardinality_map[name] = entity["status"]["resources"]["cardinality"] + for tag in entity["status"]["resources"]["tags"]: + key = str(entity["status"]["resources"]["name"] + ":" + tag["name"]) + name_tag_id_map[key] = { + "id": tag["id"], + "name": entity["status"]["resources"]["name"], + "tag_name": tag["name"], + } + + return {"tag_list": name_tag_id_map, "cardinality_list": name_cardinality_map} + + def file_paths( + self, + account_id, + datastore_url=None, + file_extension="iso", + host_id=None, + cluster_name=None, + ): + + Obj = get_resource_api(vmw.FILE_PATHS, self.connection) + payload = "" + if datastore_url: + payload = { + "filter": "account_uuid=={};file_extension=={};datastore_url=={}".format( + account_id, file_extension, datastore_url + ) + } + elif host_id: + payload = { + "filter": "account_uuid=={};file_extension=={};host_id=={}".format( + account_id, file_extension, host_id + ) + } + else: + payload = { + "filter": "account_uuid=={};file_extension=={};cluster_name=={}".format( + account_id, file_extension, cluster_name + ) + } + + res, err = Obj.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + 
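+        # Each returned entity's "resources" block describes one matching file
+        # (ISO by default) on the selected datastore/host/cluster.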
res = res.json() + fpaths = [] + for entity in res["entities"]: + fpaths.append(entity["status"]["resources"]) + + return fpaths + + def template_defaults(self, account_id, template_id): # TODO improve this mess + payload = {"filter": 'template_uuids==["{}"];'.format(template_id)} + Obj = get_resource_api(vmw.TEMPLATE_DEFS.format(account_id), self.connection) + res, err = Obj.list(payload) + + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + tempControllers = {} + tempDisks = [] + tempNics = [] + free_device_slots = {} + controller_count = {"SCSI": 0, "SATA": 0, "IDE": 0} + controller_key_type_map = { + 1000: ("SCSI", None), + 15000: ("SATA", None), + 200: ("IDE", None), + } + controller_label_key_map = {"SCSI": {}, "SATA": {}, "IDE": {}} + + controllers = [] + disks = [] + networks = [] + + for entity in res["entities"]: + entity_config = entity["status"]["resources"]["config"] + entity_id = entity_config["instanceUuid"] + if entity_id == template_id: + controllers = entity_config["hardware"]["device"]["controller"] or [] + disks = entity_config["hardware"]["device"]["disk"] or [] + networks = entity_config["hardware"]["device"]["network"] or [] + break + + for controller in controllers: + contlr = {} + + label = controller["label"] + free_device_slots[label] = controller["freeDeviceSlots"] + type = controller["type"] + if vmw.ControllerMap.get(type): + controller_type = vmw.ControllerMap[type] + else: + controller_type = "IDE" # SCSI/SATA/IDE + + ctlr_type = vmw.VirtualControllerNameMap[type] + + if controller_type == "SCSI": + contlr["controller_type"] = vmw.SCSIControllerOptions[ctlr_type] + + elif controller_type == "SATA": + contlr["controller_type"] = vmw.SATAControllerOptions[ctlr_type] + + contlr["key"] = controller["key"] + controller_label_key_map[controller_type][label] = contlr["key"] + controller_key_type_map[contlr["key"]] = (controller_type, label) + + controller_count[controller_type] += 1 + if controller_type == "SCSI": + contlr["bus_sharing"] = controller["sharedBus"] + + if not tempControllers.get(controller_type): + tempControllers[controller_type] = [] + + tempControllers[controller_type].append(contlr) + + disk_mode_inv = {v: k for k, v in vmw.DISK_MODE.items()} + for disk in disks: + dsk = {} + + dsk["disk_type"] = vmw.DiskMap[disk["type"]] + dsk["key"] = disk["key"] + dsk["controller_key"] = disk["controllerKey"] + + if controller_key_type_map.get(disk["controllerKey"]): + dsk["adapter_type"] = controller_key_type_map.get( + disk["controllerKey"] + )[0] + else: + # Taken from VMwareTemplateDisks.jsx + dsk["adapter_type"] = "IDE" + + if dsk["disk_type"] == "disk": + dsk["size"] = disk["capacityInKB"] // 1024 + dsk["mode"] = disk_mode_inv[disk["backing"]["diskMode"]] + dsk["location"] = ( + disk["backing"]["datastore"]["url"], + disk["backing"]["datastore"]["name"], + ) + dsk["device_slot"] = disk["unitNumber"] + + tempDisks.append(dsk) + + for network in networks: + nic = {} + nic["key"] = network["key"] + nic["net_name"] = network["backing"]["network"]["name"] + nic["nic_type"] = vmw.NetworkAdapterMap.get(network["type"], "") + + tempNics.append(nic) + + response = { + "tempControllers": tempControllers, + "tempDisks": tempDisks, + "tempNics": tempNics, + "free_device_slots": free_device_slots, + "controller_count": controller_count, + "controller_key_type_map": controller_key_type_map, + "controller_label_key_map": controller_label_key_map, + } + + return response + + +def highlight_text(text, **kwargs): + 
"""Highlight text in our standard format""" + return click.style("{}".format(text), fg="blue", bold=False, **kwargs) + + +def create_spec(client): + + CALM_VERSION = Version.get_version("Calm") + spec = {} + Obj = VCenterVmProvider.get_api_obj() + + # VM Configuration + + projects = client.project.get_name_uuid_map() + project_list = list(projects.keys()) + + if not project_list: + click.echo(highlight_text("No projects found!!!")) + click.echo(highlight_text("Please add first")) + return + + click.echo("\nChoose from given projects:") + for ind, name in enumerate(project_list): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + project_id = "" + while True: + ind = click.prompt("\nEnter the index of project", default=1) + if (ind > len(project_list)) or (ind <= 0): + click.echo("Invalid index !!! ") + + else: + project_id = projects[project_list[ind - 1]] + click.echo("{} selected".format(highlight_text(project_list[ind - 1]))) + break + + res, err = client.project.read(project_id) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + project = res.json() + accounts = project["status"]["resources"]["account_reference_list"] + + payload = {"filter": "type==vmware"} + res, err = client.account.list(payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + vmware_accounts = [] + + for entity in res["entities"]: + vmware_accounts.append(entity["metadata"]["uuid"]) + + account_id = "" + for account in accounts: + if account["uuid"] in vmware_accounts: + account_id = account["uuid"] + break + + if not account_id: + click.echo( + highlight_text("No vmware account found registered in this project !!!") + ) + click.echo("Please add one !!!") + return + + click.echo("\nChoose from given Operating System types:") + os_types = list(vmw.OperatingSystem.keys()) + for ind, name in enumerate(os_types): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of operating system", default=1) + if (ind > len(os_types)) or (ind <= 0): + click.echo("Invalid index !!! ") + + else: + os = os_types[ind - 1] + click.echo("{} selected".format(highlight_text(os))) + break + + drs_mode = click.prompt("\nEnable DRS Mode(y/n)", default="n") + drs_mode = True if drs_mode[0] == "y" else False + spec["drs_mode"] = drs_mode + + if not drs_mode: + host_name_id_map = Obj.hosts(account_id) + host_names = list(host_name_id_map.keys()) + if not host_names: + click.echo("\n{}".format(highlight_text("No hosts present"))) + + else: + click.echo("\nChoose from given hosts:") + for ind, name in enumerate(host_names): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of host", default=1) + if (ind > len(host_names)) or (ind <= 0): + click.echo("Invalid index !!! ") + + else: + host_name = host_names[ind - 1] + host_id = host_name_id_map[host_name] # TO BE USED + spec["host"] = host_id + click.echo("{} selected".format(highlight_text(host_name))) + break + + datastore_name_url_map = Obj.datastores(account_id, host_id=host_id) + datastore_names = list(datastore_name_url_map.keys()) + if not datastore_names: + click.echo("\n{}".format(highlight_text("No datastore present"))) + + else: + click.echo("\nChoose from given datastore:") + for ind, name in enumerate(datastore_names): + click.echo("\t {}. 
{}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of datastore", default=1) + if (ind > len(datastore_names)) or (ind <= 0): + click.echo("Invalid index !!! ") + + else: + datastore_name = datastore_names[ind - 1] + datastore_url = datastore_name_url_map[datastore_name] + spec["datastore"] = datastore_url + click.echo("{} selected".format(highlight_text(datastore_name))) + break + + else: + cluster_list = Obj.clusters(account_id) + if not cluster_list: + click.echo("\n{}".format(highlight_text("No cluster present"))) + + else: + click.echo("\nChoose from given cluster:") + for ind, name in enumerate(cluster_list): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of cluster", default=1) + if (ind > len(cluster_list)) or (ind <= 0): + click.echo("Invalid index !!! ") + + else: + cluster_name = cluster_list[ind - 1] # TO BE USED + spec["cluster"] = cluster_name + click.echo("{} selected".format(highlight_text(cluster_name))) + break + + storage_pod_list = Obj.storage_pods(account_id) + if not storage_pod_list: + click.echo("\n{}".format(highlight_text("No storage pod present"))) + + else: + click.echo("\nChoose from given storage pod:") + for ind, name in enumerate(storage_pod_list): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of storage", default=1) + if (ind > len(storage_pod_list)) or (ind <= 0): + click.echo("Invalid index !!! ") + + else: + pod_name = storage_pod_list[ind - 1] # TO BE USED + spec["storage_pod"] = pod_name + click.echo("{} selected".format(highlight_text(pod_name))) + break + + if LV(CALM_VERSION) >= LV("3.5.0"): + click.echo("\nChoose template source:") + for ind, name in enumerate(["VM Template", "Content Library"]): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of source of template", default=1) + if (ind > 2) or (ind <= 0): + click.echo("Invalid index !!! ") + + if ind == 1: + template_name_id_map = Obj.templates(account_id) + template_names = list(template_name_id_map.keys()) + template_id = "" + + if not template_names: + click.echo("\n{}".format(highlight_text("No templates present"))) + + else: + click.echo("\nChoose from given templates:") + for ind, name in enumerate(template_names): + click.echo( + "\t {}. {}".format(str(ind + 1), highlight_text(name)) + ) + + while True: + ind = click.prompt("\nEnter the index of template", default=1) + if (ind > len(template_names)) or (ind <= 0): + click.echo("Invalid index !!! ") + else: + template_name = template_names[ind - 1] + # TO BE USED + template_id = template_name_id_map[template_name] + click.echo( + "{} selected".format(highlight_text(template_name)) + ) + spec["template"] = template_id + break + break + else: + spec["library"] = {} + library_name_id_map = Obj.content_library(account_id) + library_names = list(library_name_id_map.keys()) + library_id = "" + if not library_names: + click.echo("\n{}".format(highlight_text("No Library present"))) + else: + click.echo("\nChoose from given library:") + for ind, name in enumerate(library_names): + click.echo( + "\t {}. {}".format(str(ind + 1), highlight_text(name)) + ) + + while True: + ind = click.prompt("\nEnter the index of library", default=1) + if (ind > len(library_names)) or (ind <= 0): + click.echo("Invalid index !!! 
") + else: + library_name = library_names[ind - 1] + library_id = library_name_id_map[library_name] + click.echo( + "{} selected".format(highlight_text(library_name)) + ) + + library_template_name_id_map = ( + Obj.content_library_templates(account_id, library_id) + ) + library_template_names = list( + library_template_name_id_map.keys() + ) + library_template_id = "" + if not library_template_names: + click.echo( + "\n{}".format( + highlight_text( + "No templates present in the library" + ) + ) + ) + else: + click.echo("\nChoose template from given library:") + for ind, name in enumerate(library_template_names): + click.echo( + "\t {}. {}".format( + str(ind + 1), + highlight_text( + "{} ({})".format( + name, + library_template_name_id_map[name][ + "type" + ], + ) + ), + ) + ) + + while True: + ind = click.prompt( + "\nEnter the index of library template", + default=1, + ) + if (ind > len(library_template_names)) or ( + ind <= 0 + ): + click.echo("Invalid index !!! ") + else: + library_template_name = library_template_names[ + ind - 1 + ] + library_template = library_template_name_id_map[ + library_template_name + ] + library_template_id = library_template["id"] + library_template_type = library_template["type"] + click.echo( + "{} selected".format( + highlight_text(library_template_name) + ) + ) + spec["library"]["library_id"] = library_id + spec["library"][ + "library_template_id" + ] = library_template_id + spec["library"][ + "library_template_type" + ] = library_template_type + break + break + break + else: + template_name_id_map = Obj.templates(account_id) + template_names = list(template_name_id_map.keys()) + template_id = "" + + if not template_names: + click.echo("\n{}".format(highlight_text("No templates present"))) + + else: + click.echo("\nChoose from given templates:") + for ind, name in enumerate(template_names): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of template", default=1) + if (ind > len(template_names)) or (ind <= 0): + click.echo("Invalid index !!! ") + + else: + template_name = template_names[ind - 1] + # TO BE USED + template_id = template_name_id_map[template_name] + click.echo("{} selected".format(highlight_text(template_name))) + break + spec["template"] = template_id + + # Check if user want to supply vmware folder path + if LV(CALM_VERSION) >= LV("3.2.0"): + spec["folder"] = {} + choice = click.prompt( + "\n{}(y/n)".format( + highlight_text( + "Do you want to specify a destination folder for the VM to be placed in?" + ) + ), + default="n", + ) + + if choice[0] == "y": + + choice = click.prompt( + "\n{}(y/n)".format( + highlight_text( + "Do you want to use existing folder in the platform to deploy the new VM?" + ) + ), + default="n", + ) + if choice[0] == "y": + existing_path = click.prompt( + "\nEnter the path to the existing folder in the platform. Use '/' to separate folder names along the path." + ) + spec["folder"]["existing_path"] = existing_path + + choice = click.prompt( + "\n{}(y/n)".format( + highlight_text( + "Do you want to create new folder or path for the VM? If existing folder path is specified, new path will be created under the existing path." + ) + ), + default="n", + ) + if choice[0] == "y": + new_path = click.prompt( + "\nEnter the new folder to be created. For creating a directory structure, use: Folder1/Folder2/Folder3. For simply creating a new folder, just specify the folder name." 
+ ) + spec["folder"]["new_path"] = new_path + + delete_empty_folder = click.prompt( + "\n{}(y/n)".format( + highlight_text( + "Do you want the newly created folder/path to be deleted during application deletion if it does not contain any other resource?" + ) + ), + default="n", + ) + delete_empty_folder = True if delete_empty_folder[0] == "y" else False + spec["folder"]["delete_empty_folder"] = delete_empty_folder + + # VM Configuration + vm_name = "vm-@@{calm_unique_hash}@@-@@{calm_array_index}@@" + spec["name"] = click.prompt("\nEnter instance name", default=vm_name) + + spec["resources"] = {} + + if LV(CALM_VERSION) >= LV("3.2.0"): + # Enable CPU Hot Add + choice = click.prompt( + "\n{}\n{}(y/n)".format( + "Want to enable cpu hot add?", + highlight_text( + "Warning: Support for CPU Hot Add depends upon the Guest OS of the VM" + "\nHot updating the CPU will fail if not supported by the Guest OS." + ), + ), + default="n", + ) + spec["resources"]["cpu_hot_add"] = choice[0] == "y" + + spec["resources"]["num_sockets"] = click.prompt("\nEnter no. of vCPUs", default=1) + spec["resources"]["num_vcpus_per_socket"] = click.prompt( + "\nCores per vCPU", default=1 + ) + + if LV(CALM_VERSION) >= LV("3.2.0"): + # Enable Memory Hot Plug + choice = click.prompt( + "\n{}\n{}(y/n)".format( + "Want to enable memory hot add?", + highlight_text( + "Warning: Support for Memory Hot Plug depends upon the Guest OS of the VM" + "\nHot updating the memory will fail if not supported by the Guest OS." + ), + ), + default="n", + ) + spec["resources"]["memory_hot_plug"] = choice[0] == "y" + + spec["resources"]["memory_size_mib"] = ( + click.prompt("\nMemory(in GiB)", default=1) + ) * 1024 + + response = {} + if LV(CALM_VERSION) >= LV("3.5.0"): + is_library_template = True if "library" in spec.keys() else False + template_id = ( + spec["library"]["library_template_id"] + if is_library_template + else spec["template"] + ) + template_type = ( + spec["library"]["library_template_type"] if is_library_template else "" + ) + if template_id or (is_library_template and template_type != "ovf"): + response = Obj.template_defaults( + account_id, template_id, is_library_template + ) + else: + template_id = spec["template"] + if template_id: + response = Obj.template_defaults(account_id, template_id) + + tempControllers = response.get("tempControllers", {}) + tempDisks = response.get("tempDisks", []) + tempNics = response.get("tempNics", []) + free_device_slots = response.get("free_device_slots", {}) + controller_count = response.get("controller_count", {}) + controller_key_type_map = response.get("controller_key_type_map", {}) + controller_label_key_map = response.get("controller_label_key_map", {}) + + tempSCSIContrlr = tempControllers.get("SCSI", []) + tempSATAContrlr = tempControllers.get("SATA", []) + spec["resources"]["template_controller_list"] = [] + spec["resources"]["template_disk_list"] = [] + spec["resources"]["template_nic_list"] = [] + spec["resources"]["controller_list"] = [] + spec["resources"]["disk_list"] = [] + spec["resources"]["nic_list"] = [] + bus_sharing_inv_map = {v: k for k, v in vmw.BUS_SHARING.items()} + + if tempSATAContrlr or tempSCSIContrlr: + click.secho("\nConfig of Template Controllers:", underline=True) + else: + click.echo("\nNo template controllers found!!") + + if tempSCSIContrlr: + click.secho("\nSCSI Controllers", bold=True, underline=True) + + for index, cntlr in enumerate(tempSCSIContrlr): + click.echo("\n\t\t", nl=False) + click.secho("SCSI CONTROLLER {}\n".format(index + 1), 
underline=True) + + click.echo( + "\nController Type: {}".format(highlight_text(cntlr["controller_type"])) + ) + bus_sharing = bus_sharing_inv_map[cntlr["bus_sharing"]] + click.echo("Bus Sharing: {}".format(highlight_text(bus_sharing))) + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to edit this controller")), + default="n", + ) + if choice[0] == "y": + controllers = list(vmw.CONTROLLER["SCSI"].keys()) + click.echo("\nChoose from given controller types:") + for ind, name in enumerate(controllers): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt( + "\nEnter the index of controller type", default=1 + ) + if (ind > len(controllers)) or (ind <= 0): + click.echo("Invalid index !!! ") + + else: + controller_type = controllers[ind - 1] + click.echo( + "{} selected".format(highlight_text(controller_type)) + ) + controller_type = vmw.CONTROLLER["SCSI"][controller_type] + break + + sharingOptions = list(vmw.BUS_SHARING.keys()) + click.echo("\nChoose from given sharing types:") + for ind, name in enumerate(sharingOptions): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of sharing type", default=1) + if (ind > len(sharingOptions)) or (ind <= 0): + click.echo("Invalid index !!! ") + + else: + res = sharingOptions[ind - 1] + click.echo("{} selected".format(highlight_text(res))) + busSharing = vmw.BUS_SHARING[res] + break + + controller = { + "controller_type": controller_type, + "bus_sharing": busSharing, + "is_deleted": False, + "key": cntlr["key"], + } + spec["resources"]["template_controller_list"].append(controller) + + if tempSATAContrlr: + click.secho("\nSATA Controllers", bold=True, underline=True) + + for index, cntlr in enumerate(tempSATAContrlr): + click.echo("\n\t\t", nl=False) + click.secho("SATA CONTROLLER {}\n".format(index + 1), underline=True) + + click.echo( + "\nController Type: {}".format(highlight_text(cntlr["controller_type"])) + ) + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to edit this controller")), + default="n", + ) + if choice[0] == "y": + controllers = list(vmw.CONTROLLER["SATA"].keys()) + click.echo("\nChoose from given controller types:") + for ind, name in enumerate(controllers): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt( + "\nEnter the index of controller type", default=1 + ) + if (ind > len(controllers)) or (ind <= 0): + click.echo("Invalid index !!! 
") + + else: + controller_type = controllers[ind - 1] + click.echo( + "{} selected".format(highlight_text(controller_type)) + ) + controller_type = vmw.CONTROLLER["SATA"][controller_type] + break + + controller = { + "controller_type": controller_type, + "is_deleted": False, + "key": cntlr["key"], + } + spec["resources"]["template_controller_list"].append(controller) + + if tempDisks: + click.secho("\nConfig of Template Disks:", underline=True) + else: + click.echo("\nNo template disks found !!!") + + for index, disk in enumerate(tempDisks): + click.echo("\n\t\t", nl=False) + click.secho("vDisk {}\n".format(index + 1), underline=True) + disk_type = disk["disk_type"] + adapter_type = disk["adapter_type"] + + click.echo("\nDevice Type: {}".format(highlight_text(disk_type))) + click.echo("Adapter Type: {}".format(highlight_text(adapter_type))) + + if disk_type == "disk": + click.echo("Size (in GiB): {}".format(highlight_text(disk["size"] // 1024))) + click.echo( + "Location : {}".format(highlight_text(disk["location"][1])) + ) if "location" in disk.keys() else None + controller_label = ( + controller_key_type_map[disk["controller_key"]][1] + if "controller_key" in disk.keys() + and disk["controller_key"] in controller_key_type_map.keys() + else "-" + ) + click.echo("Controller: {}".format(highlight_text(controller_label))) + click.echo("Device Slot: {}".format(highlight_text(disk["device_slot"]))) + click.echo( + "Disk Mode: {}".format(highlight_text(disk["mode"])) + ) if "mode" in disk.keys() else None + click.echo("Exclude from vm config: {}".format(highlight_text("No"))) + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to edit this disk")), + default="n", + ) + + # Only size, disk_mode and excluding checkbox is editable(FROM CALM_UI repo) + if choice[0] == "y": + size = click.prompt("\nEnter disk size (in GiB)", default=8) + click.echo("\nChoose from given disk modes:") + + disk_mode_list = list(vmw.DISK_MODE.keys()) + for ind, name in enumerate(disk_mode_list): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of disk mode", default=1) + if (ind > len(disk_mode_list)) or (ind <= 0): + click.echo("Invalid index !!! 
") + + else: + disk_mode = disk_mode_list[ind - 1] + click.echo("{} selected".format(highlight_text(disk_mode))) + disk_mode = vmw.DISK_MODE[disk_mode] + break + + is_deleted = click.prompt( + "\n{}(y/n)".format(highlight_text("Exclude disk from vm config")), + default="n", + ) + is_deleted = True if is_deleted[0] == "y" else False + dsk = { + "disk_size_mb": size * 1024, + "is_deleted": is_deleted, + "disk_mode": disk_mode, + "adapter_type": adapter_type, + "disk_type": disk_type, + "key": disk["key"], + "controller_key": disk["controller_key"], + "device_slot": disk["device_slot"], + "location": disk["location"][0], + } + + spec["resources"]["template_disk_list"].append(dsk) + else: + click.echo(highlight_text("\nNo field can be edited in this template disk")) + + if tempNics: + click.secho("\nConfig of Template Network Adapters:", underline=True) + else: + click.echo("\nNo template network adapters found !!!") + + for index, nic in enumerate(tempNics): + click.echo("\n\t\t", nl=False) + click.secho("vNIC-{}\n".format(index + 1), underline=True) + click.echo("\nAdapter Type: {}".format(highlight_text(nic["nic_type"]))) + click.echo("Network Type: {}".format(highlight_text(nic["net_name"]))) + click.echo("Exclude from vm config: {}".format(highlight_text("No"))) + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to edit this nic")), default="n" + ) + if choice[0] == "y": + click.echo("\nChoose from given network adapters:") + adapter_types = list(vmw.NetworkAdapterMap.values()) + for ind, name in enumerate(adapter_types): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of adapter type", default=1) + if (ind > len(adapter_types)) or (ind <= 0): + click.echo("Invalid index !!! ") + + else: + adapter_type = adapter_types[ind - 1] + click.echo("{} selected".format(highlight_text(adapter_type))) + break + + if not drs_mode: + network_name_id_map = Obj.networks(account_id, host_id=host_id) + else: + network_name_id_map = Obj.networks( + account_id, cluster_name=cluster_name + ) + + click.echo("\nChoose from given network types:") + network_names = list(network_name_id_map.keys()) + for ind, name in enumerate(network_names): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of network type", default=1) + if (ind > len(network_names)) or (ind <= 0): + click.echo("Invalid index !!! 
") + + else: + network_name = network_names[ind - 1] + click.echo("{} selected".format(highlight_text(network_name))) + network_id = network_name_id_map[network_name] + break + + is_deleted = click.prompt( + "\n{}(y/n)".format(highlight_text("Exclude network from vm config")), + default="n", + ) + is_deleted = True if is_deleted[0] == "y" else False + + network = { + "nic_type": adapter_type, + "is_deleted": is_deleted, + "net_name": network_id, + "key": nic["key"], + } + + spec["resources"]["template_nic_list"].append(network) + + # Add vmware tags + if LV(CALM_VERSION) >= LV("3.2.0"): + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Do you want to add any tags?")), + default="n", + ) + if choice[0] == "y": + tags_map = Obj.tags(account_id) + tag_names = list(tags_map["tag_list"].keys()) + tag_names_id = tags_map["tag_list"] + spec["resources"]["tag_list"] = list() + cardinality_list = tags_map["cardinality_list"] + + while True: + if not tag_names: + click.echo(highlight_text("\nNo tags available.")) + break + + else: + click.echo("\nChoose from given Category: Tag pairs: ") + for ind, name in enumerate(tag_names): + click.echo( + "\t {}. {}".format(str(ind + 1), highlight_text(name)) + ) + + while True: + res = click.prompt( + "\nEnter the index of Category: Tag pair", default=1 + ) + if (res > len(tag_names)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + selected_tag = tag_names[res - 1] + selected_tag_id = tag_names_id[selected_tag]["id"] + selected_category = tag_names_id[selected_tag]["name"] + spec["resources"]["tag_list"].append( + {"tag_id": selected_tag_id} + ) + click.echo("{} selected".format(highlight_text(selected_tag))) + tag_names.pop(res - 1) + if cardinality_list[selected_tag.split(":")[0]] == "SINGLE": + tag_names = [ + x + for x in tag_names + if not tag_names_id[x]["name"] == selected_category + ] + + break + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Do you want to add more tags?")), + default="n", + ) + if choice[0] == "n": + break + + VCenterVmProvider.validate_spec(spec) + + click.secho("\nControllers", underline=True) + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add SCSI controllers")), default="n" + ) + while choice[0] == "y": + if controller_count["SCSI"] == vmw.ControllerLimit["SCSI"]: + click.echo(highlight_text("\nNo more SCSI controller can be added")) + + label = "SCSI controller {}".format(controller_count["SCSI"]) + key = controller_count["SCSI"] + vmw.KEY_BASE["CONTROLLER"]["SCSI"] + controller_label_key_map["SCSI"][label] = key + + click.echo("\n\t\t", nl=False) + click.secho("{}\n".format(label), underline=True) + + controllers = list(vmw.CONTROLLER["SCSI"].keys()) + click.echo("\nChoose from given controller types:") + for ind, name in enumerate(controllers): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of controller type", default=1) + if (ind > len(controllers)) or (ind <= 0): + click.echo("Invalid index !!!") + + else: + controller_type = controllers[ind - 1] + click.echo("{} selected".format(highlight_text(controller_type))) + controller_type = vmw.CONTROLLER["SCSI"][controller_type] + break + + free_device_slots[label] = generate_free_slots( + vmw.ControllerDeviceSlotMap[controller_type] + ) + sharingOptions = list(vmw.BUS_SHARING.keys()) + click.echo("\nChoose from given sharing types:") + for ind, name in enumerate(sharingOptions): + click.echo("\t {}. 
{}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of sharing type", default=1) + if (ind > len(sharingOptions)) or (ind <= 0): + click.echo("Invalid index !!! ") + + else: + res = sharingOptions[ind - 1] + click.echo("{} selected".format(highlight_text(res))) + busSharing = vmw.BUS_SHARING[res] + break + + controller_count["SCSI"] += 1 + controller = { + "controller_type": controller_type, + "bus_sharing": busSharing, + "key": key, + } + + controller_key_type_map[key] = ("SCSI", label) + spec["resources"]["controller_list"].append(controller) + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add more SCSI controllers")), + default="n", + ) + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add SATA controllers")), default="n" + ) + while choice[0] == "y": + if controller_count["SATA"] == vmw.ControllerLimit["SATA"]: + click.echo(highlight_text("\nNo more SATA controller can be added")) + + label = "SATA controller {}".format(controller_count["SATA"]) + key = controller_count["SATA"] + vmw.KEY_BASE["CONTROLLER"]["SATA"] + controller_label_key_map["SATA"][label] = key + + click.echo("\n\t\t", nl=False) + click.secho("{}\n".format(label), underline=True) + + controllers = list(vmw.CONTROLLER["SATA"].keys()) + click.echo("\nChoose from given controller types:") + for ind, name in enumerate(controllers): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of controller type", default=1) + if (ind > len(controllers)) or (ind <= 0): + click.echo("Invalid index !!! ") + + else: + controller_type = controllers[ind - 1] + click.echo("{} selected".format(highlight_text(controller_type))) + controller_type = vmw.CONTROLLER["SATA"][controller_type] + break + + free_device_slots[label] = generate_free_slots( + vmw.ControllerDeviceSlotMap[controller_type] + ) + controller_count["SATA"] += 1 + controller = {"controller_type": controller_type, "key": key} + + controller_key_type_map[key] = ("SATA", label) + spec["resources"]["controller_list"].append(controller) + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add more SATA controllers")), + default="n", + ) + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add disks")), default="n" + ) + while choice[0] == "y": + click.echo("\nChoose from given disk types:") + disk_types = list(vmw.DISK_TYPES.keys()) + for ind, name in enumerate(disk_types): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of disk type", default=1) + if (ind > len(disk_types)) or (ind <= 0): + click.echo("Invalid index !!! ") + + else: + disk_type = disk_types[ind - 1] + click.echo("{} selected".format(highlight_text(disk_type))) + disk_type = vmw.DISK_TYPES[disk_type] # TO BE USED + break + + click.echo("\nChoose from given adapter types:") + disk_adapters = vmw.DISK_ADAPTERS[disk_type] + for ind, name in enumerate(disk_adapters): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of adapter type", default=1) + if (ind > len(disk_adapters)) or (ind <= 0): + click.echo("Invalid index !!! 
") + + else: + adapter_type = disk_adapters[ind - 1] + click.echo("{} selected".format(highlight_text(adapter_type))) + # TO BE USED + adapter_type = vmw.DISK_ADAPTER_TYPES[adapter_type] + break + + if disk_type == "disk": + disk_size = click.prompt("\nEnter disk size (in GiB)", default=8) + + if not drs_mode: + datastore_name_url_map = Obj.datastores(account_id, host_id=host_id) + else: + datastore_name_url_map = Obj.datastores( + account_id, cluster_name=cluster_name + ) + + locations = list(datastore_name_url_map.keys()) + click.echo("\nChoose from given locations:") + for ind, name in enumerate(locations): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of datastore", default=1) + if ind > len(locations): + click.echo("Invalid index !!! ") + + else: + datastore_name = locations[ind - 1] + click.echo("{} selected".format(highlight_text(datastore_name))) + # TO BE USED + datastore_url = datastore_name_url_map[datastore_name] + break + + controllers = list(controller_label_key_map[adapter_type].keys()) + if controllers: + click.echo("\nChoose from given controllers:") + for ind, name in enumerate(controllers): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of controller", default=1) + if ind > len(controllers): + click.echo("Invalid index !!! ") + + else: + controller_label = controllers[ind - 1] # TO BE USED + click.echo( + "{} selected".format(highlight_text(controller_label)) + ) + controller_key = controller_label_key_map[adapter_type][ + controller_label + ] # TO BE USED + break + + click.echo("\nChoose from given device slots:") + slots = free_device_slots[controller_label] + for ind, name in enumerate(slots): + click.echo("\t {}. [ {} ]".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of slots", default=1) + if ind > len(slots): + click.echo("Invalid index !!! ") + + else: + device_slot = slots[ind - 1] # TO BE USED + click.echo("{} selected".format(highlight_text(device_slot))) + free_device_slots[controller_label].pop(ind - 1) + break + + click.echo("\nChoose from given device modes:") + disk_modes = list(vmw.DISK_MODE.keys()) + for ind, name in enumerate(disk_modes): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of device mode", default=1) + if ind > len(disk_modes): + click.echo("Invalid index !!! ") + + else: + disk_mode = disk_modes[ind - 1] # TO BE USED + click.echo("{} selected".format(highlight_text(disk_mode))) + disk_mode = vmw.DISK_MODE[disk_mode] + break + + dsk = { + "disk_size_mb": disk_size * 1024, + "disk_mode": disk_mode, + "device_slot": device_slot, # It differs from the request_payload from the UI + "adapter_type": adapter_type, + "location": datastore_url, + "controller_key": controller_key, + "disk_type": disk_type, + } + + else: + click.echo( + highlight_text( + "\nBy default, ISO images across all datastores are available for selection. To filter this list, select a datastore." 
+ ) + ) + datastore_url = "" + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add datastore")), default="n" + ) + if choice[0] == "y": + if not drs_mode: + datastore_name_url_map = Obj.datastores(account_id, host_id=host_id) + else: + datastore_name_url_map = Obj.datastores( + account_id, cluster_name=cluster_name + ) + + datastores = list(datastore_name_url_map.keys()) + click.echo("\nChoose from given datastore:") + for ind, name in enumerate(datastores): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of datastore", default=1) + if (ind > len(datastores)) or (ind <= 0): + click.echo("Invalid index !!! ") + + else: + datastore_name = datastores[ind - 1] + click.echo("{} selected".format(highlight_text(datastore_name))) + datastore_url = datastore_name_url_map[ + datastore_name + ] # TO BE USED + break + + if datastore_url: + file_paths = Obj.file_paths(account_id, datastore_url=datastore_url) + elif not drs_mode: + file_paths = Obj.file_paths(account_id, host_id=host_id) + else: + file_paths = Obj.file_paths(account_id, cluster_name=cluster_name) + + click.echo("\nChoose from given ISO file paths:") + for ind, name in enumerate(file_paths): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of file path", default=1) + if (ind > len(file_paths)) or (ind <= 0): + click.echo("Invalid index !!! ") + + else: + iso_file_path = file_paths[ind - 1] + click.echo("{} selected".format(highlight_text(iso_file_path))) + break + + dsk = { + "adapter_type": adapter_type, + "iso_path": iso_file_path, + "location": datastore_url, + "disk_type": disk_type, + } + + spec["resources"]["disk_list"].append(dsk) + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add more disks")), default="n" + ) + + click.secho("\nNETWORK ADAPTERS", underline=True) + click.echo( + highlight_text( + "Network Configuration is needed for Actions and Runbooks to work" + ) + ) + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add nics")), default="n" + ) + while choice[0] == "y": + click.echo("\nChoose from given network adapters:") + adapter_types = list(vmw.NetworkAdapterMap.values()) + for ind, name in enumerate(adapter_types): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of adapter type", default=1) + if (ind > len(adapter_types)) or (ind <= 0): + click.echo("Invalid index !!! ") + + else: + adapter_type = adapter_types[ind - 1] + click.echo("{} selected".format(highlight_text(adapter_type))) + break + + if not drs_mode: + network_name_id_map = Obj.networks(account_id, host_id=host_id) + else: + network_name_id_map = Obj.networks(account_id, cluster_name=cluster_name) + + click.echo("\nChoose from given network types:") + network_names = list(network_name_id_map.keys()) + for ind, name in enumerate(network_names): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + ind = click.prompt("\nEnter the index of network type", default=1) + if ind > len(network_names): + click.echo("Invalid index !!! 
") + + else: + network_name = network_names[ind - 1] + click.echo("{} selected".format(highlight_text(network_name))) + network_id = network_name_id_map[network_name] + break + + network = {"nic_type": adapter_type, "net_name": network_id} + spec["resources"]["nic_list"].append(network) + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add more nics")), default="n" + ) + + click.secho("\nVM Guest Customization", underline=True) + + gc_enable = click.prompt("\nEnable Guest Customization(y/n)", default="n") + if gc_enable[0] == "y": + spec["resources"]["guest_customization"] = _guest_customization( + Obj, os, account_id + ) + else: + spec["resources"]["guest_customization"] = { + "customization_type": vmw.OperatingSystem[os] + } + + VCenterVmProvider.validate_spec(spec) + click.secho("\nCreate spec for your VMW VM:\n", underline=True) + click.echo(highlight_text(yaml.dump(spec, default_flow_style=False))) + + +def _windows_customization(Obj, account_id): + + spec = {"customization_type": vmw.OperatingSystem["Windows"], "windows_data": {}} + + click.echo("\nChoose from given Guest Customization Modes:") + gc_modes = vmw.GuestCustomizationModes["Windows"] + + for ind, name in enumerate(gc_modes): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of Guest Customization Mode", default=1) + if (res > len(gc_modes)) or (res <= 0): + click.echo("Invalid index !!!") + + else: + gc_mode = gc_modes[res - 1] + click.echo("{} selected".format(highlight_text(gc_mode))) + break + + if gc_mode == "Predefined Customization": + customizations = Obj.customizations(account_id, "Windows") + + if not customizations: + click.echo( + highlight_text("No Predefined Guest Customization registered !!!") + ) + return {} + + click.echo("\nChoose from given customization names:") + for ind, name in enumerate(customizations): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of Customization Name", default=1) + if (res > len(customizations)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + custn_name = customizations[res - 1] + click.echo("{} selected".format(highlight_text(custn_name))) + break + + return { + "customization_type": vmw.OperatingSystem["Linux"], + "customization_name": custn_name, + } + + else: + computer_name = click.prompt("\tComputer Name: ", default="") + full_name = click.prompt("\tFull name: ", default="") + organization_name = click.prompt("\tOrganization Name: ", default="") + product_id = click.prompt("\tProduct Id: ", default="") + + timezone_name_ind_map = Obj.timezones(vmw.OperatingSystem["Windows"]) + timezone_names = list(timezone_name_ind_map.keys()) + timezone = "" + + click.echo("\nChoose from given timezone names:") + for ind, name in enumerate(timezone_names): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of Timezone", default=1) + if (res > len(timezone_names)) or (res <= 0): + click.echo("Invalid index !!! 
") + + else: + timezone = timezone_names[res - 1] + click.echo("{} selected".format(highlight_text(timezone))) + timezone = timezone_name_ind_map[timezone] + break + + admin_password = click.prompt( + "\nAdmin Password", default="Admin_password", hide_input=True + ) + + choice = click.prompt( + "\nAutomatically logon as administrator(y/n)", default="n" + ) + auto_logon = True if choice[0] == "y" else False + + spec["windows_data"].update( + { + "product_id": product_id, + "computer_name": computer_name, + "auto_logon": auto_logon, + "organization_name": organization_name, + "timezone": timezone, + "full_name": full_name, + "password": { + "value": admin_password, + "attrs": {"is_secret_modified": True}, + }, + } + ) + + if auto_logon: + login_count = click.prompt( + "Number of times to logon automatically", default=1 + ) + spec["windows_data"]["login_count"] = login_count + + command_list = [] + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add commands")), default="n" + ) + + while choice[0] == "y": + command = click.prompt("\tCommand", default="") + command_list.append(command) + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add more commands")), + default="n", + ) + + spec["windows_data"]["command_list"] = command_list + + # Domain and Workgroup Setting + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to join domain")), default="n" + ) + is_domain = True if choice[0] == "y" else False + + if not is_domain: + workgroup = click.prompt("\n\tWorkgroup: ", default="") + spec["windows_data"].update( + {"is_domain": is_domain, "workgroup": workgroup} + ) + + else: + domain = click.prompt("\tDomain Name: ", default="") + domain_user = click.prompt("\tUsername: ", default="admin") + domain_password = click.prompt( + "\tPassword: ", default="Domain_password", hide_input=True + ) + spec["windows_data"].update( + { + "is_domain": is_domain, + "domain": domain, + "domain_user": domain_user, + "domain_password": { + "value": domain_password, + "attrs": {"is_secret_modified": True}, + }, + } + ) + + return spec + + +def _linux_customization(Obj, account_id): + + click.echo("\nChoose from given Guest Customization Modes:") + gc_modes = vmw.GuestCustomizationModes["Linux"] + + for ind, name in enumerate(gc_modes): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of Guest Customization Mode", default=1) + if (res > len(gc_modes)) or (res <= 0): + click.echo("Invalid index !!!") + + else: + gc_mode = gc_modes[res - 1] + click.echo("{} selected".format(highlight_text(gc_mode))) + break + + if gc_mode == "Predefined Customization": + customizations = Obj.customizations(account_id, "Linux") + + if not customizations: + click.echo( + highlight_text("No Predefined Guest Customization registered !!!") + ) + return {} + + click.echo("\nChoose from given customization names:") + for ind, name in enumerate(customizations): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of Customization Name", default=1) + if (res > len(customizations)) or (res <= 0): + click.echo("Invalid index !!! 
") + + else: + custn_name = customizations[res - 1] + click.echo("{} selected".format(highlight_text(custn_name))) + break + + return { + "customization_type": vmw.OperatingSystem["Linux"], + "customization_name": custn_name, + } + + elif gc_mode == "Cloud Init": + script = click.prompt("\nEnter script", default="") + return { + "customization_type": vmw.OperatingSystem["Linux"], + "cloud_init": script, + } + + else: + host_name = click.prompt("\nEnter Hostname", default="") + domain = click.prompt("\nEnter Domain", default="") + + timezone_name_ind_map = Obj.timezones(vmw.OperatingSystem["Linux"]) + timezone_names = list(timezone_name_ind_map.keys()) + timezone = "" + + click.echo("\nChoose from given timezone names:") + for ind, name in enumerate(timezone_names): + click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name))) + + while True: + res = click.prompt("\nEnter the index of Timezone", default=1) + if (res > len(timezone_names)) or (res <= 0): + click.echo("Invalid index !!! ") + + else: + timezone = timezone_names[res - 1] + click.echo("{} selected".format(highlight_text(timezone))) + timezone = timezone_name_ind_map[timezone] + break + + choice = click.prompt("\nEnable Hardware clock UTC(y/n)", default="n") + hw_ctc_clock = True if choice[0] == "y" else False + + return { + "customization_type": vmw.OperatingSystem["Linux"], + "linux_data": { + "hw_utc_clock": hw_ctc_clock, + "domain": domain, + "hostname": host_name, + "timezone": timezone, + }, + } + + +def _guest_customization(Obj, os, account_id): + + if os == "Windows": + gc = _windows_customization(Obj, account_id) + data = "windows_data" + + else: + gc = _linux_customization(Obj, account_id) + data = "linux_data" + + if gc.get(data): + + click.secho("\nNetwork Settings", underline=True) + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add a network")), default="n" + ) + + network_settings = [] + while choice[0] == "y": + choice = click.prompt("\n\tUse DHCP(y/n)", default="y") + is_dhcp = True if choice[0] == "y" else False + + if not is_dhcp: + settings_name = click.prompt("\tSetting name: ", default="") + ip = click.prompt("\tIP: ", default="") + subnet_mask = click.prompt("\tSubnet Mask: ", default="") + gateway_default = click.prompt("\tDefault Gateway: ", default="") + gateway_alternate = click.prompt("\tAlternate Gateway: ", default="") + + network_settings.append( + { + "is_dhcp": is_dhcp, + "name": settings_name, + "ip": ip, + "subnet_mask": subnet_mask, + "gateway_default": gateway_default, + "gateway_alternate": gateway_alternate, + } + ) + + else: + network_settings.append({"is_dhcp": is_dhcp}) + + choice = click.prompt( + "\n{}(y/n)".format(highlight_text("Want to add more networks")), + default="n", + ) + + click.secho("\nDNS Setting", underline=True) + dns_primary = click.prompt("\n\tDNS Primary: ", default="") + dns_secondary = click.prompt("\tDNS Secondary: ", default="") + dns_tertiary = click.prompt("\tDNS Tertiary: ", default="") + dns_search_path = click.prompt("\tDNS Search Path: ", default="") + + gc[data].update( + { + "network_settings": network_settings, + "dns_search_path": [dns_search_path], + "dns_tertiary": dns_tertiary, + "dns_primary": dns_primary, + "dns_secondary": dns_secondary, + } + ) + + return gc + + +def generate_free_slots(limit): + + slots = [] + for i in range(limit): + slots.append(i) + + return slots diff --git a/framework/calm/dsl/providers/plugins/vmware_vm/vmware_vm_provider_spec.yaml.jinja2 
b/framework/calm/dsl/providers/plugins/vmware_vm/vmware_vm_provider_spec.yaml.jinja2 new file mode 100644 index 0000000..5e5ab83 --- /dev/null +++ b/framework/calm/dsl/providers/plugins/vmware_vm/vmware_vm_provider_spec.yaml.jinja2 @@ -0,0 +1,504 @@ + +{% macro VcenterNicInfo() -%} + +title: VCenter NICs +type: [object, "null"] +properties: + nic_type: + type: string + net_name: + type: string + type: + type: string + +{%- endmacro %} + + +{% macro VcenterTemplateNicInfo() -%} + +title: VCenter Template NIC +type: [object, "null"] +properties: + nic_type: + type: string + net_name: + type: string + is_deleted: + type: boolean + default: False + key: + type: integer + default: -1 + type: + type: string + +{%- endmacro %} + + +{% macro VcenterVdiskInfo() -%} + +title: VCenter Disk +type: [object, "null"] +properties: + disk_type: + type: string + default: disk + adapter_type: + type: string + default: SCSI + disk_size_mb: + type: ["integer", "null"] + disk_mode: + type: string + default: "persistent" + location: + type: string + default: "" + controller_key: + type: integer + device_slot: + type: ["integer", "string"] + iso_path: + type: string + default: "" + type: + type: string + +{%- endmacro %} + + +{% macro VcenterTemplateDiskInfo() -%} + +title: VCenter Template DISK +type: [object, "null"] +properties: + disk_type: + type: string + default: disk + adapter_type: + type: string + default: SCSI + disk_size_mb: + type: ["integer", "null"] + disk_mode: + type: string + location: + type: string + default: "" + controller_key: + type: integer + default: -1 + device_slot: + type: ["integer", "string"] + default: -1 + iso_path: + type: string + default: "" + is_deleted: + type: boolean + default: False + key: + type: integer + type: + type: string + +{%- endmacro %} + + +{% macro VcenterVControllerInfo() -%} + +title: VCenter Controller +type: [object, "null"] +properties: + controller_type: + type: string + key: + type: integer + default: -1 + bus_sharing: + type: string + type: + type: string + +{%- endmacro %} + + +{% macro VcenterTemplateControllerInfo() -%} + +title: VCenter Template Controller +type: [object, "null"] +properties: + controller_type: + type: string + key: + type: integer + bus_sharing: + type: string + is_deleted: + type: boolean + default: False + type: + type: string + +{%- endmacro %} + + +{% macro VcenterNetworkSetting() -%} + +title: Guest Customization Network Settings +type: [object, "null"] +properties: + name: + type: string + subnet_mask: + type: string + gateway_default: + type: string + gateway_alternate: + type: string + is_dhcp: + type: boolean + ip: + type: string + type: + type: string + +{%- endmacro %} + + +{% macro VcenterLinuxSpecInfo() -%} + +title: Guest Customization Linux data +type: [object, "null"] +properties: + hostname: + type: string + domain: + type: string + timezone: + type: string + hw_utc_clock: + type: boolean + dns_primary: + type: string + dns_secondary: + type: string + dns_tertiary: + type: string + dns_search_path: + type: array + items: + type: string + network_settings: + type: array + items: + {{ VcenterNetworkSetting() | indent(6) }} + type: + type: string + +{%- endmacro %} + + +{% macro VcenterWindowsSysprep() -%} + +title: VMWARE Windoes Guest Customization Sysprep +type: [object, "null"] +properties: + unattend_xml: + type: string + is_domain: + type: boolean + domain: + type: string + dns_ip: + type: string + dns_search_path: + type: string + domain_credential_reference: + type: [object, "null"] + properties: + uuid: + 
type: string + kind: + type: string + default: subnet + name: + type: string + type: + type: string + type: + type: string + +{%- endmacro %} + + +{% macro VcenterWindowsSpecInfo() -%} + +title: Guest Customization Windows data +type: [object, "null"] +properties: + computer_name: + type: string + product_id: + type: string + full_name: + type: string + organization_name: + type: string + workgroup: + type: string + timezone: + type: string + is_domain: + type: boolean + domain: + type: string + domain_user: + type: string + command_list: + type: array + items: + type: string + dns_primary: + type: string + dns_secondary: + type: string + dns_tertiary: + type: string + dns_search_path: + type: array + items: + type: string + network_settings: + type: array + items: + {{ VcenterNetworkSetting() | indent(6) }} + auto_logon: + type: boolean + default: False + login_count: + type: integer + default: 1 + type: + type: string + password: + type: [object, "null"] + properties: + attrs: + type: [object, "null"] + properties: + is_secret_modified: + type: boolean + default: False + secret_reference: + type: [object, "null"] + default: {} + type: + type: string + value: + type: string + type: + type: string + domain_password: + type: [object, "null"] + properties: + attrs: + type: [object, "null"] + properties: + is_secret_modified: + type: boolean + default: False + secret_reference: + type: [object, "null"] + default: {} + type: + type: string + value: + type: string + type: + type: string + sysprep: + {{ VcenterWindowsSysprep() | indent(4) }} + +{%- endmacro %} + + +{% macro VcenterGuestCustomization() -%} + +title: VMWARE Guest Customization +type: [object, "null"] +properties: + customization_type: + type: string + enum: + - GUEST_OS_LINUX + - GUEST_OS_WINDOWS + cloud_init: + type: string + customization_name: + type: string + linux_data: + {{ VcenterLinuxSpecInfo() | indent(4) }} + windows_data: + {{ VcenterWindowsSpecInfo() | indent(4) }} + type: + type: string + +{%- endmacro %} + +{% macro VcenterLibrary() -%} + +title: VMWARE Content Library +type: [object, "null"] +properties: + library_id: + type: string + library_template_id: + type: string + library_template_type: + type: string + type: + type: string + +{%- endmacro %} + +{% macro VcenterFolder() -%} + +title: VMWARE Folder +type: [object, "null"] +properties: + existing_path: + type: string + new_path: + type: string + delete_empty_folder: + type: boolean + type: + type: string + +{%- endmacro %} + +{% macro VcenterTags() -%} + +title: VMWARE Tags +type: [object, "null"] +properties: + tag_id: + type: string + type: + type: string + +{%- endmacro %} + +{% macro VcenterVMResource() -%} + +title: VMWARE Resources +type: [object, "null"] +properties: + account_uuid: + type: string + cpu_hot_add: + type: boolean + num_vcpus_per_socket: + type: integer + default: 0 + num_sockets: + type: integer + default: 0 + memory_hot_plug: + type: boolean + memory_size_mib: + type: integer + default: 0 + power_state: + type: string + default: poweron + type: + type: string + nic_list: + type: array + items: + {{ VcenterNicInfo() | indent(6) }} + disk_list: + type: array + items: + {{ VcenterVdiskInfo() | indent(6) }} + controller_list: + type: array + items: + {{ VcenterVControllerInfo() | indent(6) }} + template_nic_list: + type: array + items: + {{ VcenterTemplateNicInfo() | indent(6) }} + tag_list: + type: array + items: + {{ VcenterTags() | indent(6) }} + template_disk_list: + type: array + items: + {{ VcenterTemplateDiskInfo() | indent(6) }} + 
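create_spec assembles spec["resources"] to match the VcenterVMResource schema defined by these macros; a minimal instance, with made-up values for illustration, would look like:

# Illustrative only: a minimal resources payload of the shape this schema describes.
resources = {
    "num_sockets": 2,
    "num_vcpus_per_socket": 1,
    "memory_size_mib": 4096,   # entered in GiB on the CLI, stored in MiB
    "power_state": "poweron",
    "nic_list": [],
    "disk_list": [],
    "controller_list": [],
    "template_nic_list": [],
    "template_disk_list": [],
    "template_controller_list": [],
}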
template_controller_list: + type: array + items: + {{ VcenterTemplateControllerInfo() | indent(6) }} + guest_customization: + {{ VcenterGuestCustomization() | indent(4) }} + +{%- endmacro %} + + +{% macro VMWCreateSpec() -%} + +title: VMWare CreateSpec +type: [object, "null"] +properties: + name: + type: string + type: + type: string + enum: [PROVISION_VMWARE_VM, ''] + default: PROVISION_VMWARE_VM + drs_mode: + type: boolean + default: False + compute_drs_mode: + type: boolean + default: False + storage_drs_mode: + type: boolean + default: False + cluster: + type: string + template: + type: string + storage_pod: + type: string + host: + type: string + datastore: + type: string + clone_is_template: + type: boolean + default: False + folder: + {{ VcenterFolder() | indent(4) }} + library: + x-calm-dsl-min-version: 3.5.0 + {{ VcenterLibrary() | indent(4) }} + resources: + {{ VcenterVMResource() | indent(4) }} + +{%- endmacro %} + + +info: + title: VMWARE_VM + description: VMWARE VM spec payload using v3 API + version: 3.0.1 # TODO add right version of ahv schema + +components: + schemas: + provider_spec: + {{ VMWCreateSpec() | indent(6) }} diff --git a/framework/calm/dsl/runbooks/__init__.py b/framework/calm/dsl/runbooks/__init__.py new file mode 100644 index 0000000..27dc473 --- /dev/null +++ b/framework/calm/dsl/runbooks/__init__.py @@ -0,0 +1,64 @@ +# Do not use `from calm.dsl.builtins import *`, As we need to include +# each module inside __all__ variable, else `from calm.dsl.runbooks import *` +# will not import those modules. + +from calm.dsl.builtins.models.ref import ref, RefType +from calm.dsl.builtins.models.calm_ref import Ref +from calm.dsl.builtins.models.metadata import Metadata, MetadataType +from calm.dsl.builtins.models.credential import ( + basic_cred, + secret_cred, + dynamic_cred, + CredentialType, +) + +from calm.dsl.builtins.models.utils import ( + read_file, + read_local_file, + read_env, +) + +from calm.dsl.builtins.models.variable import RunbookVariable +from calm.dsl.builtins.models.task import RunbookTask, Status +from calm.dsl.builtins.models.runbook import Runbook, runbook, runbook_json, branch +from calm.dsl.builtins.models.action import parallel + +from calm.dsl.builtins.models.endpoint import ( + Endpoint, + _endpoint, + CalmEndpoint, +) + +from calm.dsl.builtins.models.runbook_service import RunbookService +from calm.dsl.builtins.models.endpoint_payload import create_endpoint_payload +from calm.dsl.builtins.models.runbook_payload import create_runbook_payload + + +__all__ = [ + "Ref", + "ref", + "RefType", + "basic_cred", + "secret_cred", + "dynamic_cred", + "CredentialType", + "Metadata", + "MetadataType", + "read_file", + "read_local_file", + "read_env", + "RunbookVariable", + "RunbookTask", + "Status", + "Runbook", + "runbook", + "runbook_json", + "branch", + "parallel", + "Endpoint", + "_endpoint", + "CalmEndpoint", + "RunbookService", + "create_endpoint_payload", + "create_runbook_payload", +] diff --git a/framework/calm/dsl/store/__init__.py b/framework/calm/dsl/store/__init__.py new file mode 100644 index 0000000..a8712ff --- /dev/null +++ b/framework/calm/dsl/store/__init__.py @@ -0,0 +1,5 @@ +from .secrets import Secret +from .cache import Cache +from .version import Version + +__all__ = ["Secret", "Cache", "Version"] diff --git a/framework/calm/dsl/store/cache.py b/framework/calm/dsl/store/cache.py new file mode 100644 index 0000000..1e72ddc --- /dev/null +++ b/framework/calm/dsl/store/cache.py @@ -0,0 +1,228 @@ +import click +import sys +import 
traceback +from peewee import OperationalError, IntegrityError +from distutils.version import LooseVersion as LV + +from .version import Version +from calm.dsl.config import get_context +from calm.dsl.db import get_db_handle, init_db_handle +from calm.dsl.log import get_logging_handle +from calm.dsl.api import get_client_handle_obj + +LOG = get_logging_handle(__name__) + +CALM_VERSION = Version.get_version("Calm") + + +class Cache: + """Cache class Implementation""" + + @classmethod + def get_cache_tables(cls, sync_version=False): + """returns tables used for cache purpose""" + + db = get_db_handle() + db_tables = db.registered_tables + + # Get calm version from api only if necessary + calm_version = CALM_VERSION + if sync_version or (not calm_version): + context = get_context() + server_config = context.get_server_config() + client = get_client_handle_obj( + server_config["pc_ip"], + server_config["pc_port"], + auth=(server_config["pc_username"], server_config["pc_password"]), + ) + res, err = client.version.get_calm_version() + if err: + LOG.error("Failed to get version") + sys.exit(err["error"]) + calm_version = res.content.decode("utf-8") + + cache_tables = {} + + for table in db_tables: + if hasattr(table, "__cache_type__") and ( + LV(calm_version) >= LV(table.feature_min_version) + ): + cache_tables[table.__cache_type__] = table + return cache_tables + + @classmethod + def get_entity_data(cls, entity_type, name, **kwargs): + """returns entity data corresponding to supplied entry using entity name""" + + db_cls = cls.get_entity_db_table_object(entity_type) + + try: + res = db_cls.get_entity_data(name=name, **kwargs) + except OperationalError: + formatted_exc = traceback.format_exc() + LOG.debug("Exception Traceback:\n{}".format(formatted_exc)) + LOG.error( + "Cache error occurred. Please update cache using 'calm update cache' command" + ) + sys.exit(-1) + + if not res: + kwargs["name"] = name + LOG.debug( + "Unsuccessful db query from {} table for following params {}".format( + entity_type, kwargs + ) + ) + + return res + + @classmethod + def get_entity_data_using_uuid(cls, entity_type, uuid, *args, **kwargs): + """returns entity data corresponding to supplied entry using entity uuid""" + + db_cls = cls.get_entity_db_table_object(entity_type) + + try: + res = db_cls.get_entity_data_using_uuid(uuid=uuid, **kwargs) + except OperationalError: + formatted_exc = traceback.format_exc() + LOG.debug("Exception Traceback:\n{}".format(formatted_exc)) + LOG.error( + "Cache error occurred. 
Please update cache using 'calm update cache' command" + ) + sys.exit(-1) + + if not res: + kwargs["uuid"] = uuid + LOG.debug( + "Unsuccessful db query from {} table for following params {}".format( + entity_type, kwargs + ) + ) + + return res + + @classmethod + def get_entity_db_table_object(cls, entity_type): + """returns database entity table object corresponding to entity""" + + if not entity_type: + LOG.error("No entity type for cache supplied") + sys.exit(-1) + + cache_tables = cls.get_cache_tables() + db_cls = cache_tables.get(entity_type, None) + if not db_cls: + LOG.error("Unknown entity type ({}) supplied".format(entity_type)) + sys.exit(-1) + + return db_cls + + @classmethod + def add_one(cls, entity_type, uuid, **kwargs): + """adds one entity to entity db object""" + + db_obj = cls.get_entity_db_table_object(entity_type) + db_obj.add_one(uuid, **kwargs) + + @classmethod + def delete_one(cls, entity_type, uuid, **kwargs): + """adds one entity to entity db object""" + + db_obj = cls.get_entity_db_table_object(entity_type) + db_obj.delete_one(uuid, **kwargs) + + @classmethod + def update_one(cls, entity_type, uuid, **kwargs): + """adds one entity to entity db object""" + + db_obj = cls.get_entity_db_table_object(entity_type) + db_obj.update_one(uuid, **kwargs) + + @classmethod + def sync(cls): + """Sync cache by latest data""" + + def sync_tables(tables): + for table in tables: + table.sync() + click.echo(".", nl=False, err=True) + + cache_table_map = cls.get_cache_tables(sync_version=True) + tables = list(cache_table_map.values()) + + # Inserting version table at start + tables.insert(0, Version) + + try: + LOG.info("Updating cache", nl=False) + sync_tables(tables) + + except (OperationalError, IntegrityError): + click.echo(" [Fail]") + # init db handle once (recreating db if some schema changes are there) + LOG.info("Removing existing db and updating cache again") + init_db_handle() + LOG.info("Updating cache", nl=False) + sync_tables(tables) + click.echo(" [Done]", err=True) + + @classmethod + def sync_table(cls, cache_type): + """sync the cache table provided in cache_type list""" + + if not cache_type: + return + + cache_type = [cache_type] if not isinstance(cache_type, list) else cache_type + cache_table_map = cls.get_cache_tables() + + for _ct in cache_type: + if _ct not in cache_table_map: + LOG.warning("Invalid cache_type ('{}') provided".format(cache_type)) + continue + + cache_table = cache_table_map[_ct] + cache_table.sync() + + @classmethod + def clear_entities(cls): + """Clear data present in the cache tables""" + + # For now clearing means erasing all data. So reinitialising whole database + init_db_handle() + + @classmethod + def show_data(cls): + """Display data present in cache tables""" + + cache_tables = cls.get_cache_tables() + for cache_type, table in cache_tables.items(): + click.echo("\n{}".format(cache_type.upper())) + try: + table.show_data() + except OperationalError: + formatted_exc = traceback.format_exc() + LOG.debug("Exception Traceback:\n{}".format(formatted_exc)) + LOG.error( + "Cache error occurred. 
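For context, the Cache helpers defined in this file are consumed elsewhere in the DSL roughly as follows; the entity-type string and names are assumptions for illustration, not taken from this diff:

# Sketch only: typical calls into the Cache table helpers added in cache.py.
from calm.dsl.store import Cache

Cache.sync()  # rebuild every registered cache table from the server
project = Cache.get_entity_data("project", "default")            # lookup by name (hypothetical values)
same = Cache.get_entity_data_using_uuid("project", "a1b2-c3d4")  # lookup by uuid (hypothetical)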
Please update cache using 'calm update cache' command" + ) + sys.exit(-1) + + @classmethod + def show_table(cls, cache_type): + """sync the cache table provided in cache_type list""" + + if not cache_type: + return + + cache_type = [cache_type] if not isinstance(cache_type, list) else cache_type + cache_table_map = cls.get_cache_tables() + + for _ct in cache_type: + if _ct not in cache_table_map: + LOG.warning("Invalid cache_type ('{}') provided".format(cache_type)) + continue + + cache_table = cache_table_map[_ct] + cache_table.show_data() diff --git a/framework/calm/dsl/store/secrets.py b/framework/calm/dsl/store/secrets.py new file mode 100644 index 0000000..2afd6a0 --- /dev/null +++ b/framework/calm/dsl/store/secrets.py @@ -0,0 +1,123 @@ +import datetime +import uuid +import peewee + +from ..crypto import Crypto +from calm.dsl.db import get_db_handle +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +class Secret: + """Secret class implementation""" + + @classmethod + def create(cls, name, value, pass_phrase="dslp4ssw0rd"): + """Stores the secret in db""" + + db = get_db_handle() + pass_phrase = pass_phrase.encode() + LOG.debug("Encryting data") + encrypted_msg = Crypto.encrypt_AES_GCM(value, pass_phrase) + + (kdf_salt, ciphertext, iv, auth_tag) = encrypted_msg + + secret = db.secret_table.create(name=name, uuid=str(uuid.uuid4())) + + db.data_table.create( + secret_ref=secret, + kdf_salt=kdf_salt, + ciphertext=ciphertext, + iv=iv, + auth_tag=auth_tag, + pass_phrase=pass_phrase, + ) + + @classmethod + def get_instance(cls, name): + """Return secret instance""" + + db = get_db_handle() + try: + secret = db.secret_table.get(db.secret_table.name == name) + except peewee.DoesNotExist: + raise ValueError("Entity not found !!!") + + return secret + + @classmethod + def delete(cls, name): + """Deletes the secret from db""" + + secret = cls.get_instance(name) + secret.delete_instance(recursive=True) + + @classmethod + def update(cls, name, value): + """Updates the secret in Database""" + + db = get_db_handle() + secret = cls.get_instance(name) + secret_data = secret.data[0] # using backref + + pass_phrase = secret_data.pass_phrase + + LOG.debug("Encrypting new data") + encrypted_msg = Crypto.encrypt_AES_GCM(value, pass_phrase) + + (kdf_salt, ciphertext, iv, auth_tag) = encrypted_msg + + query = db.data_table.update( + kdf_salt=kdf_salt, + ciphertext=ciphertext, + iv=iv, + auth_tag=auth_tag, + pass_phrase=pass_phrase, + ).where(db.data_table.secret_ref == secret) + + query.execute() + + query = db.secret_table.update(last_update_time=datetime.datetime.now()).where( + db.secret_table.name == name + ) + + query.execute() + + @classmethod + def list(cls): + """returns the list of secrets stored in db""" + + db = get_db_handle() + + secret_basic_configs = [] + for secret in db.secret_table.select(): + secret_basic_configs.append(secret.get_detail_dict()) + + return secret_basic_configs + + @classmethod + def find(cls, name, pass_phrase=None): + """Find the value of secret""" + + secret = cls.get_instance(name) + secret_data = secret.data[0] # using backref + + if not pass_phrase: + pass_phrase = secret_data.pass_phrase + else: + pass_phrase = pass_phrase.encode() + + enc_msg = secret_data.generate_enc_msg() + LOG.debug("Decrypting data") + secret_val = Crypto.decrypt_AES_GCM(enc_msg, pass_phrase) + + return secret_val + + @classmethod + def clear(cls): + """Deletes all the secrets present in the data""" + + db = get_db_handle() + for secret in 
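The Secret store introduced here wraps AES-GCM encryption around a peewee table; a usage sketch with illustrative names and values:

# Sketch only: round-tripping a secret through the store added in secrets.py.
from calm.dsl.store import Secret

Secret.create("pc_admin_password", "s3cr3t-value")   # encrypt with the default passphrase and persist
value = Secret.find("pc_admin_password")             # decrypt using the stored passphrase
Secret.update("pc_admin_password", "n3w-value")
Secret.delete("pc_admin_password")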
db.secret_table.select(): + secret.delete_instance(recursive=True) diff --git a/framework/calm/dsl/store/version.py b/framework/calm/dsl/store/version.py new file mode 100644 index 0000000..fc9fb04 --- /dev/null +++ b/framework/calm/dsl/store/version.py @@ -0,0 +1,51 @@ +import peewee + +from calm.dsl.db import get_db_handle +from calm.dsl.api import get_api_client + + +class Version: + """Version class Implementation""" + + @classmethod + def create(cls, name="", version=""): + """Store the uuid of entity in cache""" + + db = get_db_handle() + db.version_table.create(name=name, version=version) + + @classmethod + def get_version(cls, name): + """Returns the version of entity present""" + + db = get_db_handle() + try: + entity = db.version_table.get(db.version_table.name == name) + return entity.version + + except peewee.DoesNotExist: + return None + + @classmethod + def sync(cls): + + db = get_db_handle() + for entity in db.version_table.select(): + query = db.version_table.delete().where( + db.version_table.name == entity.name + ) + query.execute() + + client = get_api_client() + + # Update calm version + res, err = client.version.get_calm_version() + calm_version = res.content.decode("utf-8") + cls.create("Calm", calm_version) + + # Update pc_version of PC(if host exist) + res, err = client.version.get_pc_version() + if not err: + res = res.json() + pc_version = res["version"] + cls.create("PC", pc_version) diff --git a/framework/calm/dsl/tools/__init__.py b/framework/calm/dsl/tools/__init__.py new file mode 100644 index 0000000..250700e --- /dev/null +++ b/framework/calm/dsl/tools/__init__.py @@ -0,0 +1,12 @@ +from .ping import ping +from .validator import StrictDraft7Validator +from .utils import get_module_from_file, make_file_dir + + +__all__ = [ + "RenderJSON", + "ping", + "StrictDraft7Validator", + "get_module_from_file", + "make_file_dir", +] diff --git a/framework/calm/dsl/tools/ping.py b/framework/calm/dsl/tools/ping.py new file mode 100644 index 0000000..372c3b8 --- /dev/null +++ b/framework/calm/dsl/tools/ping.py @@ -0,0 +1,12 @@ +import os +import warnings + + +def ping(ip): + + # check if ip is reachable + ecode = os.system("ping -c 1 " + ip) + if ecode != 0: + warnings.warn(UserWarning("Cannot reach PC server at {}".format(ip))) + return False + return True diff --git a/framework/calm/dsl/tools/render_json.py b/framework/calm/dsl/tools/render_json.py new file mode 100644 index 0000000..7b62f94 --- /dev/null +++ b/framework/calm/dsl/tools/render_json.py @@ -0,0 +1,32 @@ +import uuid + +try: + from IPython.display import display_javascript, display_html +except ImportError: + print("Could not import Ipython based classes") + +import json + + +class RenderJSON: + def __init__(self, json_data): + if isinstance(json_data, dict): + self.json_str = json.dumps(json_data) + else: + self.json_str = json_data + self.uuid = str(uuid.uuid4()) + + def _ipython_display_(self): + display_html( + '
'.format(self.uuid), + raw=True, + ) + display_javascript( + """ + require(["https://rawgit.com/caldwell/renderjson/master/renderjson.js"], function() { + document.getElementById('%s').appendChild(renderjson(%s)) + }); + """ + % (self.uuid, self.json_str), + raw=True, + ) diff --git a/framework/calm/dsl/tools/utils.py b/framework/calm/dsl/tools/utils.py new file mode 100644 index 0000000..21dd26f --- /dev/null +++ b/framework/calm/dsl/tools/utils.py @@ -0,0 +1,38 @@ +import importlib.util +import os +import sys +import errno + +from calm.dsl.log import get_logging_handle + +LOG = get_logging_handle(__name__) + + +def make_file_dir(path, is_dir=False): + """creates the file directory if not present""" + + # Create parent directory if not present + if not os.path.exists(os.path.dirname(os.path.realpath(path))): + try: + os.makedirs(os.path.dirname(os.path.realpath(path))) + + except OSError as exc: + if exc.errno != errno.EEXIST: + raise Exception("[{}] - {}".format(exc["code"], exc["error"])) + + if is_dir and (not os.path.exists(path)): + os.makedirs(path) + + +def get_module_from_file(module_name, file): + """Returns a module given a user python file (.py)""" + spec = importlib.util.spec_from_file_location(module_name, file) + user_module = importlib.util.module_from_spec(spec) + + try: + spec.loader.exec_module(user_module) + except Exception as exp: + LOG.exception(exp) + sys.exit(-1) + + return user_module diff --git a/framework/calm/dsl/tools/validator.py b/framework/calm/dsl/tools/validator.py new file mode 100644 index 0000000..1730eaa --- /dev/null +++ b/framework/calm/dsl/tools/validator.py @@ -0,0 +1,136 @@ +from jsonschema import _utils +from jsonschema import Draft7Validator, validators +from jsonschema.exceptions import _Error +from jsonschema._utils import ensure_list, types_msg, unbool +import textwrap +from ruamel import yaml +import json + +_unset = _utils.Unset() + + +class validation_error(_Error): + + _word_for_schema_in_error_message = "validating schema" + _word_for_instance_in_error_message = "instance schema" + + def __unicode__(self): + essential_for_verbose = ( + self.validator, + self.validator_value, + self.instance, + self.schema, + ) + if any(m is _unset for m in essential_for_verbose): + return self.message + + self.schema = yaml.dump( + json.loads(json.dumps(self.schema, sort_keys=True, indent=4)) + ) + self.instance = yaml.dump( + json.loads(json.dumps(self.instance, sort_keys=True, indent=4)) + ) + + pschema = yaml.dump(self.schema, default_flow_style=False) + pinstance = yaml.dump(self.instance, default_flow_style=False) + + return ( + self.message + + textwrap.dedent( + """ + + Failed validating %s at %s: + %s + + By %r validator in %s at %s: + %s + """.rstrip() + ) + % ( + self._word_for_instance_in_error_message, + _utils.format_as_index(self.relative_path), + pinstance, + self.validator, + self._word_for_schema_in_error_message, + _utils.format_as_index(list(self.relative_schema_path)[:-1]), + pschema, + ) + ) + + __str__ = __unicode__ + + +# Note: Override any new property used in provider schema for proper traceback +# Supported properties for now: ["property", "anyOf", "type", "enum", "minLength", "maxLength"] +def extend_validator(ValidatorClass): + def properties(validator, properties, instance, schema): + if not validator.is_type(instance, "object"): + return + + # for managing defaults in the schema + for property, subschema in properties.items(): + if "default" in subschema: + instance.setdefault(property, subschema["default"]) + + # for 
handling additional properties found in user spec + for property, value in instance.items(): + + if property in properties: + for error in validator.descend( + value, properties[property], path=property, schema_path=property + ): + yield error + + else: + error = "Additional properties are not allowed : %r" % (property) + yield validation_error(error) + + def anyOf(validator, anyOf, instance, schema): + all_errors = [] + for index, subschema in enumerate(anyOf): + errs = list(validator.descend(instance, subschema, schema_path=index)) + if not errs: + break + all_errors.extend(errs) + else: + yield validation_error( + "%r is not valid under any of the given schemas" % (instance,), + context=all_errors, + ) + + def type(validator, types, instance, schema): + types = ensure_list(types) + + if not any(validator.is_type(instance, type) for type in types): + yield validation_error(types_msg(instance, types)) + + def minLength(validator, mL, instance, schema): + if validator.is_type(instance, "string") and len(instance) < mL: + yield validation_error("%r is too short" % (instance,)) + + def maxLength(validator, mL, instance, schema): + if validator.is_type(instance, "string") and len(instance) > mL: + yield validation_error("%r is too long" % (instance,)) + + def enum(validator, enums, instance, schema): + if instance == 0 or instance == 1: + unbooled = unbool(instance) + if all(unbooled != unbool(each) for each in enums): + yield validation_error("%r is not one of %r" % (instance, enums)) + elif instance not in enums: + yield validation_error("%r is not one of %r" % (instance, enums)) + + return validators.extend( + ValidatorClass, + { + "properties": properties, + "anyOf": anyOf, + "type": type, + "minLength": minLength, + "maxLength": maxLength, + "enum": enum, + }, + ) + + +StrictDraft7Validator = extend_validator(Draft7Validator) diff --git a/framework/helpers/__init__.py b/framework/helpers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/framework/helpers/exception_utils.py b/framework/helpers/exception_utils.py new file mode 100644 index 0000000..fc4bdea --- /dev/null +++ b/framework/helpers/exception_utils.py @@ -0,0 +1,58 @@ +class RestError(Exception): + """A REST error occurred.""" + + def __init__(self, message=None, error=None, **kwargs): + self.message = "An error occurred with REST API call." \ + if message is None else message + super(RestError, self).__init__(self.message) + self.error = 'RestError' if error is None else error + try: + for attr in kwargs: + setattr(self, attr, kwargs[attr]) + except: + pass + + +class ResponseError(Exception): + """A Response error occurred.""" + + def __init__(self, message=None, error=None, **kwargs): + self.message = "An error occurred with REST API response." \ + if message is None else message + super(ResponseError, self).__init__(self.message) + self.error = 'ResponseError' if error is None else error + try: + for attr in kwargs: + setattr(self, attr, kwargs[attr]) + except: + pass + + +class JsonError(Exception): + """Exception occurred while parsing Yaml""" + + def __init__(self, message=None, error=None, **kwargs): + self.message = "Something went wrong while parsing json file!" 
\ + if message is None else message + super(JsonError, self).__init__(self.message) + self.error = 'JSON-parse-error' if error is None else error + try: + for attr in kwargs: + setattr(self, attr, kwargs[attr]) + except: + pass + + +class YamlError(Exception): + """Exception occurred while parsing Yaml""" + + def __init__(self, message=None, error=None, **kwargs): + self.message = "Something went wrong while parsing yml file!" \ + if message is None else message + super(YamlError, self).__init__(self.message) + self.error = 'YAML-parse-error' if error is None else error + try: + for attr in kwargs: + setattr(self, attr, kwargs[attr]) + except: + pass diff --git a/framework/helpers/general_utils.py b/framework/helpers/general_utils.py new file mode 100644 index 0000000..4bebcb7 --- /dev/null +++ b/framework/helpers/general_utils.py @@ -0,0 +1,185 @@ +import json5 as json +import re +import os +import cerberus +import yaml +from typing import List, Type +from distutils.file_util import copy_file +from scripts.python.script import Script +from functools import wraps +from .log_utils import get_logger +from .exception_utils import JsonError, YamlError + +logger = get_logger(__name__) + + +def get_json_file_contents(file: str) -> dict: + """ + Read contents of the json file, "file" and return the data + """ + logger.info(f"Reading contents of the file: [{file}]") + with open(file, 'r') as f: + try: + return json.load(f) + except Exception as e: + raise JsonError(str(e)) + + +def get_yml_file_contents(file: str) -> dict: + """ + Read contents of the json file, "file" and return the data + """ + logger.info(f"Reading contents of the file: [{file}]") + with open(file, 'r') as f: + try: + return yaml.safe_load(f) + except Exception as e: + raise YamlError(str(e)) + + +def validate_ip(field, value, error): + """ + Function to check if "value" is a valid ip or not, if not it'll raise error("field") + Eg: validate_ip("cvm_ip", "1.1.1.1", Exception) + """ + pattern = re.compile(r"^((25[0-5]|(2[0-4]|1\d|[1-9]|)\d)(\.(?!$)|$)){4}$") + if not pattern.match(value, ): + error(field, '"{}" must be a valid IP address'.format(value)) + return False + return True + +def validate_ip_list(field, value, error): + """ + Function to check if value has list of valid ip's or not, if not it'll raise error("field") + Eg: validate_ip("cvm_ip", "1.1.1.1", Exception) + """ + pattern = re.compile(r"^((25[0-5]|(2[0-4]|1\d|[1-9]|)\d)(\.(?!$)|$)){4}$") + for ip in value: + if not pattern.match(ip, ): + error(field, '"{}" must be a valid IP address'.format(ip)) + +def contains_whitespace(field, value, error): + """ + Check if string has whitespace + """ + if ' ' in value: + error(field, f"Space is not allowed in {value}") + + +def validate_domain(field, value, error): + """ + Function to validate the domain + """ + pattern = re.compile(r'^((?!-)[A-Za-z0-9-]{1,63}(? 
bool: + """ + Function used to validate json/ yaml schema + data: input data to be verified + schema: schema to be validated against + """ + validated = False # reflect whether the overall process succeeded + validator = cerberus.Validator(schema, allow_unknown=True) + + if not validator.validate(data): + logger.error(validator.errors) + else: + logger.info("Validated the schema successfully!") + validated = True + return validated + + +def create_new_directory(path: str): + """ + Function to create a new directory "path" + """ + logger.info(f"Creating directory [{path}] if it doesn't exist") + try: + os.makedirs(path, exist_ok=True) + except PermissionError: + raise PermissionError + except OSError as e: + raise e + + +def copy_file_util(src_path: str, dst_path: str): + """ + Function to copy a file from "src_path" to "dst_path" + """ + try: + copy_file(src_path, dst_path) + except IOError as e: + logger.info("IO error occurred while copying the file.") + raise e + except Exception as e: + logger.error("An error occurred while copying the file.") + raise e + else: + logger.info("File copied successfully!") + + +def run_script(scripts: List[Type[Script]], data: dict): + """ + Provided list of "scripts", this function runs individual script using "run" and then runs "verify" + """ + for script in scripts: + logger.info(f"Calling the script '{script.__name__}'...") + script_obj = script(data) + try: + script_obj.run() + except Exception as e: + logger.exception(e) + continue + + +def intersection(first_obj, second_obj): + """ + Function used to check if second_obj is present in first_obj + """ + if isinstance(first_obj, dict): + for key, value in first_obj.items(): + if key in second_obj and second_obj[key] == value: + second_obj.pop(key) + if isinstance(value, (dict, list)): + intersection(value, second_obj) + if not second_obj: + return True + elif isinstance(first_obj, list): + for item in first_obj: + intersection(item, second_obj) + return False + + +def enforce_data_arg(func): + """ + Function to enforce functions to just have 1 argument + """ + + @wraps(func) + def wrapper(data): + return func(data) + + return wrapper + + +def convert_to_secs(value: int, unit: str): + """ + This routine converts given value to time interval into seconds as per unit + """ + conversion_multiplier = { + "MINUTE": 60, + "HOUR": 3600, + "DAY": 86400, + "WEEK": 604800, + } + if unit not in conversion_multiplier: + return None, "Invalid unit given for interval conversion to seconds" + + return value * conversion_multiplier[unit], None diff --git a/framework/helpers/helper_functions.py b/framework/helpers/helper_functions.py new file mode 100644 index 0000000..54c8dcb --- /dev/null +++ b/framework/helpers/helper_functions.py @@ -0,0 +1,296 @@ +import os +import pathlib +import sys +from datetime import datetime +from helpers.general_utils import validate_schema, get_json_file_contents, create_new_directory, \ + copy_file_util, enforce_data_arg, get_yml_file_contents +from helpers.general_utils import validate_ip +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.v2.cluster import Cluster as PeCluster +from scripts.python.helpers.v3.cluster import Cluster as PcCluster +from helpers.log_utils import get_logger + +logger = get_logger(__name__) + +SITES_DIRECTORY = "sites" +FRAMEWORK_DIRECTORY = "framework" +GLOBAL_CONFIG_NAME = "global.json" +SITES_CONFIG_DIRECTORY = "config" +LOG_NAME = "script_log.log" + +""" +These are the functions that are part of pre_run_actions and post_run_actions 
in main.py + +How to write helper functions? + 1. Add a decorator "@enforce_data_arg" to the user defined functions + 2. Define your function with argument "data". "data" will be populated with the below data at + the beginning of the workflow. + eg: data["input_files"] will have the input files + data["schema"] will have the schema specified for the input + data["project_root"] will have the Path of project root, for file/ path manipulation + 3. You can update the "data" in the function to persist it across functions + 4. Once data is updated, it can be used by other functions +""" + + +# """ +# This scenario will work if these functions are defined in pre_run_actions. Eg: +# pre_run_actions += [test_func1, test_func2] +# +# // Running Func 1 +# // Running Func 2 +# // Yay I am able to access the data I defined in previous function +# // test +# """ + +# @enforce_data_arg +# def test_func1(data): +# """ +# Test data that just adds a key to data +# """ +# print("Running Func 1") +# data["test"] = "test" +# +# +# @enforce_data_arg +# def test_func2(data): +# """ +# Test data that just prints a key from data +# """ +# print("Running Func 2") +# print("Yay I am able to access the data I defined in previous function") +# print(data["test"]) + + +@enforce_data_arg +def get_input_data(data: dict) -> None: + """ + Read data from input file and global.json + """ + try: + global_config_file = f"{data['project_root']}/{SITES_CONFIG_DIRECTORY}/{GLOBAL_CONFIG_NAME}" + data.update(get_json_file_contents(global_config_file)) + files = data["input_files"] + + for file in files: + file = file.strip() + # todo any better way to just read it in one shot? as yml is superset of json + file_ext = pathlib.Path(file).suffix + if file_ext == ".json": + data.update(get_json_file_contents(file)) + else: + data.update(get_yml_file_contents(file)) + except Exception as e: + logger.error(e) + sys.exit(1) + + +@enforce_data_arg +def validate_input_data(data: dict): + """ + validate the input data against a schema + """ + schema = data.get("schema") + # initialize the validator with schema + valid_input = validate_schema(schema, data) + + if not valid_input: + logger.error("The entered input parameters is/are invalid. 
Please check the errors and try again!") + sys.exit(1) + + +@enforce_data_arg +def get_aos_url_mapping(data: dict) -> None: + """ + AOS version to url mapping + """ + aos_version = data.get("imaging_parameters").pop("aos_version") + aos_versions = data.get("aos_versions", {}) + if aos_version in aos_versions: + data["aos_url"] = aos_versions[aos_version].get('url') + else: + raise Exception("Unsupported AOS version, verify if you have specified the supported AOS version and try again") + + +@enforce_data_arg +def get_hypervisor_url_mapping(data: dict) -> None: + """ + Hypervisor version to url mapping + """ + # get hypervisor_url + hyp_version = str(data.get("imaging_parameters").pop("hypervisor_version")) + hyp_type = data["imaging_parameters"]["hypervisor_type"] + hypervisors = data.get("hypervisors", {}) + + if hyp_type in hypervisors: + if hyp_version in hypervisors[hyp_type]: + data["hypervisor_url"] = hypervisors[hyp_type][hyp_version].get('url') + else: + raise Exception("Unsupported Hypervisor version, verify if you have specified the supported Hypervisor " + "version and try again") + else: + raise Exception("Unsupported Hypervisor!") + + +@enforce_data_arg +def save_logs(data: dict): + """ + Create a new directory and save logs + """ + logger.info("Pushing logs...") + # create a new site directory + branch = data["site_name"] + timestamp = datetime.utcnow().strftime("%Y-%m-%d-%H:%M:%S") + + new_site_directory = f"{data['project_root']}/{SITES_DIRECTORY}/{branch}/{timestamp}" + # as we are using mkdir -p, this will create the branch directory, along with logs directory as well + logs_directory = f"{new_site_directory}/logs" + create_new_directory(logs_directory) + + # push logs to the branch + source = f"{data['project_root']}/{FRAMEWORK_DIRECTORY}/{LOG_NAME}" + destination = f"{logs_directory}/app_logs.log" + copy_file_util(source, destination) + + config_directory = f"{new_site_directory}/configs" + create_new_directory(config_directory) + + # push input configs to the branch + files = data["input_files"] + + for file in files: + _, file_name = os.path.split(file) + destination = f"{config_directory}/{file_name}" + copy_file_util(file, destination) + + +@enforce_data_arg +def create_pe_pc_objects(data: dict): + """ + This function will create necessary Pc and Pe objects that can be used by the scripts + This script does the below actions: + 1. Reads the input configs and creates a "pc_session" from pc_ip, pc_username and pc_password from the configs + that can be used to query the PC + 2. Checks if the file has "clusters" entity, if it exists + a. If "cluster_ip"s are specified as keys, it creates "pe_session" from pe_username, pe_password and + "cluster_info" with cluster details + b. 
If "cluster_name"s are specified, we leverage PC to find the IP, create "pe_session" and create + "cluster_info" with cluster name + + Eg config: file1 + ---------------------------------- + pc_ip: 10.1.1.1 + pc_username: admin + pc_password: nutanix/4u + pe_username: admin + pe_password: nutanix/4u + + clusters: + # configure the clusters that are already registered to a PC + # cluster-name + 10.1.1.110: + dsip: '' + cluster-02: {} + cluster-03: {} + ---------------------------------- + Response: self.data object -> updated + + { + 'project_root': PosixPath('path'), + 'schema': {}, + 'input_files': ['file1'], + 'pc_ip': '10.1.1.1', + 'pc_username': 'admin', + 'pc_password': 'nutanix/4u', + 'pe_username': 'admin', + 'pe_password': 'nutanix/4u', + 'clusters': { + '10.1.1.110': { + 'dsip': '', + 'cluster_info': {'name': 'cluster-01', 'uuid': '0005f033-4b58-4d1a-0000-000000011115', ...}, + 'pe_session': + }, + '10.1.1.111': { + 'cluster_info': {'name': 'cluster-02'}, + 'pe_session': + }, + '10.1.1.112': { + 'cluster_info': {'name': 'cluster-03'}, + 'pe_session': } + }, + 'pc_session': + } + } + """ + + # If Pc details are passed, create PC session + if data.get("pc_ip") and data.get("pc_username") and data.get("pc_password"): + data["pc_session"] = RestAPIUtil(data["pc_ip"], user=data["pc_username"], + pwd=data["pc_password"], + port="9440", secured=True) + + # if clusters are specified, get their sessions + clusters = data.get("clusters", {}) + + pc_cluster_obj = None + + clusters_map = {} + for cluster_key, cluster_details in clusters.items(): + # pe_username should be either present in current object or global config + if "pe_username" in cluster_details: + pe_username = cluster_details["pe_username"] + elif "pe_username" in data: + pe_username = data["pe_username"] + else: + raise Exception(f"PE credentials not specified for the cluster {cluster_key}") + + # pe_password should be either present in current object or global config + if "pe_password" in cluster_details: + pe_password = cluster_details["pe_password"] + elif "pe_password" in data: + pe_password = data["pe_password"] + else: + raise Exception(f"PE credentials not specified for the cluster {cluster_key}") + + cluster_info = {} + # check if cluster keys are names or ip + if not validate_ip("cluster_ip", cluster_key, lambda x, y: x): + # need to fetch cluster_ip from PC if names are specified + if data.get("pc_session"): + if not pc_cluster_obj: + pc_cluster_obj = PcCluster(data["pc_session"]) + pc_cluster_obj.get_pe_info_list() + + cluster_uuid = pc_cluster_obj.name_uuid_map.get(cluster_key) + cluster_info = {"name": cluster_key} + cluster_ip = pc_cluster_obj.uuid_ip_map.get(cluster_uuid) + if not cluster_ip: + raise Exception(f"Cannot get Cluster IP for {cluster_key}") + else: + raise Exception("PC details (pc_ip, pc_username, pc_password) are to be provided when only " + "cluster names are specified!") + else: + # cluster_keys are IPs + cluster_ip = cluster_key + if cluster_details.get("name"): + cluster_info = {"name": cluster_details["name"]} + + # Create PE session with cluster_ip + pe_session = RestAPIUtil(cluster_ip, user=pe_username, pwd=pe_password, port="9440", secured=True) + cluster_op = PeCluster(pe_session) + try: + cluster_op.get_cluster_info() + # Add cluster info by default + cluster_info.update(cluster_op.cluster_info) + except Exception as e: + logger.warning("Unable to connect to PE") + + if not cluster_op.cluster_info: + logger.warning(f"Couldn't fetch Cluster information for the cluster {cluster_ip}") + + # 
Add cluster details + clusters_map[cluster_ip] = cluster_details + clusters_map[cluster_ip]["cluster_info"] = cluster_info + clusters_map[cluster_ip]["pe_session"] = pe_session + + data["clusters"] = clusters_map if clusters_map else data.get("clusters", {}) diff --git a/framework/helpers/log_utils.py b/framework/helpers/log_utils.py new file mode 100644 index 0000000..aad8125 --- /dev/null +++ b/framework/helpers/log_utils.py @@ -0,0 +1,93 @@ +import logging +import time +import sys +from typing import Union +from rainbow_logging_handler import RainbowLoggingHandler + + +class ConfigureRootLogger: + """ + Customization of the root logger + """ + def __init__(self, debug: bool = False): + """ + Build the root logger based on the logging module + + Args: + debug: If we want to set debug or not + + Returns: + None + """ + # create console handler + self._ch = RainbowLoggingHandler(sys.stderr, color_message_info=('green', None, False)) + + # create file handler + self._fh = logging.FileHandler("script_log.log", mode='w') + + # add formatter to console handler + self.__add_console_formatter(self._ch) + + # add formatter to file handler + self.__add_file_formatter(self._fh) + + level = logging.DEBUG if debug else logging.INFO + logging.basicConfig( + level=level, + handlers=[self._ch, self._fh] + ) + + @staticmethod + def __add_console_formatter(ch: RainbowLoggingHandler): + """ + add formatter to the colorized console handler for each log level + + Args: + ch + + Returns + None + """ + + fmt = ( + "[%(asctime)s] " + "[%(threadName)s] " + "[%(levelname)s] %(message)s" + ) + + formatter = logging.Formatter(fmt) + formatter.converter = time.gmtime + + # add formatter to console handler + ch.setFormatter(formatter) + + @staticmethod + def __add_file_formatter(fh: logging.FileHandler): + """ + add file formatter with caller context for each log level + + Args: + fh + + Returns + None + """ + fmt = ( + "[%(asctime)s] " + "[%(threadName)s] " + "[%(levelname)s] " + "[%(module)s.%(funcName)s():%(lineno)s] %(message)s" + ) + + formatter = logging.Formatter(fmt) + formatter.converter = time.gmtime + + # add formatter to file handler + fh.setFormatter(formatter) + + +def get_logger(name): + """returns a new logger""" + + logging_handle = logging.getLogger(name) + return logging_handle diff --git a/framework/helpers/rest_utils.py b/framework/helpers/rest_utils.py new file mode 100644 index 0000000..fcfb24c --- /dev/null +++ b/framework/helpers/rest_utils.py @@ -0,0 +1,125 @@ +import json +import requests +import urllib3 +from .log_utils import get_logger +from requests.exceptions import Timeout, ConnectionError, HTTPError, RequestException +from requests.auth import HTTPBasicAuth +from typing import Optional +from helpers.exception_utils import RestError, ResponseError + +logger = get_logger(__name__) + + +def rest_api_call(func): + """ + Decorator function to handle API calls and exceptions + + Args: + func: The function that is making the call + :param func: + :return: + """ + + def make_call(*args, **kwargs): + try: + r = func(*args, **kwargs) + if r.headers.get('Content-Type') == 'application/json': + response = r.json() + else: + response = r.content + response = response.decode("utf-8") + # Sometimes json response is sent back as a string + try: + response = json.loads(response) + except Exception: + logger.debug("Cannot parse string response to json") + + logger.debug(response) + r.raise_for_status() + except HTTPError as errh: + if str(errh.response.status_code).startswith("5") or 
str(errh.response.status_code).startswith("4"): + logger.error(response) + raise RestError(message=str(errh), error="HTTPError") + except ConnectionError as errc: + raise RestError(message=str(errc), error="ConnectionError") + except Timeout as errt: + raise RestError(message=str(errt), error="Timeout Error") + except RequestException as err: + raise RestError(message=str(err), error="Request Exception") + except KeyboardInterrupt: + raise KeyboardInterrupt() + except Exception as e: + raise RestError(message=str(e), error="UnexpectedError") + + if str(response) == '': + raise ResponseError(message=str(response), error="LoginFailed") + elif str(response) == '': + raise ResponseError(message=str(response), error="BadGateway") + return response + + return make_call + + +class RestAPIUtil: + def __init__(self, ip_address: str, user: Optional[str], pwd: Optional[str], headers: dict = None, + secured: bool = True, port: str = ""): + self.__IP_ADDRESS = ip_address + self.__SSL_ENABLED = bool(secured) + self.__PORT = f":{port}" if port else port + self.__session = requests.Session() + self.__headers = headers if headers else { + 'Content-Type': 'application/json', + 'Accept': 'application/json' + } + if user and pwd: + self.__session.auth = HTTPBasicAuth(user, pwd) + urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + + @rest_api_call + def post(self, uri: str, headers: dict = None, data: dict = None, jsonify=True, **kwargs): + headers = headers if headers else self.__headers + data = {} if not data else data + url = self.prepare_url(uri) + + logger.debug("POST request for the URL: " + url) + data = json.dumps(data) if jsonify else data + + logger.debug("Data") + logger.debug(data) + response = self.__session.post(url, headers=headers, data=data, verify=False, **kwargs) + + return response + + @rest_api_call + def put(self, uri: str, headers: dict = None, data: dict = None, jsonify=True, **kwargs): + headers = headers if headers else self.__headers + data = {} if not data else data + url = self.prepare_url(uri) + + logger.debug("PUT request for the URL: " + url) + data = json.dumps(data) if jsonify else data + + response = self.__session.put(url, headers=headers, data=data, verify=False, **kwargs) + response.raise_for_status() + return response + + @rest_api_call + def get(self, uri: str, headers: dict = None, data: dict = None, **kwargs): + headers = headers if headers else self.__headers + data = {} if not data else data + url = self.prepare_url(uri) + headers = {} if not headers else headers + data = {} if not data else data + + logger.debug("GET request for the URL: " + url) + response = self.__session.get(url, headers=headers, data=json.dumps(data), verify=False, **kwargs) + response.raise_for_status() + return response + + def prepare_url(self, uri): + return f"{self.get_protocol()}://{self.__IP_ADDRESS}{self.__PORT}/{uri}" + + def get_protocol(self): + if self.__SSL_ENABLED <= 0: + return 'http' + return 'https' diff --git a/framework/helpers/schema.py b/framework/helpers/schema.py new file mode 100644 index 0000000..4cd4601 --- /dev/null +++ b/framework/helpers/schema.py @@ -0,0 +1,1017 @@ +from .general_utils import validate_ip, contains_whitespace, validate_domain, validate_ip_list + +""" +We are using a popular Python library "cerberus" to define the json/ yml schema +https://docs.python-cerberus.org/en/stable/validation-rules.html +""" + +GLOBAL_NETWORK_SCHEMA = { + 'global_network': { + 'required': True, + 'type': 'dict', + 'schema': { + 'ntp_servers': { + 'type': 
'list', + 'required': False + }, + 'dns_servers': { + 'type': 'list', + 'required': False + } + } + } +} + +IMAGING_SCHEMA = { + 'pc_ip': { + 'type': 'string', + 'required': True, + 'validator': validate_ip + }, + 'pc_username': { + 'required': True, + 'type': 'string', + 'validator': contains_whitespace + }, + 'pc_password': { + 'required': True, + 'type': 'string', + 'validator': contains_whitespace + }, + 'site_name': { + 'required': True, + 'type': 'string' + }, + 'blocks_serial_numbers': { + 'required': False, + 'type': 'list', + 'schema': { + 'type': 'string', + 'validator': contains_whitespace + } + }, + 'use_existing_network_settings': { + 'required': True, + 'type': 'boolean' + }, + 're-image': { + 'required': True, + 'type': 'boolean' + }, + "imaging_parameters": { + 'type': 'dict', + 'required': False, + 'schema': { + 'aos_version': { + 'required': True, + 'type': ['float', 'string'] + }, + 'hypervisor_type': { + 'required': True, + 'type': 'string', + 'allowed': ['kvm', 'esx', 'hyperv'] + }, + 'hypervisor_version': { + 'required': True, + 'type': ['float', 'string'] + } + } + }, + 'network': { + 'type': 'dict', + 'required': False, + 'schema': { + 'mgmt_static_ips': { + 'required': True, + 'type': 'list', + 'empty': True, + 'schema': { + 'type': 'string', + 'validator': validate_ip + } + }, + 'mgmt_netmask': { + 'required': True, + 'type': 'string', + 'empty': True, + 'validator': validate_ip + }, + 'mgmt_gateway': { + 'required': True, + 'type': 'string', + 'empty': True, + 'validator': validate_ip + }, + 'ipmi_netmask': { + 'required': True, + 'type': 'string', + 'empty': True, + 'validator': validate_ip + }, + 'ipmi_gateway': { + 'required': True, + 'empty': True, + 'type': 'string', + 'validator': validate_ip + }, + 'ipmi_static_ips': { + 'required': True, + 'type': 'list', + 'empty': True, + 'schema': { + 'type': 'string', + 'validator': validate_ip + } + } + } + }, + 'clusters': { + 'required': True, + 'type': 'dict', + "keyschema": {"type": "string"}, + "valueschema": { + "type": "dict", + "schema": { + "cluster_size": { + 'type': 'integer', + 'required': True, + }, + "cluster_vip": { + 'type': 'string', + 'required': True, + 'validator': validate_ip + }, + "cvm_ram": { + 'type': 'integer', + 'required': True, + 'min': 12 + }, + "redundancy_factor": { + 'type': 'integer', + 'required': True, + 'allowed': [2, 3] + }, + 'node_serials': { + 'type': 'list', + 'required': False, + 'schema': { + 'type': 'string', + 'validator': contains_whitespace + } + }, + 'network': { + 'type': 'dict', + 'required': False, + 'schema': { + 'mgmt_static_ips': { + 'required': True, + 'type': 'list', + 'empty': True, + 'schema': { + 'type': 'string', + 'validator': validate_ip + } + }, + 'mgmt_netmask': { + 'required': True, + 'type': 'string', + 'empty': True, + 'validator': validate_ip + }, + 'mgmt_gateway': { + 'required': True, + 'type': 'string', + 'empty': True, + 'validator': validate_ip + }, + 'ipmi_netmask': { + 'required': True, + 'type': 'string', + 'empty': True, + 'validator': validate_ip + }, + 'ipmi_gateway': { + 'required': True, + 'empty': True, + 'type': 'string', + 'validator': validate_ip + }, + 'ipmi_static_ips': { + 'required': True, + 'type': 'list', + 'empty': True, + 'schema': { + 'type': 'string', + 'validator': validate_ip + } + } + } + }, + 'use_existing_network_settings': { + 'required': False, + 'type': 'boolean' + }, + 're-image': { + 'required': False, + 'type': 'boolean' + } + } + } + }, +} + +IMAGING_SCHEMA.update(GLOBAL_NETWORK_SCHEMA) + 
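+
+# Illustrative sketch only (not part of the framework's runtime path): a minimal
+# example of how the cerberus schemas in this module are typically consumed,
+# mirroring helpers.general_utils.validate_schema. The sample payload and the
+# NTP/DNS values below are made-up assumptions for demonstration, not a real
+# site config.
+if __name__ == "__main__":  # e.g. run as `python -m helpers.schema` from the framework directory
+    import cerberus
+
+    _sample_config = {
+        "global_network": {
+            "ntp_servers": ["0.pool.ntp.org"],
+            "dns_servers": ["8.8.8.8"],
+        }
+    }
+    # allow_unknown mirrors how validate_schema() builds its Validator
+    _validator = cerberus.Validator(GLOBAL_NETWORK_SCHEMA, allow_unknown=True)
+    if _validator.validate(_sample_config):
+        print("Sample config satisfies GLOBAL_NETWORK_SCHEMA")
+    else:
+        # validator.errors maps each offending field to its validation messages
+        print(_validator.errors)
+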
+USERNAME_PASSWORD_SCHEMA = { + 'required': True, + 'type': 'string', + 'validator': contains_whitespace +} + +EULA_SCHEMA = { + 'type': 'dict', + 'schema': { + 'username': { + 'type': 'string', + 'required': True, + 'empty': False + }, + 'company_name': { + 'type': 'string', + 'required': True, + 'empty': False + }, + 'job_title': { + 'type': 'string', + 'required': True, + 'empty': False + } + } +} + +PULSE_SCHEMA = { + 'type': 'boolean' +} + +AD_SCHEMA = { + 'type': 'dict', + 'required': True, + 'schema': { + 'directory_type': { + 'type': 'string', + 'required': True, + 'allowed': ["ACTIVE_DIRECTORY"], + 'empty': False + }, + 'ad_name': { + 'type': 'string', + 'required': True, + 'empty': False + }, + 'ad_domain': { + 'type': 'string', + 'required': True, + 'empty': False, + 'validator': validate_domain + }, + 'ad_server_ip': { + 'type': 'string', + 'required': True, + 'empty': False, + 'validator': validate_ip + }, + 'service_account_username': { + 'type': 'string', + 'required': True, + 'empty': False, + 'validator': contains_whitespace + }, + 'service_account_password': { + 'type': 'string', + 'required': True, + 'empty': False, + 'validator': contains_whitespace + }, + 'role_mappings': { + 'type': 'list', + 'required': True, + 'schema': { + 'type': 'dict', + 'schema': { + 'role_type': { + 'required': True, + 'type': 'string', + 'allowed': ['ROLE_CLUSTER_ADMIN', 'ROLE_USER_ADMIN', 'ROLE_CLUSTER_VIEWER', 'ROLE_BACKUP_ADMIN'] + }, + 'entity_type': { + 'required': True, + 'type': 'string', + 'allowed': ['GROUP', 'OU', 'USER'] + }, + 'values': { + 'required': True, + 'type': 'list' + } + } + } + } + } +} + +PE_CONTAINERS = { + 'type': 'list', + 'schema': { + 'type': 'dict', + 'schema': { + 'name': { + 'type': 'string', + 'required': True + }, + 'advertisedCapacity_in_gb': { + 'type': 'integer', + 'required': False + }, + 'replication_factor': { + 'type': 'integer', + 'required': False + }, + 'compression_enabled': { + 'type': 'boolean', + 'required': False + }, + 'compression_delay_in_secs': { + 'type': 'integer', + 'required': False + }, + 'erasure_code': { + 'type': 'string', + 'required': False, + 'allowed': ['OFF', 'ON'] + }, + 'on_disk_dedup': { + 'type': 'string', + 'required': False, + 'allowed': ['OFF', 'ON'] + }, + 'nfsWhitelistAddress': { + 'type': 'list', + 'required': False + } + } + } +} + +PE_NETWORKS = { + 'type': 'list', + 'schema': { + 'type': 'dict', + 'schema': { + 'name': { + 'type': 'string', + }, + 'vlan_id': { + 'type': 'integer', + }, + 'network_ip': { + 'type': 'string', + 'validator': validate_ip + }, + 'network_prefix': { + 'type': 'integer' + }, + 'default_gateway_ip': { + 'type': 'string', + 'validator': validate_ip + }, + 'pool_list': { + 'type': 'list', + 'schema': { + 'type': 'dict', + 'schema': { + 'range': { + 'type': 'string' + } + } + } + }, + 'dhcp_options': { + 'type': 'dict', + 'schema': { + 'domain_name_server_list': { + 'type': 'list', + 'validator': validate_ip_list + }, + 'domain_search_list': { + 'type': 'list', + 'validator': validate_domain + }, + 'domain_name': { + 'type': 'string', + 'validator': validate_domain + } + } + } + } + } +} + +POD_REMOTE_AZS = { + 'type': 'dict', + "keyschema": {"type": "string"}, + 'valueschema': { + 'type': 'dict', + 'schema': { + 'username': { + 'type': 'string', + 'validator': contains_whitespace + }, + 'password': { + 'type': 'string', + 'validator': contains_whitespace + } + } + } +} + +POD_CATEGORIES_SCHEMA = { + "type": "list", + 'schema': { + 'type': 'dict', + 'schema': { + 'name': {'type': 'string'}, + 
'description': {'type': 'string'}, + 'values': { + 'type': 'list', + 'schema': { + 'type': 'string' + } + } + } + } +} + +POD_RECOVERY_PLAN_SCHEMA = { + 'type': 'list', + 'schema': { + 'type': 'dict', + 'schema': { + 'name': { + 'type': 'string' + }, + 'desc': { + 'type': 'string' + }, + 'primary_location': { + 'type': 'dict', + 'schema': { + 'availability_zone': { + 'type': 'string', + 'validator': validate_ip + }, + } + }, + 'recovery_location': { + 'type': 'dict', + 'schema': { + 'availability_zone': { + 'type': 'string', + 'validator': validate_ip + }, + } + }, + 'stages': { + 'type': 'list', + 'schema': { + 'type': 'dict', + 'schema': { + 'vms': { + 'type': 'list', + 'schema': { + 'type': 'dict', + 'schema': { + 'name': {'type': 'string'}, + 'enable_script_exec': {'type': 'boolean'}, + 'delay': {'type': 'integer'} + } + } + }, + 'categories': { + 'type': 'list', + 'schema': { + 'type': 'dict', + 'schema': { + 'key': {'type': 'string'}, + 'value': {'type': 'string'} + } + } + } + } + } + }, + 'network_type': { + 'type': 'string', + 'allowed': ['NON_STRETCH', 'STRETCH'] + }, + 'network_mappings': { + "type": "list", + "schema": { + "type": "dict", + "schema": { + "primary": { + "type": "dict", + "schema": { + "test": { + "type": "dict", + 'schema': { + 'name': {'type': 'string'}, + 'gateway_ip': { + 'type': 'string', + 'validator': validate_ip + }, + 'prefix': {'type': 'integer'} + } + }, + "prod": { + "type": "dict", + 'schema': { + 'name': {'type': 'string'}, + 'gateway_ip': { + 'type': 'string', + 'validator': validate_ip + }, + 'prefix': {'type': 'integer'} + } + }, + } + }, + "recovery": { + "type": "dict", + "schema": { + "test": { + "type": "dict", + 'schema': { + 'name': {'type': 'string'}, + 'gateway_ip': { + 'type': 'string', + 'validator': validate_ip + }, + 'prefix': {'type': 'integer'} + } + }, + "prod": { + "type": "dict", + 'schema': { + 'name': {'type': 'string'}, + 'gateway_ip': { + 'type': 'string', + 'validator': validate_ip + }, + 'prefix': {'type': 'integer'} + } + }, + } + } + } + } + } + } + } +} + +INBOUND_OUTBOUND_SCHEMA = { + 'type': 'list', + 'schema': { + 'type': 'dict', + 'schema': { + 'address': { + 'type': 'dict', + 'schema': { + 'name': {'type': 'string'} + } + }, + 'categories': { + 'type': 'dict', + 'keyschema': {'type': 'string'}, + 'valueschema': { + 'type': 'list' + } + }, + 'protocol': { + 'type': 'dict', + 'schema': { + 'service': { + 'type': 'dict', + 'schema': { + 'name': {'type': 'string'} + } + } + } + } + } + } +} + +POD_RETENTION_POLCY = { + 'type': 'dict', + 'schema': { + 'num_snapshots': { + 'type': 'integer' + }, + 'rollup_retention_policy': { + 'type': 'dict', + 'schema': { + 'snapshot_interval_type': { + 'type': 'string', + 'allowed': ['HOURLY', 'DAILY', 'WEEKLY', 'MONTHLY', 'YEARLY'] + }, + 'multiple': {'type': 'integer'} + } + } + } +} + +POD_PROTECTION_RULES_SCHEMA = { + 'type': 'list', + 'schema': { + 'type': 'dict', + 'schema': { + 'name': { + 'type': 'string' + }, + 'desc': { + 'type': 'string' + }, + 'protected_categories': { + 'type': 'dict', + 'keyschema': {'type': 'string'}, + 'valueschema': { + 'type': 'list' + } + }, + 'schedules': { + 'type': 'list', + 'schema': { + 'type': 'dict', + 'schema': { + 'source': { + 'schema': { + 'availability_zone': { + 'type': 'string', + 'validator': validate_ip + }, + 'cluster': { + 'type': 'string' + } + } + }, + 'destination': { + 'type': 'dict', + 'schema': { + 'availability_zone': { + 'type': 'string', + 'validator': validate_ip + }, + 'cluster': { + 'type': 'string' + } + } + }, + 
'protection_type': { + 'type': 'string', + 'allowed': ["ASYNC", "SYNC"] + }, + 'rpo': { + 'type': 'integer' + }, + 'rpo_unit': { + 'type': 'string', + 'allowed': ["MINUTE", "HOUR", "DAY", "WEEK"] + }, + 'snapshot_type': { + 'type': 'string', + 'allowed': ["CRASH_CONSISTENT", "APPLICATION_CONSISTENT"] + }, + "local_retention_policy": POD_RETENTION_POLCY, + "remote_retention_policy": POD_RETENTION_POLCY + } + } + } + } + } +} + +POD_SECURITY_POLICIES_SCHEMA = { + "type": "list", + 'schema': { + 'type': 'dict', + 'schema': { + 'name': {'type': 'string'}, + 'description': {'type': 'string'}, + 'app_rule': { + 'type': 'dict', + 'schema': { + 'policy_mode': { + 'type': 'string', + 'allowed': ["MONITOR", "APPLY"] + }, + 'target_group': { + 'type': 'dict', + 'schema': { + 'categories': { + 'type': 'dict', + 'schema': { + 'AppType': {'type': 'string'} + } + } + } + }, + 'inbounds': INBOUND_OUTBOUND_SCHEMA, + 'outbounds': INBOUND_OUTBOUND_SCHEMA + } + }, + 'allow_ipv6_traffic': { + 'type': 'boolean' + }, + 'hitlog': { + 'type': 'boolean' + } + } + } +} + +POD_ADDRESS_GROUP_SCHEMA = { + "type": "list", + 'schema': { + 'type': 'dict', + 'schema': { + 'name': {'type': 'string'}, + 'description': {'type': 'string'}, + 'subnets': { + 'type': 'list', + 'schema': { + 'type': 'dict', + 'schema': { + 'network_ip': { + 'type': 'string', + 'validator': validate_ip + }, + 'network_prefix': { + 'type': 'integer' + } + } + } + } + } + } +} + +POD_SERVICE_GROUP_SCHEMA = { + "type": "list", + 'schema': { + 'type': 'dict', + 'schema': { + 'name': {'type': 'string'}, + 'description': {'type': 'string'}, + 'service_details': { + 'type': 'dict', + 'keyschema': {'type': 'string'}, + 'valueschema': { + 'type': 'list' + } + } + } + } +} + +POD_CLUSTER_SCHEMA = { + 'required': True, + 'type': 'dict', + "keyschema": {"type": "string"}, + "valueschema": { + "type": "dict", + "schema": { + "name": { + 'type': 'string', + 'required': True, + }, + "dsip": { + 'type': 'string', + 'required': True, + 'validator': validate_ip + }, + 'pe_username': USERNAME_PASSWORD_SCHEMA, + 'pe_password': USERNAME_PASSWORD_SCHEMA, + 'eula': EULA_SCHEMA, + 'enable_pulse': PULSE_SCHEMA, + 'directory_services': AD_SCHEMA, + 'networks': PE_NETWORKS, + 'containers': PE_CONTAINERS + } + } +} + +POD_CONFIG_SCHEMA = { + 'pods': { + 'type': 'list', + 'required': True, + 'schema': { + 'type': 'dict', + 'valueschema': { + 'type': 'dict', + 'schema': { + 'pc_ip': { + 'type': 'string', + 'required': True, + 'validator': validate_ip + }, + 'pc_username': USERNAME_PASSWORD_SCHEMA, + 'pc_password': USERNAME_PASSWORD_SCHEMA, + 'remote_azs': POD_REMOTE_AZS, + 'protection_rules': POD_PROTECTION_RULES_SCHEMA, + 'recovery_plans': POD_RECOVERY_PLAN_SCHEMA, + "categories": POD_CATEGORIES_SCHEMA, + "address_groups": POD_ADDRESS_GROUP_SCHEMA, + "service_groups": POD_SERVICE_GROUP_SCHEMA, + "security_policies": POD_SECURITY_POLICIES_SCHEMA, + 'clusters': POD_CLUSTER_SCHEMA + } + } + } + } +} + +CREATE_VM_WORKLOAD_SCHEMA = { + 'pc_ip': { + 'required': True, + 'type': 'string', + 'validator': validate_ip + }, + 'pc_username': { + 'required': True, + 'type': 'string', + 'validator': contains_whitespace + }, + 'pc_password': { + 'required': True, + 'type': 'string' + }, + 'site_name': { + 'required': True, + 'empty': False, + 'type': 'string' + }, + 'project_name': { + 'required': True, + 'empty': False, + 'type': 'string' + }, + 'account_name': { + 'required': True, + 'empty': False, + 'type': 'string' + }, + 'subnets': { + 'required': True, + 'empty': False, + 'type': 
'dict' + }, + 'bp_list': { + 'required': True, + 'empty': False, + 'type': 'list', + 'schema': { + 'type': 'dict', + 'empty': False, + 'schema': { + 'dsl_file': { + 'required': True, + 'empty': False, + 'type': 'string', + }, + 'name': { + 'required': True, + 'empty': False, + 'type': 'string', + }, + 'app_name': { + 'required': True, + 'empty': False, + 'type': 'string', + }, + 'cluster': { + 'required': True, + 'empty': False, + 'type': 'string', + }, + 'subnet': { + 'required': True, + 'empty': False, + 'type': 'string', + } + } + } + } +} + +CREATE_AI_WORKLOAD_SCHEMA = { + 'pc_ip': { + 'required': True, + 'type': 'string', + 'validator': validate_ip + }, + 'pc_username': { + 'required': True, + 'type': 'string', + 'validator': contains_whitespace + }, + 'pc_password': { + 'required': True, + 'type': 'string' + }, + 'site_name': { + 'required': True, + 'empty': False, + 'type': 'string' + }, + 'project_name': { + 'required': True, + 'empty': False, + 'type': 'string' + }, + 'account_name': { + 'required': True, + 'empty': False, + 'type': 'string' + }, + 'subnets': { + 'required': True, + 'empty': False, + 'type': 'dict' + }, + 'bp_list': { + 'required': True, + 'empty': False, + 'type': 'list', + 'schema': { + 'type': 'dict', + 'empty': False, + 'schema': { + 'dsl_file': { + 'required': True, + 'empty': False, + 'type': 'string', + }, + 'name': { + 'required': True, + 'empty': False, + 'type': 'string', + }, + 'app_name': { + 'required': True, + 'empty': False, + 'type': 'string', + }, + 'cluster': { + 'required': True, + 'empty': False, + 'type': 'string', + }, + 'subnet': { + 'required': True, + 'empty': False, + 'type': 'string', + }, + 'variable_list': { + 'required': False, + 'type': 'list', + 'schema': { + 'type': 'dict', + 'schema': { + 'value': { + 'required': True, + 'type': 'dict', + 'schema': { + 'value': { + 'required': True, + 'empty': False, + 'type': 'string', + } + } + }, + 'context': { + 'required': True, + 'empty': False, + 'type': 'string', + }, + 'name': { + 'required': True, + 'empty': False, + 'type': 'string', + } + } + } + } + } + } + } +} diff --git a/framework/helpers/workflow_utils.py b/framework/helpers/workflow_utils.py new file mode 100644 index 0000000..57dee37 --- /dev/null +++ b/framework/helpers/workflow_utils.py @@ -0,0 +1,22 @@ +from typing import Type, List, Callable +from scripts.python.script import Script +from helpers.general_utils import run_script +from .log_utils import get_logger + +logger = get_logger(__name__) + + +class Workflow: + def __init__(self, **data): + self.data = data + + def run_scripts(self, scripts: List[Type[Script]]): + logger.info(f"Running the {type(self).__name__}...") + + # run the scripts + run_script(scripts, self.data) + + def run_functions(self, functions: List[Callable]): + for func in functions: + logger.info(f"Calling the action '{func.__name__}'...") + func(self.data) diff --git a/framework/main.py b/framework/main.py new file mode 100644 index 0000000..41385ef --- /dev/null +++ b/framework/main.py @@ -0,0 +1,127 @@ +""" + This file is the starting point of the framework + The process of adding new workflow/ job to the framework is as follows: + 1. Add a new workflow_type by adding a new case statement in the below match, case expression + 2. Add/ modify the pre_run_actions, post_run_actions or keep the default actions, which would run before and + after the scripts respectively + 3. 
To add an action/ function to the pre_run_actions or post_run_actions, you can define the function + preferably in "helpers/helper_functions.py" file and add to the "pre_run_actions" or "post_run_actions" + list variables. + 4. "schema" will verify the SCHEMA of the input file. You have to define the schema in "helpers/schema.py" + 5. Finally, you'll define the Scripts that are to be executed by the workflow. You'll have to write the + scripts in "scripts/python" + 6. Note the order in which functions/ scripts are defined in actions/ scripts is the order of execution of + actions/ scripts +""" + + +import argparse +import os +import sys +from pathlib import Path +from helpers.log_utils import get_logger, ConfigureRootLogger +from helpers.workflow_utils import Workflow +from scripts.python.create_address_groups_pc import CreateAddressGroups +from scripts.python.create_bp_calm import CreateBp +from scripts.python.create_container_pe import CreateContainerPe +from scripts.python.enable_microseg_pc import EnableMicroseg +from scripts.python.foundation_script import FoundationScript +from scripts.python.create_service_groups_pc import CreateServiceGroups +from scripts.python.create_security_policy_pc import CreateNetworkSecurityPolicy +from scripts.python.launch_calm_bp import LaunchBp +from scripts.python.register_pe_to_pc import RegisterToPc +from scripts.python.add_ad_server_pe import AddAdServerPe +from scripts.python.create_rolemapping_pe import CreateRoleMapping +from scripts.python.create_pc_categories import CreateCategoryPc +from scripts.python.create_pc_subnets import CreateSubnetsPc +from scripts.python.update_dsip_pe import UpdateDsip +from helpers.helper_functions import create_pe_pc_objects +from scripts.python.update_calm_project import UpdateCalmProject +from scripts.python.init_calm_dsl import InitCalmDsl +from scripts.python.initial_cluster_config import InitialClusterConfig +from scripts.python.configure_pod import PodConfig +from helpers.schema import IMAGING_SCHEMA, CREATE_VM_WORKLOAD_SCHEMA, CREATE_AI_WORKLOAD_SCHEMA, POD_CONFIG_SCHEMA +from helpers.helper_functions import get_input_data, validate_input_data, get_aos_url_mapping, \ + get_hypervisor_url_mapping, save_logs + +parser = argparse.ArgumentParser(description="Description") +parser.add_argument("--workflow", type=str, help="workflow to run", required=True) +parser.add_argument("-f", "--file", type=str, help="input file", required=True) +parser.add_argument("--debug", action='store_true') +args = parser.parse_args() + +# Find path to the project root +project_root = Path(__file__).parent.parent + + +def main(): + workflow_type = args.workflow + files = [f"{project_root}/{file.strip()}" for file in args.file.split(",")] + + # initialize the logger + logger = get_logger(__name__) + + for file in files: + if not os.path.exists(file): + logger.error("Specify the correct path of the input file or check the name!") + sys.exit(1) + + pre_run_actions = [get_input_data, validate_input_data] + post_run_actions = [save_logs] + schema = {} + + match workflow_type: + case "imaging": + pre_run_actions += [get_aos_url_mapping, get_hypervisor_url_mapping] + schema = IMAGING_SCHEMA + scripts = [FoundationScript] + case "config-cluster": + pre_run_actions += [create_pe_pc_objects] + scripts = [InitialClusterConfig, RegisterToPc, AddAdServerPe, CreateRoleMapping, UpdateDsip, + CreateContainerPe, CreateSubnetsPc, CreateCategoryPc, EnableMicroseg, + CreateAddressGroups, CreateServiceGroups, CreateNetworkSecurityPolicy] + case 
"calm-vm-workloads": + schema = CREATE_VM_WORKLOAD_SCHEMA + scripts = [InitCalmDsl, UpdateCalmProject, CreateBp, LaunchBp] + case "calm-edgeai-vm-workload": + schema = CREATE_AI_WORKLOAD_SCHEMA + scripts = [InitCalmDsl, UpdateCalmProject, CreateBp, LaunchBp] + case "pod-config": + schema = POD_CONFIG_SCHEMA + scripts = [PodConfig] + # case "example-workflow-type": + # schema = EXAMPLE_SCHEMA # EXAMPLE_SCHEMA is a dict defined in helpers/schema.py + # pre_run_actions = [new_function1, new_function2] # either create a new actions list (or) + # post_run_actions += [new_function3] # modify existing actions list + # scripts = [ExampleScript1, ExampleScript2] # ExampleScript1 is .py file which inherits "Script" class + case _: + logger.error("Select the correct workflow") + sys.exit(1) + + if validate_input_data in pre_run_actions and not schema: + logger.error("Schema is empty! " + "Schema has to be provided if validate_input_data is specified in pre_run_actions!") + sys.exit(1) + + # create a workflow and run it + wf_handler = Workflow( + project_root=project_root, + schema=schema, + input_files=files + ) + + # run the pre run functions + wf_handler.run_functions(pre_run_actions) + + # run scripts + wf_handler.run_scripts(scripts) + + # run the post run functions + wf_handler.run_functions(post_run_actions) + + +if __name__ == '__main__': + # Call the main function + debug = args.debug + ConfigureRootLogger(debug) + main() \ No newline at end of file diff --git a/framework/requirements/calm.in b/framework/requirements/calm.in new file mode 100644 index 0000000..663e8f4 --- /dev/null +++ b/framework/requirements/calm.in @@ -0,0 +1,25 @@ +ruamel.yaml==0.16.12 +jinja2==3.0.3 +jsonref==0.2 +bidict==0.18.0 +requests==2.27.0 # version greater than this will require python >= 3.7 +requests_toolbelt==0.9.1 +docopt==0.6.2 +PTable==0.9.2 +Click==8.0.4 # version greater than this will require python >= 3.7 +click_completion==0.5.2 +click-didyoumean==0.0.3 +click-repl==0.2.0 +colorama==0.4.1 +arrow==0.15.1 +jsonschema==3.2.0 +anytree==2.8.0 +asciimatics>=1.13.0 +peewee==3.10.0 +pycryptodome==3.15.0 # version greater than this will require python >= 3.7 +scrypt==0.8.20 +schema==0.7.1 +colorlog==5.0.1 +black==22.6.0 +importlib-metadata==4.6.0 # version greater than this will require python >= 3.7 +backports.zoneinfo==0.2.1;python_version<"3.9" diff --git a/framework/requirements/common.in b/framework/requirements/common.in new file mode 100644 index 0000000..f04a989 --- /dev/null +++ b/framework/requirements/common.in @@ -0,0 +1,6 @@ +requests==2.27.0 +rainbow_logging_handler==2.2.2 +PyYAML==6.0 +cerberus==1.3.4 +json5==0.9.11 +PyYAML==6.0 \ No newline at end of file diff --git a/framework/requirements/dev.in b/framework/requirements/dev.in new file mode 100644 index 0000000..8ebb32d --- /dev/null +++ b/framework/requirements/dev.in @@ -0,0 +1,8 @@ +pip==23.0 +pip-tools==6.12.2 +pytest==6.2.4 +pytest-cov==2.12.1 +requests_mock==1.9.3 +flake8==3.9.2 +-r common.in +-r calm.in \ No newline at end of file diff --git a/framework/requirements/dev.txt b/framework/requirements/dev.txt new file mode 100644 index 0000000..466c667 --- /dev/null +++ b/framework/requirements/dev.txt @@ -0,0 +1,178 @@ +# +# This file is autogenerated by pip-compile with Python 3.10 +# by the following command: +# +# pip-compile --output-file=requirements/dev.txt requirements/dev.in +# +anytree==2.8.0 + # via -r requirements/calm.in +arrow==0.15.1 + # via -r requirements/calm.in +asciimatics==1.14.0 + # via -r requirements/calm.in 
+attrs==21.2.0 + # via + # jsonschema + # pytest +bidict==0.18.0 + # via -r requirements/calm.in +black==22.6.0 + # via -r requirements/calm.in +build==0.10.0 + # via pip-tools +cerberus==1.3.4 + # via -r requirements/common.in +certifi==2021.5.30 + # via requests +charset-normalizer==2.0.12 + # via requests +click==8.0.4 + # via + # -r requirements/calm.in + # black + # click-completion + # click-didyoumean + # click-repl + # pip-tools +click-completion==0.5.2 + # via -r requirements/calm.in +click-didyoumean==0.0.3 + # via -r requirements/calm.in +click-repl==0.2.0 + # via -r requirements/calm.in +colorama==0.4.1 + # via + # -r requirements/calm.in + # rainbow-logging-handler +colorlog==5.0.1 + # via -r requirements/calm.in +contextlib2==0.5.5 + # via schema +coverage==5.5 + # via pytest-cov +docopt==0.6.2 + # via -r requirements/calm.in +flake8==3.9.2 + # via -r requirements/dev.in +future==0.18.3 + # via asciimatics +idna==2.10 + # via requests +importlib-metadata==4.6.0 + # via -r requirements/calm.in +iniconfig==1.1.1 + # via pytest +jinja2==3.0.3 + # via + # -r requirements/calm.in + # click-completion +json5==0.9.11 + # via -r requirements/common.in +jsonref==0.2 + # via -r requirements/calm.in +jsonschema==3.2.0 + # via -r requirements/calm.in +logutils==0.3.5 + # via rainbow-logging-handler +markupsafe==2.1.2 + # via jinja2 +mccabe==0.6.1 + # via flake8 +mypy-extensions==1.0.0 + # via black +packaging==21.0 + # via + # build + # pytest +pathspec==0.11.0 + # via black +peewee==3.10.0 + # via -r requirements/calm.in +pillow==9.4.0 + # via asciimatics +pip-tools==6.12.2 + # via -r requirements/dev.in +platformdirs==3.0.0 + # via black +pluggy==0.13.1 + # via pytest +prompt-toolkit==3.0.37 + # via click-repl +ptable==0.9.2 + # via -r requirements/calm.in +py==1.10.0 + # via pytest +pycodestyle==2.7.0 + # via flake8 +pycryptodome==3.15.0 + # via -r requirements/calm.in +pyfiglet==0.8.post1 + # via asciimatics +pyflakes==2.3.1 + # via flake8 +pyparsing==2.4.7 + # via packaging +pyproject-hooks==1.0.0 + # via build +pyrsistent==0.19.3 + # via jsonschema +pytest==6.2.4 + # via + # -r requirements/dev.in + # pytest-cov +pytest-cov==2.12.1 + # via -r requirements/dev.in +python-dateutil==2.8.2 + # via arrow +pyyaml==6.0 + # via -r requirements/common.in +rainbow-logging-handler==2.2.2 + # via -r requirements/common.in +requests==2.27.0 + # via + # -r requirements/calm.in + # -r requirements/common.in + # requests-mock + # requests-toolbelt +requests-mock==1.9.3 + # via -r requirements/dev.in +requests-toolbelt==0.9.1 + # via -r requirements/calm.in +ruamel-yaml==0.16.12 + # via -r requirements/calm.in +schema==0.7.1 + # via -r requirements/calm.in +scrypt==0.8.20 + # via -r requirements/calm.in +shellingham==1.5.0.post1 + # via click-completion +six==1.16.0 + # via + # anytree + # click-completion + # click-repl + # jsonschema + # python-dateutil + # requests-mock +toml==0.10.2 + # via + # pytest + # pytest-cov +tomli==1.2.1 + # via + # black + # build +urllib3==1.26.6 + # via requests +wcwidth==0.2.6 + # via + # asciimatics + # prompt-toolkit +wheel==0.38.4 + # via pip-tools +zipp==3.15.0 + # via importlib-metadata + +# The following packages are considered to be unsafe in a requirements file: +# pip +# setuptools diff --git a/framework/requirements/prod.in b/framework/requirements/prod.in new file mode 100644 index 0000000..ac8221b --- /dev/null +++ b/framework/requirements/prod.in @@ -0,0 +1,2 @@ +-r common.in +-r calm.in \ No newline at end of file diff --git 
a/framework/requirements/prod.txt b/framework/requirements/prod.txt new file mode 100644 index 0000000..ee5c717 --- /dev/null +++ b/framework/requirements/prod.txt @@ -0,0 +1,47 @@ +# +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: +# +# pip-compile --output-file=requirements/prod.txt requirements/prod.in +# +altgraph==0.17.3 + # via + # macholib + # pyinstaller +certifi==2022.12.7 + # via requests +chardet==4.0.0 + # via requests +click==8.0.1 + # via + # -r requirements/common.in + # click-option-group +click-option-group==0.5.3 + # via -r requirements/common.in +colorama==0.4.6 + # via rainbow-logging-handler +colorlog==6.7.0 + # via -r requirements/common.in +idna==2.10 + # via requests +logutils==0.3.5 + # via rainbow-logging-handler +macholib==1.16.2 + # via pyinstaller +pyinstaller==4.9 + # via -r requirements/common.in +pyinstaller-hooks-contrib==2022.15 + # via pyinstaller +rainbow-logging-handler==2.2.2 + # via -r requirements/common.in +requests==2.25.1 + # via + # -r requirements/common.in + # requests-toolbelt +requests-toolbelt==0.9.1 + # via -r requirements/common.in +urllib3==1.26.14 + # via requests + +# The following packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/framework/scripts/__init__.py b/framework/scripts/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/framework/scripts/python/__init__.py b/framework/scripts/python/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/framework/scripts/python/add_ad_server_pe.py b/framework/scripts/python/add_ad_server_pe.py new file mode 100644 index 0000000..747ba11 --- /dev/null +++ b/framework/scripts/python/add_ad_server_pe.py @@ -0,0 +1,59 @@ +from helpers.log_utils import get_logger +from scripts.python.cluster_script import ClusterScript +from scripts.python.helpers.v1.authentication import AuthN + +logger = get_logger(__name__) + + +class AddAdServerPe(ClusterScript): + """ + The Script to add name servers to PE clusters + """ + + def __init__(self, data: dict, **kwargs): + super(AddAdServerPe, self).__init__(data, **kwargs) + + def execute_single_cluster(self, cluster_ip: str, cluster_details: dict): + # Only for parallel runs + if self.parallel: + self.set_current_thread_name(cluster_ip) + + try: + pe_session = cluster_details["pe_session"] + + authn = AuthN(pe_session) + authn_payload = cluster_details.get("directory_services") + + if not authn_payload: + logger.warning(f"Authentication payload not specified for the cluster " + f"'{cluster_ip}/ {cluster_details['cluster_info']['name']}'") + return + + existing_directory_services = authn.get_directories() + ad = next((ad for ad in existing_directory_services + if ad.get("name") == authn_payload["ad_name"]), None) + + if ad: + logger.warning(f"{authn_payload['ad_name']} already exists in the cluster " + f"'{cluster_ip}/ {cluster_details['cluster_info']['name']}'") + return + + try: + response = authn.create_directory_services(**authn_payload) + + if isinstance(response, str): + self.exceptions.append(response) + except Exception as e: + cluster_info = f"'{cluster_ip}/ {cluster_details['cluster_info']['name']}'" + self.exceptions.append(f"{type(self).__name__} failed for the cluster {cluster_info} " + f"with the error: {e}") + return + + logger.info(f"{authn_payload['ad_name']} created in the cluster " + f"'{cluster_ip}/ {cluster_details['cluster_info']['name']}'") + except Exception as e: + self.exceptions.append(e) + + def verify(self, **kwargs): + # 
todo check if verifications is needed? + pass diff --git a/framework/scripts/python/cluster_script.py b/framework/scripts/python/cluster_script.py new file mode 100644 index 0000000..15e438b --- /dev/null +++ b/framework/scripts/python/cluster_script.py @@ -0,0 +1,51 @@ +import threading +import multiprocessing +import concurrent.futures +from abc import abstractmethod +from helpers.log_utils import get_logger +from scripts.python.script import Script + +logger = get_logger(__name__) + + +class ClusterScript(Script): + def __init__(self, data: dict, parallel: bool = True): + self.data = data + # pass the Cluster Objects + # create_pe_pc helper function can be used + self.pe_clusters = self.data.get("clusters", {}) + self.parallel = parallel + super(ClusterScript, self).__init__() + + def execute(self, **kwargs): + if self.parallel: + # Get the number of available CPU cores + num_cores = multiprocessing.cpu_count() + + # Set the value of max_workers based on the number of CPU cores + max_workers = num_cores + 4 + + try: + with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: + executor.map(self.execute_single_cluster, self.pe_clusters.keys(), self.pe_clusters.values()) + except Exception as e: + self.exceptions.append(e) + else: + try: + for cluster_ip, cluster_details in self.pe_clusters.items(): + self.execute_single_cluster(cluster_ip, cluster_details) + except Exception as e: + self.exceptions.append(e) + + @abstractmethod + def execute_single_cluster(self, cluster_ip: str, cluster_details: dict): + pass + + def verify(self, **kwargs): + pass + + def set_current_thread_name(self, cluster_ip: str): + current_thread = threading.current_thread() + + if current_thread != threading.main_thread(): + current_thread.name = f"Thread-{type(self).__name__}-{cluster_ip}" diff --git a/framework/scripts/python/configure_pod.py b/framework/scripts/python/configure_pod.py new file mode 100644 index 0000000..688121b --- /dev/null +++ b/framework/scripts/python/configure_pod.py @@ -0,0 +1,134 @@ +import time +from copy import deepcopy +from helpers.helper_functions import create_pe_pc_objects +from scripts.python.add_ad_server_pe import AddAdServerPe +from scripts.python.connect_to_az_pc import ConnectToAz +from scripts.python.create_address_groups_pc import CreateAddressGroups +from scripts.python.create_container_pe import CreateContainerPe +from scripts.python.create_pc_categories import CreateCategoryPc +from scripts.python.create_pc_subnets import CreateSubnetsPc +from scripts.python.create_protection_policy_pc import CreateProtectionPolicy +from scripts.python.create_recovery_plan import CreateRecoveryPlan +from scripts.python.create_rolemapping_pe import CreateRoleMapping +from scripts.python.create_security_policy_pc import CreateNetworkSecurityPolicy +from scripts.python.create_service_groups_pc import CreateServiceGroups +from scripts.python.enable_leap_pc import EnableLeap +from scripts.python.enable_microseg_pc import EnableMicroseg +from scripts.python.helpers.batch_script import BatchScript +from scripts.python.initial_cluster_config import InitialClusterConfig +from scripts.python.register_pe_to_pc import RegisterToPc +from scripts.python.script import Script +from helpers.log_utils import get_logger +from scripts.python.update_dsip_pe import UpdateDsip + +logger = get_logger(__name__) + + +class PodConfig(Script): + """ + Configure Pods with below configs + """ + def __init__(self, data: dict): + self.parent_batch_scripts = None + self.data = data + self.pods = 
self.data["pods"] + super(PodConfig, self).__init__() + + def execute(self): + start = time.time() + for pod in self.pods: + self.parent_batch_scripts = {} + for az, az_values in pod.items(): + # Create a parent Batch script for each AZ, run sequentially + self.parent_batch_scripts[az] = BatchScript() + # Create PC session for each AZ, PE sessions for all the clusters + create_pe_pc_objects(az_values) + # Get primary scripts + self.parent_batch_scripts[az].add(self.primary_scripts(az_values)) + # Get cluster update scripts + self.parent_batch_scripts[az].add(self.cluster_update_scripts(az_values)) + # Get secondary scripts + self.parent_batch_scripts[az].add(self.secondary_scripts(az_values)) + # Get tertiary scripts + self.parent_batch_scripts[az].add(self.tertiary_scripts(az_values)) + # Get DR scripts + self.parent_batch_scripts[az].add(self.configure_dr_scripts(az_values)) + + for pod in self.pods: + for az in pod.keys(): + self.parent_batch_scripts[az].run() + + total_time = time.time() - start + logger.info(f"Total time: {total_time:.2f} seconds") + + @staticmethod + def primary_scripts(data: dict) -> BatchScript: + primary_batch_scripts = BatchScript(parallel=True) + # Enable Flow in PC + # Create Categories in PC + # Create AZs in PC + # Initial cluster config in all clusters + + data = deepcopy(data) + scripts = [EnableMicroseg(data), + CreateCategoryPc(data), + ConnectToAz(data), + InitialClusterConfig(data)] + primary_batch_scripts.add_all(scripts) + return primary_batch_scripts + + @staticmethod + def cluster_update_scripts(data: dict) -> BatchScript: + cluster_scripts = BatchScript() + # Add Auth -> needs InitialClusterConfig + cluster_scripts.add(AddAdServerPe(data)) + return cluster_scripts + + @staticmethod + def secondary_scripts(data: dict) -> BatchScript: + secondary_batch_scripts = BatchScript(parallel=True) + # Register PE to PC -> needs InitialClusterConfig + # Create containers in PE -> needs InitialClusterConfig + # Update DSIP -> needs InitialClusterConfig, fails if we update DSIP with Auth + # Create AddressGroups -> needs RegisterToPc + # Create ServiceGroups -> needs RegisterToPc + + data = deepcopy(data) + scripts = [RegisterToPc(data), + CreateContainerPe(data), + UpdateDsip(data), + CreateAddressGroups(data), + CreateServiceGroups(data)] + secondary_batch_scripts.add_all(scripts) + return secondary_batch_scripts + + @staticmethod + def tertiary_scripts(data: dict) -> BatchScript: + tertiary_batch_scripts = BatchScript(parallel=True) + # Create Subnets in PC -> needs RegisterToPc + # Add Role-mappings -> needs AddAdServer + # Add Security Policies -> needs CreateAddressGroups, CreateServiceGroups + # Enable Leap -> Needs UpdateDsip + + data = deepcopy(data) + scripts = [CreateSubnetsPc(data), + CreateRoleMapping(data), + CreateNetworkSecurityPolicy(data), + EnableLeap(data)] + tertiary_batch_scripts.add_all(scripts) + return tertiary_batch_scripts + + @staticmethod + def configure_dr_scripts(data: dict) -> BatchScript: + quaternary_batch_scripts = BatchScript() + # create PP -> needs EnableLeap + # create RP -> needs CreateProtectionPolicy + + data = deepcopy(data) + scripts = [CreateProtectionPolicy(data), + CreateRecoveryPlan(data)] + quaternary_batch_scripts.add_all(scripts) + return quaternary_batch_scripts + + def verify(self): + pass diff --git a/framework/scripts/python/connect_to_az_pc.py b/framework/scripts/python/connect_to_az_pc.py new file mode 100644 index 0000000..9c1cd56 --- /dev/null +++ b/framework/scripts/python/connect_to_az_pc.py @@ -0,0 
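The per-AZ pipeline assembled above is just a sequence of BatchScript stages, so an additional stage would slot in the same way the existing ones do. A rough sketch, with MyNewStageScript as a hypothetical Script subclass:

    @staticmethod
    def extra_scripts(data: dict) -> BatchScript:
        extra_batch_scripts = BatchScript(parallel=True)
        # runs its members concurrently, like the other parallel stages
        data = deepcopy(data)
        extra_batch_scripts.add_all([MyNewStageScript(data)])
        return extra_batch_scripts

    # and inside execute(), after the existing stages:
    # self.parent_batch_scripts[az].add(self.extra_scripts(az_values))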
+1,63 @@ +from helpers.log_utils import get_logger +from scripts.python.helpers.state_monitor.pc_task_monitor import PcTaskMonitor +from scripts.python.helpers.v3.cloud_trust import CloudTrust +from scripts.python.script import Script + +logger = get_logger(__name__) + + +class ConnectToAz(Script): + """ + Class that connects to AZs + """ + + def __init__(self, data: dict): + self.task_uuid_list = [] + self.data = data + self.pc_session = self.data["pc_session"] + super(ConnectToAz, self).__init__() + + def execute(self, **kwargs): + try: + cloud_trust = CloudTrust(self.pc_session) + + if not self.data.get("remote_azs"): + logger.warning(f"Skipping creation of AZ in {self.data['pc_ip']}") + return + + current_az_list = cloud_trust.list() + current_az_list = [az.get("spec", {}).get("resources", {}).get("url") + for az in current_az_list if az.get("spec", {}).get("resources", {}).get("url")] + + az_list = [] + for az, details in self.data["remote_azs"].items(): + remote_pc_ip = az + remote_pc_username = details["username"] + remote_pc_password = details["password"] + cloud_type = details.get("cloud_type", "ONPREM_CLOUD") + + if remote_pc_ip in current_az_list: + logger.warning(f"{remote_pc_ip} AZ is already added in {self.data['pc_ip']}!") + return + + try: + spec = CloudTrust.get_payload(cloud_type, remote_pc_ip, remote_pc_username, remote_pc_password) + az_list.append(spec) + except Exception as e: + self.exceptions.append(f"Failed to add remote AZ {remote_pc_ip}: {e}") + + logger.info(f"Batch create AZs creation in {self.data['pc_ip']}") + self.task_uuid_list = cloud_trust.batch_op.batch_create(request_payload_list=az_list) + except Exception as e: + self.exceptions.append(e) + + def verify(self, **kwargs): + if self.task_uuid_list: + app_response, status = PcTaskMonitor(self.pc_session, + task_uuid_list=self.task_uuid_list).monitor() + + if app_response: + self.pass_rate.append(f"Some tasks have failed. {app_response}") + + if not status: + self.pass_rate.append("Timed out. Creation of AZs in PC didn't happen in the prescribed timeframe") diff --git a/framework/scripts/python/create_address_groups_pc.py b/framework/scripts/python/create_address_groups_pc.py new file mode 100644 index 0000000..9ce70b3 --- /dev/null +++ b/framework/scripts/python/create_address_groups_pc.py @@ -0,0 +1,51 @@ +from helpers.log_utils import get_logger +from scripts.python.helpers.v3.address_group import AddressGroup +from scripts.python.script import Script + +logger = get_logger(__name__) + + +class CreateAddressGroups(Script): + """ + Class that creates Address Groups + """ + def __init__(self, data: dict): + self.task_uuid_list = None + self.data = data + self.address_groups = self.data.get("address_groups") + self.pc_session = self.data["pc_session"] + super(CreateAddressGroups, self).__init__() + + def execute(self, **kwargs): + try: + address_group = AddressGroup(self.pc_session) + address_group_list = address_group.list() + address_group_name_list = [ag.get("address_group", {}).get("name") + for ag in address_group_list if ag.get("address_group", {}).get("name")] + + if not self.address_groups: + logger.warning(f"No address_groups to create in {self.data['pc_ip']}. 
Skipping...") + return + + ags_to_create = [] + for ag in self.address_groups: + if ag["name"] in address_group_name_list: + logger.warning(f"{ag['name']} already exists in {self.data['pc_ip']}!") + continue + try: + ags_to_create.append(address_group.create_address_group_spec(ag)) + except Exception as e: + self.exceptions.append(f"Failed to create address_group {ag['name']}: {e}") + + if not ags_to_create: + logger.warning(f"No address_groups to create in {self.data['pc_ip']}. Skipping...") + return + + logger.info(f"Batch create Address groups in {self.data['pc_ip']}") + address_group.batch_op.batch_create(request_payload_list=ags_to_create) + except Exception as e: + self.exceptions.append(e) + + def verify(self, **kwargs): + # todo verify + pass diff --git a/framework/scripts/python/create_bp_calm.py b/framework/scripts/python/create_bp_calm.py new file mode 100644 index 0000000..ac658f7 --- /dev/null +++ b/framework/scripts/python/create_bp_calm.py @@ -0,0 +1,37 @@ +from calm.dsl.api import get_api_client +from helpers.log_utils import get_logger +from scripts.python.script import Script +from calm.dsl.cli import create_blueprint_from_dsl + +logger = get_logger(__name__) + + +class CreateBp(Script): + def __init__(self, data: dict): + self.data = data + super(CreateBp, self).__init__() + + def execute(self, **kwargs): + try: + # Get the BPs list + for bp in self.data["bp_list"]: + logger.info(f"Creating Blueprint {bp['name']}") + bp_file = f"{self.data['project_root']}/{bp['dsl_file']}" + + try: + client = get_api_client() + + create_blueprint_from_dsl( + client=client, + name=bp['name'], + force_create=True, + bp_file=bp_file + ) + logger.info(f"Created {bp['name']} successfully!") + except Exception as e: + self.exceptions.append(f"Failed to create BP {bp_file}: {e}") + except Exception as e: + self.exceptions.append(e) + + def verify(self, **kwargs): + pass diff --git a/framework/scripts/python/create_calm_application_from_dsl.py b/framework/scripts/python/create_calm_application_from_dsl.py new file mode 100644 index 0000000..264c81c --- /dev/null +++ b/framework/scripts/python/create_calm_application_from_dsl.py @@ -0,0 +1,48 @@ +import logging +from helpers.log_utils import get_logger +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.state_monitor.application_state_monitor import ApplicationStateMonitor +from scripts.python.script import Script +from calm.dsl.cli import create_app +from scripts.python.helpers.v3.application import Application + +logger = get_logger(__name__) + + +class CreateAppFromDsl(Script): + def __init__(self, data: dict): + self.data = data + + def execute(self, **kwargs): + session = RestAPIUtil(self.data["pc_ip"], user=self.data["pc_username"], pwd=self.data["pc_password"], + port="9440", secured=True) + + # Get the BPs list + for bp in self.data["bp_list"]: + logger.info(f"Creating Blueprint and Launching app {bp['app_name']}") + bp_file = f"{self.data['project_root']}/{bp['dsl_file']}" + logging.info(f"Creating app {bp['app_name']} from the blueprint {bp['name']}") + create_app( + app_name=bp['app_name'], + bp_file=bp_file + ) + + # Monitoring application status + application_op = Application(session) + application_uuid = application_op.get_uuid_by_name(bp['app_name']) + + if application_uuid: + logger.info("Application is being provisioned") + app_response, status = ApplicationStateMonitor(session, + application_uuid=application_uuid).monitor() + if not status or not app_response: + raise Exception("Application deployment 
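Both blueprint scripts (CreateBp above and CreateAppFromDsl that follows) iterate self.data["bp_list"]. A plausible shape for one entry, showing only the keys actually read in this change, with placeholder values:

bp_entry = {
    "name": "edge-ai-bp",                          # blueprint name passed to create_blueprint_from_dsl
    "app_name": "edge-ai-app",                     # only read by CreateAppFromDsl when launching the app
    "dsl_file": "calm-dsl/blueprints/edge_ai.py",  # joined with project_root to locate the DSL file
}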
failed") + else: + logger.info("Application deployment successful") + else: + logger.warning("Could not fetch application uuid to monitor. Application might or " + "might not be running") + raise Exception("Stopped") + + def verify(self, **kwargs): + pass diff --git a/framework/scripts/python/create_container_pe.py b/framework/scripts/python/create_container_pe.py new file mode 100644 index 0000000..9e2b62a --- /dev/null +++ b/framework/scripts/python/create_container_pe.py @@ -0,0 +1,39 @@ +from scripts.python.cluster_script import ClusterScript +from scripts.python.helpers.v1.container import Container +from helpers.log_utils import get_logger + +logger = get_logger(__name__) + + +class CreateContainerPe(ClusterScript): + """ + Create Storage container in the give clusters + """ + def __init__(self, data: dict, **kwargs): + super(CreateContainerPe, self).__init__(data, **kwargs) + + def execute_single_cluster(self, cluster_ip: str, cluster_details: dict): + # Only for parallel runs + if self.parallel: + self.set_current_thread_name(cluster_ip) + + pe_session = cluster_details["pe_session"] + + try: + if cluster_details.get("containers"): + for container in cluster_details["containers"]: + container_op = Container(pe_session) + # todo pre-checks if they are valid + container_op.create(**container) + else: + logger.info(f"No containers specified in '{cluster_ip}/ {cluster_details['cluster_info']['name']}'." + " Skipping...") + except Exception as e: + cluster_info = f"'{cluster_ip}/ {cluster_details['cluster_info']['name']}'" + self.exceptions.append(f"{type(self).__name__} failed for the cluster " + f"{cluster_info} with the error: {e}") + return + + def verify(self): + # todo do we need to verify these? + pass diff --git a/framework/scripts/python/create_pc_categories.py b/framework/scripts/python/create_pc_categories.py new file mode 100644 index 0000000..c791c6d --- /dev/null +++ b/framework/scripts/python/create_pc_categories.py @@ -0,0 +1,70 @@ +from helpers.log_utils import get_logger +from scripts.python.helpers.v3.category import Category +from scripts.python.script import Script + +logger = get_logger(__name__) + + +class CreateCategoryPc(Script): + """ + Class that creates Categories in PC + """ + def __init__(self, data: dict): + self.response = None + self.data = data + self.pc_session = self.data["pc_session"] + super(CreateCategoryPc, self).__init__() + + def execute(self, **kwargs): + try: + category = Category(self.pc_session) + existing_categories_list = category.categories_with_values() + + if not self.data.get("categories"): + logger.warning(f"Skipping category creation in {self.data['pc_ip']}") + return + + category_list = [] + for category_to_create in self.data["categories"]: + name = category_to_create.get("name") + description = category_to_create.get("description") + values = category_to_create.get("values") + + category_exists = next((existing_category for existing_category in existing_categories_list + if existing_category["name"] == name), None) + + try: + # Category is already there, just need to add values to the category + if category_exists: + values = [value_to_create for value_to_create in values + if value_to_create not in category_exists["values"]] + else: + # create category first + data = { + "name": name, + "description": description + } + # We are using update as we need to use PUT to create categories + category.update(endpoint=name, data=data) + + if values: + # add values to the category + category_list.append({ + "name": name, + "values": values 
+ }) + except Exception as e: + self.exceptions.append(f"Failed to create category {name}: {e}") + + if not category_list: + logger.warning(f"No categories to create in {self.data['pc_ip']}") + return + + logger.info(f"Batch create Categories in {self.data['pc_ip']}") + category.batch_values_add(category_list) + except Exception as e: + self.exceptions.append(e) + + def verify(self, **kwargs): + # todo how to verify? + pass diff --git a/framework/scripts/python/create_pc_subnets.py b/framework/scripts/python/create_pc_subnets.py new file mode 100644 index 0000000..526b6ea --- /dev/null +++ b/framework/scripts/python/create_pc_subnets.py @@ -0,0 +1,76 @@ +from helpers.log_utils import get_logger +from scripts.python.helpers.state_monitor.pc_task_monitor import PcTaskMonitor +from scripts.python.helpers.v3.cluster import Cluster as PcCluster +from scripts.python.helpers.v3.network import Network +from scripts.python.script import Script + +logger = get_logger(__name__) + + +class CreateSubnetsPc(Script): + """ + Class that creates subnets in PC + """ + + def __init__(self, data: dict): + self.task_uuid_list = [] + self.data = data + self.pc_session = self.data["pc_session"] + # pass the Cluster Objects + # create_pe_pc helper function can be used + self.pe_clusters = self.data.get("clusters", {}) + super(CreateSubnetsPc, self).__init__() + + def execute(self, **kwargs): + try: + network = Network(session=self.pc_session) + subnets_to_create = [] + + for _, cluster_details in self.pe_clusters.items(): + for subnet_info in cluster_details.get("networks", []): + cluster_name = cluster_details["cluster_info"]["name"] + cluster_uuid = cluster_details["cluster_info"].get("uuid") + + if not cluster_uuid: + pc_cluster = PcCluster(self.pc_session) + pc_cluster.get_pe_info_list() + cluster_uuid = pc_cluster.name_uuid_map.get(cluster_name) + if not cluster_uuid: + self.exceptions.append(f"Failed to create subnets in {cluster_name}") + continue + + filter_criteria = f"cluster_name=={cluster_name};vlan_id=={subnet_info['vlan_id']}" + + subnets_response = network.list(filter=filter_criteria) + + if len(subnets_response) > 0: + logger.warning(f"Skipping Subnet creation. Subnet {subnet_info['name']} with vlanId " + f"{subnet_info['vlan_id']}, already exists in the cluster {cluster_name}") + else: + + try: + # add cluster_uuid + payload = network.create_pc_subnet_payload(cluster_uuid=cluster_uuid, **subnet_info) + subnets_to_create.append(payload) + except Exception as e: + self.exceptions.append(f"Failed to create subnets {subnet_info['name']}: {e}") + + if not subnets_to_create: + logger.warning(f"No subnets to create in {self.data['pc_ip']}") + return + + logger.info(f"Batch create subnets in {self.data['pc_ip']}") + self.task_uuid_list = network.batch_create_network(subnets_to_create) + except Exception as e: + self.exceptions.append(e) + + def verify(self, **kwargs): + if self.task_uuid_list: + app_response, status = PcTaskMonitor(self.pc_session, + task_uuid_list=self.task_uuid_list).monitor() + + if app_response: + self.exceptions.append(f"Some tasks have failed. {app_response}") + + if not status: + self.exceptions.append("Timed out. 
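CreateSubnetsPc above consumes per-cluster "networks" entries and forwards them as keyword arguments to Network.create_pc_subnet_payload. Only "name" and "vlan_id" are referenced directly here, so the entry below is an assumed illustration; whatever else the subnet payload needs would travel in the same dict:

network_entry = {
    "name": "vlan-110-workloads",  # compared against existing subnets for the skip check
    "vlan_id": 110,                # combined with cluster_name in the duplicate filter
    # ... any further keys accepted by Network.create_pc_subnet_payload
}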
Creation of subnets in PC didn't happen in the prescribed timeframe") diff --git a/framework/scripts/python/create_protection_policy_pc.py b/framework/scripts/python/create_protection_policy_pc.py new file mode 100644 index 0000000..e66b3af --- /dev/null +++ b/framework/scripts/python/create_protection_policy_pc.py @@ -0,0 +1,82 @@ +from helpers.log_utils import get_logger +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.state_monitor.pc_task_monitor import PcTaskMonitor +from scripts.python.helpers.v3.cluster import Cluster as PcCluster +from scripts.python.helpers.v3.protection_rule import ProtectionRule +from scripts.python.script import Script + +logger = get_logger(__name__) + + +class CreateProtectionPolicy(Script): + """ + Class that creates PP + """ + def __init__(self, data: dict): + self.task_uuid_list = None + self.data = data + self.pc_session = self.data["pc_session"] + super(CreateProtectionPolicy, self).__init__() + + def execute(self, **kwargs): + try: + protection_policy = ProtectionRule(self.pc_session) + protection_policy_list = protection_policy.list() + protection_policy_name_list = [pp.get("spec", {}).get("name") + for pp in protection_policy_list if pp.get("spec", {}).get("name")] + + if not self.data.get("protection_rules"): + logger.warning(f"Skipping creation of Protection policies in {self.data['pc_ip']}") + return + + source_pc_cluster = PcCluster(self.pc_session) + source_pc_cluster.get_pe_info_list() + source_pe_clusters = { + self.data["pc_ip"]: source_pc_cluster.name_uuid_map + } + + if not self.data.get("remote_azs"): + logger.warning(f"AZs are to be provided in {self.data['pc_ip']}") + return + + remote_pe_clusters = {} + for az, details in self.data["remote_azs"].items(): + remote_pc_username = details["username"] + remote_pc_password = details["password"] + remote_pc_cluster = PcCluster( + RestAPIUtil(az, user=remote_pc_username, pwd=remote_pc_password, port="9440", secured=True)) + remote_pc_cluster.get_pe_info_list() + remote_pe_clusters[az] = remote_pc_cluster.name_uuid_map + + pp_list = [] + for pp in self.data["protection_rules"]: + if pp['name'] in protection_policy_name_list: + logger.warning(f"{pp['name']} already exists in {self.data['pc_ip']}!") + continue + + try: + spec = protection_policy.get_payload(pp, source_pe_clusters, remote_pe_clusters) + pp_list.append(spec) + except Exception as e: + self.exceptions.append(f"Failed to create Protection policy {pp['name']}: {e}") + + if not pp_list: + logger.warning(f"Provided PPs are already created in {self.data['pc_ip']}") + return + + logger.info(f"Batch create Protection policies in {self.data['pc_ip']}") + self.task_uuid_list = protection_policy.batch_op.batch_create(request_payload_list=pp_list) + except Exception as e: + self.exceptions.append(e) + + def verify(self, **kwargs): + if self.task_uuid_list: + app_response, status = PcTaskMonitor(self.pc_session, + task_uuid_list=self.task_uuid_list).monitor() + + if app_response: + self.exceptions.append(f"Some tasks have failed. {app_response}") + + if not status: + self.exceptions.append( + "Timed out. 
Creation of Protection policies in PC didn't happen in the prescribed timeframe") diff --git a/framework/scripts/python/create_recovery_plan.py b/framework/scripts/python/create_recovery_plan.py new file mode 100644 index 0000000..f435250 --- /dev/null +++ b/framework/scripts/python/create_recovery_plan.py @@ -0,0 +1,72 @@ +from helpers.log_utils import get_logger +from scripts.python.helpers.state_monitor.pc_task_monitor import PcTaskMonitor +from scripts.python.helpers.v3.cluster import Cluster as PcCluster +from scripts.python.helpers.v3.recovery_plan import RecoveryPlan +from scripts.python.script import Script + +logger = get_logger(__name__) + + +class CreateRecoveryPlan(Script): + """ + Class that creates RP + """ + def __init__(self, data: dict): + self.task_uuid_list = None + self.data = data + self.pc_session = self.data["pc_session"] + super(CreateRecoveryPlan, self).__init__() + + def execute(self, **kwargs): + try: + recovery_plan = RecoveryPlan(self.pc_session) + recovery_plan_list = recovery_plan.list() + recovery_plan_name_list = [rp.get("spec", {}).get("name") + for rp in recovery_plan_list if rp.get("spec", {}).get("name")] + + if not self.data.get("recovery_plans"): + logger.warning(f"Skipping creation of Recovery plans in {self.data['pc_ip']}") + return + + source_pc_cluster = PcCluster(self.pc_session) + source_pc_cluster.get_pe_info_list() + source_pe_clusters = { + self.data["pc_ip"]: source_pc_cluster.name_uuid_map + } + + if not self.data.get("remote_azs"): + logger.warning(f"AZs are to be provided in {self.data['pc_ip']}") + return + + rp_list = [] + for rp in self.data["recovery_plans"]: + if rp['name'] in recovery_plan_name_list: + logger.warning(f"{rp['name']} already exists in {self.data['pc_ip']}!") + continue + + try: + spec = recovery_plan.get_payload(rp, source_pe_clusters) + rp_list.append(spec) + except Exception as e: + self.exceptions.append(f"Failed to create Recovery plan {rp['name']}: {e}") + + if not rp_list: + logger.warning(f"Provided RPs are already created in {self.data['pc_ip']}") + return + + logger.info(f"Batch create Recovery plans in {self.data['pc_ip']}") + self.task_uuid_list = recovery_plan.batch_op.batch_create(request_payload_list=rp_list) + except Exception as e: + self.exceptions.append(e) + + def verify(self, **kwargs): + if self.task_uuid_list: + app_response, status = PcTaskMonitor(self.pc_session, + task_uuid_list=self.task_uuid_list).monitor() + + if app_response: + self.exceptions.append(f"Some tasks have failed. {app_response}") + + if not status: + self.exceptions.append( + "Timed out. 
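Both the protection-policy and the recovery-plan scripts key their payload builders off cluster-name-to-UUID maps grouped by PC IP, built from PcCluster.name_uuid_map. Their shape, with invented addresses and truncated UUIDs, is roughly:

# source_pe_clusters = {"10.10.10.10": {"az1-cluster-01": "0005a...-uuid"}}
# remote_pe_clusters = {"10.20.20.20": {"az2-cluster-01": "0005b...-uuid"}}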
Creation of Recovery plans in PC didn't happen in the prescribed timeframe") diff --git a/framework/scripts/python/create_rolemapping_pe.py b/framework/scripts/python/create_rolemapping_pe.py new file mode 100644 index 0000000..2000421 --- /dev/null +++ b/framework/scripts/python/create_rolemapping_pe.py @@ -0,0 +1,61 @@ +from helpers.log_utils import get_logger +from scripts.python.helpers.v1.authentication import AuthN +from scripts.python.cluster_script import ClusterScript + +logger = get_logger(__name__) + + +class CreateRoleMapping(ClusterScript): + """ + The Script to create role mapping in PE clusters + """ + LOAD_TASK = False + DEFAULT_ROLE_MAPPINGS = [ + { + "role_type": "ROLE_CLUSTER_ADMIN", + "entity_type": "OU", + "values": ["admin"] + }, + { + "role_type": "ROLE_USER_ADMIN", + "entity_type": "OU", + "values": ["user"] + }, + { + "role_type": "ROLE_CLUSTER_VIEWER", + "entity_type": "OU", + "values": ["viewer"] + } + ] + + def __init__(self, data: dict, **kwargs): + super(CreateRoleMapping, self).__init__(data, **kwargs) + + def execute_single_cluster(self, cluster_ip: str, cluster_details: dict): + # Only for parallel runs + if self.parallel: + self.set_current_thread_name(cluster_ip) + + pe_session = cluster_details["pe_session"] + + authn = AuthN(pe_session) + authn_payload = cluster_details.get("directory_services") + + if not authn_payload["ad_name"] and authn_payload["role_mappings"]: + logger.warning(f"Authentication payload not specified for the cluster " + f"'{cluster_ip}/ {cluster_details['cluster_info']['name']}'") + return + try: + authn.create_role_mapping( + directory_name=authn_payload["ad_name"], + role_mappings=authn_payload["role_mappings"], + cluster_info=f"'{cluster_ip}/ {cluster_details['cluster_info']['name']}'" + ) + except Exception as e: + cluster_info = f"'{cluster_ip}/ {cluster_details['cluster_info']['name']}'" + logger.error(f"{type(self).__name__} failed for the cluster {cluster_info} with the error: {e}") + return + + def verify(self, **kwargs): + # todo check if verifications is needed? + pass diff --git a/framework/scripts/python/create_security_policy_pc.py b/framework/scripts/python/create_security_policy_pc.py new file mode 100644 index 0000000..ba38aae --- /dev/null +++ b/framework/scripts/python/create_security_policy_pc.py @@ -0,0 +1,61 @@ +from helpers.log_utils import get_logger +from scripts.python.helpers.state_monitor.pc_task_monitor import PcTaskMonitor +from scripts.python.helpers.v3.security_rule import SecurityPolicy +from scripts.python.script import Script + +logger = get_logger(__name__) + + +class CreateNetworkSecurityPolicy(Script): + """ + Class that creates Address Groups + """ + + def __init__(self, data: dict): + self.task_uuid_list = None + self.data = data + self.security_policies = self.data.get("security_policies") + self.pc_session = self.data["pc_session"] + super(CreateNetworkSecurityPolicy, self).__init__() + + def execute(self, **kwargs): + try: + security_policy = SecurityPolicy(self.pc_session) + security_policy_list = security_policy.list(length=10000) + security_policy_name_list = [sp.get("spec").get("name") + for sp in security_policy_list if sp.get("spec", {}).get("name")] + + if not self.security_policies: + logger.warning(f"No security_policies to create in {self.data['pc_ip']}. 
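AddAdServerPe and CreateRoleMapping both read the per-cluster "directory_services" block. Only the keys referenced in this change are sketched below with placeholder values; AuthN.create_directory_services receives the whole block, so any additional fields it expects (domain details, service account, and so on) are deliberately omitted:

directory_services = {
    "ad_name": "corp-ad",
    "role_mappings": [
        {"role_type": "ROLE_CLUSTER_ADMIN", "entity_type": "OU", "values": ["admins"]},
        {"role_type": "ROLE_CLUSTER_VIEWER", "entity_type": "OU", "values": ["viewers"]},
    ],
    # ... remaining fields forwarded verbatim to AuthN.create_directory_services
}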
Skipping...") + return + + sps_to_create = [] + for sg in self.security_policies: + if sg["name"] in security_policy_name_list: + logger.warning(f"{sg['name']} already exists in {self.data['pc_ip']}!") + continue + try: + sps_to_create.append(security_policy.create_security_policy_spec(sg)) + except Exception as e: + self.exceptions.append(f"Failed to create Security policy {sg['name']}: {e}") + + if not sps_to_create: + logger.warning(f"No security_policies to create in {self.data['pc_ip']}. Skipping...") + return + + logger.info(f"Batch create Security Policies in {self.data['pc_ip']}") + self.task_uuid_list = security_policy.batch_op.batch_create(request_payload_list=sps_to_create) + except Exception as e: + self.exceptions.append(e) + + def verify(self, **kwargs): + if self.task_uuid_list: + app_response, status = PcTaskMonitor(self.pc_session, + task_uuid_list=self.task_uuid_list).monitor() + + if app_response: + self.exceptions.append(f"Some tasks have failed. {app_response}") + + if not status: + self.exceptions.append("Timed out. Creation of Security policies in PC didn't happen in the" + " prescribed timeframe") diff --git a/framework/scripts/python/create_service_groups_pc.py b/framework/scripts/python/create_service_groups_pc.py new file mode 100644 index 0000000..7f552c4 --- /dev/null +++ b/framework/scripts/python/create_service_groups_pc.py @@ -0,0 +1,52 @@ +from helpers.log_utils import get_logger +from scripts.python.helpers.v3.service_group import ServiceGroup +from scripts.python.script import Script + +logger = get_logger(__name__) + + +class CreateServiceGroups(Script): + """ + Class that creates Address Groups + """ + + def __init__(self, data: dict): + self.task_uuid_list = None + self.data = data + self.service_groups = self.data.get("service_groups") + self.pc_session = self.data["pc_session"] + super(CreateServiceGroups, self).__init__() + + def execute(self, **kwargs): + try: + service_group = ServiceGroup(self.pc_session) + service_group_list = service_group.list(length=10000) + service_group_name_list = [ag.get("service_group", {}).get("name") + for ag in service_group_list if ag.get("service_group", {}).get("name")] + + if not self.service_groups: + logger.warning(f"No service_groups to create in {self.data['pc_ip']}. Skipping...") + return + + sgs_to_create = [] + for sg in self.service_groups: + if sg["name"] in service_group_name_list: + logger.warning(f"{sg['name']} already exists!") + continue + try: + sgs_to_create.append(service_group.create_service_group_spec(sg)) + except Exception as e: + self.exceptions.append(f"Failed to create Security policy {sg['name']}: {e}") + + if not sgs_to_create: + logger.warning(f"No service_groups to create in {self.data['pc_ip']}. 
Skipping...") + return + + logger.info(f"Batch create service groups in {self.data['pc_ip']}") + service_group.batch_op.batch_create(request_payload_list=sgs_to_create) + except Exception as e: + self.exceptions.append(e) + + def verify(self, **kwargs): + # todo verify + pass diff --git a/framework/scripts/python/enable_leap_pc.py b/framework/scripts/python/enable_leap_pc.py new file mode 100644 index 0000000..2852539 --- /dev/null +++ b/framework/scripts/python/enable_leap_pc.py @@ -0,0 +1,42 @@ +from helpers.log_utils import get_logger +from scripts.python.helpers.state_monitor.pc_task_monitor import PcTaskMonitor +from scripts.python.helpers.v3.service import Service +from scripts.python.script import Script + +logger = get_logger(__name__) + + +class EnableLeap(Script): + """ + Class that enables Leap/ DR + """ + def __init__(self, data: dict): + self.task_uuid = None + self.data = data + self.pc_session = self.data["pc_session"] + super(EnableLeap, self).__init__() + + def execute(self, **kwargs): + try: + service = Service(self.pc_session) + status = service.get_dr_status() + + if status in ["ENABLED", "ENABLING"]: + logger.warning(f"SKIP: Leap/ DR service is already enabled {self.data['pc_ip']}") + return + + logger.info(f"Enabling Leap/ DR service {self.data['pc_ip']}") + response = service.enable_leap() + + if response.get("task_uuid"): + self.task_uuid = response.get("task_uuid") + except Exception as e: + self.exceptions.append(e) + + def verify(self, **kwargs): + if self.task_uuid: + app_response, status = PcTaskMonitor(self.pc_session, + task_uuid_list=[self.task_uuid]).monitor() + + if not status: + self.exceptions.append("Timed out. Enabling Leap/ DR in PC didn't happen in the prescribed timeframe") diff --git a/framework/scripts/python/enable_microseg_pc.py b/framework/scripts/python/enable_microseg_pc.py new file mode 100644 index 0000000..37d1674 --- /dev/null +++ b/framework/scripts/python/enable_microseg_pc.py @@ -0,0 +1,42 @@ +from helpers.log_utils import get_logger +from scripts.python.helpers.state_monitor.pc_task_monitor import PcTaskMonitor +from scripts.python.helpers.v3.service import Service +from scripts.python.script import Script + +logger = get_logger(__name__) + + +class EnableMicroseg(Script): + """ + Class that enables microseg/ Flow + """ + def __init__(self, data: dict): + self.task_uuid = None + self.data = data + self.pc_session = self.data["pc_session"] + super(EnableMicroseg, self).__init__() + + def execute(self, **kwargs): + try: + service = Service(self.pc_session) + status = service.get_microseg_status() + + if status in ["ENABLED", "ENABLING"]: + logger.warning(f"SKIP: microseg/ flow service is already enabled {self.data['pc_ip']}") + return + + logger.info(f"Enabling Microseg/ Flow service {self.data['pc_ip']}") + response = service.enable_microseg() + + if response.get("task_uuid"): + self.task_uuid = response.get("task_uuid") + except Exception as e: + self.exceptions.append(e) + + def verify(self, **kwargs): + if self.task_uuid: + app_response, status = PcTaskMonitor(self.pc_session, + task_uuid_list=[self.task_uuid]).monitor() + + if not status: + self.exceptions.append("Timed out. 
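Outside of the PodConfig pipeline, the two service-enable scripts above can be exercised on their own. A minimal sketch, assuming pc_session is an already-constructed RestAPIUtil session for the Prism Central in question:

data = {"pc_ip": "10.10.10.10", "pc_session": pc_session}

script = EnableMicroseg(data)
script.execute()   # issues the enable call and records the returned task UUID, if any
script.verify()    # polls the task via PcTaskMonitor; a timeout is recorded as an exception

if script.exceptions:
    logger.error(script.exceptions)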
Enabling Microseg/ Flow in PC didn't happen in the prescribed timeframe") diff --git a/framework/scripts/python/foundation_script.py b/framework/scripts/python/foundation_script.py new file mode 100644 index 0000000..3f32aaf --- /dev/null +++ b/framework/scripts/python/foundation_script.py @@ -0,0 +1,369 @@ +import sys +import socket +import struct +from helpers.log_utils import get_logger +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.fc.imaged_nodes import ImagedNode +from scripts.python.helpers.fc.imaged_clusters import ImagedCluster +from scripts.python.script import Script +from scripts.python.image_cluster_script import ImageClusterScript +from scripts.python.helpers.batch_script import BatchScript + + +logger = get_logger(__name__) + + +class FoundationScript(Script): + + def __init__(self, data: dict): + self.data = data + self.data["pc_session"] = RestAPIUtil( + data["pc_ip"], user=data["pc_username"], + pwd=data["pc_password"], port="9440", secured=True) + self.image_node = ImagedNode(self.data["pc_session"]) + self.mgmt_static_ips = {} + self.ipmi_static_ips = {} + super(FoundationScript, self).__init__() + + def ip_dict(self, total_cluster_size: int, ip_range: list, ip_category: str, cluster_name: str = ""): + """Returns list of IP Dictionaries within the given IP range + + Args: + total_cluster_size (int): Cluster size + ip_range (str): ip range to separated by - + ip_category (str): IP category: management or ipmi + + Returns: + list: list of IP Dictionaries within the given IP range + """ + if ip_range: + start = struct.unpack('>I', socket.inet_aton(ip_range[0]))[0] + end = struct.unpack('>I', socket.inet_aton(ip_range[-1]))[0] + ip_list = [socket.inet_ntoa(struct.pack('>I', i)) for i in range(start, end+1)] + error = self.validate_ip(total_cluster_size, len(ip_list), ip_category, cluster_name) + if error: + return None, error + ip_dict = {ip: True for ip in ip_list} + return ip_dict, "" + else: + return {}, "" + + def validate_ip(self, total_cluster_size: int, num_ips: int, ip_category: str, cluster_name: str): + """Validate the given IPs with number of IPs required + + Args: + num_ips (int): number of IPs in ip range + ip_category (str): IP category: management or ipmi + """ + if ip_category == "management": + required_ips = total_cluster_size * 2 + elif ip_category == "ipmi": + required_ips = total_cluster_size + if num_ips < required_ips: + error = "Insufficient {0} IPs for imaging cluster(s) {1}. Required IPs: {2}. 
Provided IPs: {3}.\n".format( + ip_category, cluster_name, required_ips, num_ips) + return error + + def get_free_ip(self, ip_dict: dict, common_ip_dict: dict = None): + """get free ip from ip_dict based on availability + + Args: + ip_dict (list): list of IP Dictionaries + common_ip_dict (dict): Site level ip mapping for management/ipmi to be updated + + Returns: + str: Free IP + """ + for ip, available in ip_dict.items(): + if available: + ip_dict[ip] = False + if common_ip_dict: + common_ip_dict[ip] = False + return ip + else: + logger.error("No Free IP available") + return None + + def get_cluster_ip_mappings(self, cluster_info: dict, cluster_name: str): + """Populate static ip dictionary with the provided ip range for each cluster or site + + Args: + cluster_info (dict): Cluster information provided + + Returns: + (dict, dict): Management & IPMI static ip dictionary + """ + if cluster_info.get("network", None): + cluster_mgmt_ips, mgmt_ip_error = self.ip_dict(cluster_info["cluster_size"], cluster_info["network"]["mgmt_static_ips"], + ip_category="management", cluster_name=cluster_name) + ipmi_static_ips, ipmi_ip_error = self.ip_dict(cluster_info["cluster_size"], cluster_info["network"]["ipmi_static_ips"], + ip_category="ipmi", cluster_name=cluster_name) + if mgmt_ip_error or ipmi_ip_error: + error = mgmt_ip_error+ipmi_ip_error + return None, None, error + else: + cluster_mgmt_ips = self.mgmt_static_ips + ipmi_static_ips = self.ipmi_static_ips + return cluster_mgmt_ips, ipmi_static_ips, None + + def get_nodes_from_blockserial(self, block_serial_nodes: list, cluster_size: int): + """Get node details based on block_serials + + Args: + block_serials (list): list of block_serials + cluster_size (int): Cluster size + + Returns: + (list, str): (List of node details, Error Message) + """ + node_details = [] + for node in block_serial_nodes: + if node["available"] and node["imaged_node_uuid"] not in self.assigned_nodes: + if cluster_size > 2: + node_details.append(node) + self.assigned_nodes.append(node["imaged_node_uuid"]) + elif cluster_size == 1 and node["hardware_attributes"].get("one_node_cluster"): + node_details.append(node) + self.assigned_nodes.append(node["imaged_node_uuid"]) + elif cluster_size == 2 and node["hardware_attributes"].get("two_node_cluster"): + node_details.append(node) + self.assigned_nodes.append(node["imaged_node_uuid"]) + if len(node_details) == cluster_size: + logger.info("Got {} available node details".format(len(node_details))) + logger.debug("Node details: {}".format(node_details)) + return node_details, None + else: + return None, "Not enough available nodes found in block serials." + + def update_node_details(self, node_list: list, cluster_name: str, cluster_info: dict): + """Update the node details with provided site details + + Args: + node_list (list): List of node details with updated params + """ + updated_node_list = [] + if cluster_info.get("re-image", self.data["re-image"]) and cluster_info["cluster_size"] == 1: + logger.warning("Re-imaging for 1 node is not supported. 
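A quick worked example of the sizing rule enforced above, with illustrative addresses: a 3-node cluster needs 3 * 2 = 6 management IPs (one hypervisor and one CVM address per node) plus 3 IPMI IPs, so:

# self.ip_dict(3, ["10.0.0.10", "10.0.0.15"], ip_category="management")  -> 6 usable IPs, passes
# self.ip_dict(3, ["10.0.1.10", "10.0.1.11"], ip_category="ipmi")        -> 2 usable IPs, fails
#                                                                           with an "Insufficient ipmi IPs" error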
Creating cluster {} without imaging.".format(cluster_name)) + for node in node_list: + if not cluster_info.get("use_existing_network_settings", self.data["use_existing_network_settings"]): + network = cluster_info["network"] if cluster_info.get("network", None) else self.data["network"] + mgmt_netmask, mgmt_gateway, ipmi_netmask, ipmi_gateway = self.get_netmask_gateway(network, node) + node_spec = { + "hypervisor_ip": self.get_free_ip(cluster_info["cluster_mgmt_ips"], self.mgmt_static_ips), + "hypervisor_netmask": mgmt_netmask, + "hypervisor_gateway": mgmt_gateway, + "cvm_ip": self.get_free_ip(cluster_info["cluster_mgmt_ips"], self.mgmt_static_ips), + "cvm_netmask": mgmt_netmask, + "cvm_gateway": mgmt_gateway, + "ipmi_netmask": ipmi_netmask, + "rdma_passthrough": cluster_info.get("rdma_passthrough", False), + "cvm_vlan_id": cluster_info.get("cvm_vlan_id", None), + "hypervisor_type": self.data["imaging_parameters"]["hypervisor_type"], + "image_now": cluster_info.get("re-image", self.data.get("re-image", False)) if cluster_info["cluster_size"] > 1 else False, + "ipmi_ip": self.get_free_ip(cluster_info["ipmi_static_ips"], self.ipmi_static_ips) if cluster_info["ipmi_static_ips"] else node["ipmi_ip"], + "cvm_ram_gb": cluster_info.get("cvm_ram", 12), + "use_existing_network_settings": False, + "ipmi_gateway": ipmi_gateway, + } + elif cluster_info.get("use_existing_network_settings", self.data["use_existing_network_settings"]): + node_spec = { + "use_existing_network_settings": True, + "imaged_node_uuid": node["imaged_node_uuid"], + "image_now": cluster_info.get("re-image", self.data.get("re-image", False)) if cluster_info["cluster_size"] > 1 else False, + } + node.update(node_spec) + updated_node_list.append(node) + return updated_node_list + + def update_aos_ahv_spec(self): + """Update hypervisor iso details + """ + return { + "aos_package_url": self.data["aos_url"], + "hypervisor_iso_details": { + "hypervisor_type": self.data["imaging_parameters"]["hypervisor_type"], + "url": self.data["hypervisor_url"] + } + } + + def update_common_network_settings(self): + """ Update common network settings + """ + return { + "cvm_dns_servers": self.data["global_network"]["dns_servers"], + "hypervisor_dns_servers": self.data["global_network"]["dns_servers"], + "cvm_ntp_servers": self.data["global_network"]["ntp_servers"], + "hypervisor_ntp_servers": self.data["global_network"]["ntp_servers"], + } + + def get_node_detail_by_node_serial(self, node_serial_list: list, cluster_size: int): + """Fetch Node details based on node serial list + + Args: + node_serial_list (list): List of node serials + cluster_size (int): Cluster size + + Returns: + (list, str): (List of node details, Error Message) + """ + node_details = [] + for node in self.node_list: + if node["node_serial"] in node_serial_list: + node_details.append(node) + self.node_list.remove(node) + self.assigned_nodes.append(node["imaged_node_uuid"]) + if cluster_size == len(node_details): + return node_details, None + else: + # todo: Improve error logging, by checking if this node is assigned to different cluster or not discovered in FC + return None, "Not enough available nodes found in Foundation Central for given node_serails: {0}".format(node_serial_list) + + def get_cluster_data(self, cluster_name: str, cluster_info: dict, block_node_list: list = None): + """Create Cluster data for each cluster + + Args: + cluster_name (list): Cluster name + cluster_info (dict): Cluster info provided in json + block_node_list (list): List of nodes details in given block 
serials + + Returns: + (list, str): (List of node details, Error Message) + """ + cluster_data = { + "cluster_external_ip": cluster_info["cluster_vip"], + "redundancy_factor": cluster_info["redundancy_factor"], + "cluster_name": cluster_name, + "cluster_size": cluster_info["cluster_size"] + } + if cluster_info.get("node_serials", None): + if cluster_info["cluster_size"] > len(cluster_info["node_serials"]): + error = "Insufficient node serials {0} for cluster {1} size {2}".format(cluster_info["node_serials"], cluster_name, cluster_info["cluster_size"]) + else: + node_list, error = self.get_node_detail_by_node_serial(cluster_info["node_serials"], cluster_info["cluster_size"]) + else: + node_list, error = self.get_nodes_from_blockserial(block_node_list, cluster_info["cluster_size"]) + if error: + return None, error + if node_list: + cluster_data["nodes_list"] = self.update_node_details(node_list, cluster_name, cluster_info) + if cluster_info.get("re-image", self.data["re-image"]) and cluster_info["cluster_size"] > 1: + cluster_data.update(self.update_aos_ahv_spec()) + return cluster_data, None + + def get_site_cluster_data(self): + """ + Generate cluster data for all the clusters in the site + + Returns: + (list): List of cluster datas for the given site + """ + cluster_data_list = [] + total_cluster_size = 0 + self.node_list, error = self.image_node.node_details() + + # Dictionaries to store cluster with and without node details separately + node_serial_cluster_data = {} + block_serial_cluster_data = {} + + if error: + logger.error(error) + sys.exit(1) + if self.node_list: + self.imaging = ImagedCluster(self.data["pc_session"]) + for cluster_name, cluster_info in self.data["clusters"].items(): + if cluster_info.get("node_serials", None): + node_serial_cluster_data[cluster_name] = cluster_info + else: + block_serial_cluster_data[cluster_name] = cluster_info + if not cluster_info.get("network"): + # Calculating total cluster size to check sufficient management & ipmi IPs + total_cluster_size += cluster_info["cluster_size"] + common_network_settings = self.update_common_network_settings() + + # Adding node_serail & block_serial cluster data to ordered dict, the cluster with node serial will be added first for cluster deployment + site_cluster_info = {} + site_cluster_info.update(node_serial_cluster_data) + + # Get the static ip list for site static ip list + static_ip_error = None + if not self.data["use_existing_network_settings"]: + self.mgmt_static_ips, mgmt_ip_error = self.ip_dict(total_cluster_size, self.data["network"]["mgmt_static_ips"], ip_category="management") + self.ipmi_static_ips, ipmi_ip_error = self.ip_dict(total_cluster_size, self.data["network"]["ipmi_static_ips"], ip_category="ipmi") + if mgmt_ip_error or ipmi_ip_error: + static_ip_error = mgmt_ip_error+ipmi_ip_error + self.exceptions.append("Skipping Cluster deployment which shares site level netowrk. 
Reason: {}".format(static_ip_error)) + # Exclusing cluster deployment which share site level netowrk for deployment if there is static ip error + if not static_ip_error: + site_cluster_info.update(block_serial_cluster_data) + + # todo: order block_serial_cluster_data based on cluster size + # block_serial_cluster_data = sorted(block_serial_cluster_data.items(), key=lambda x: x[1]['cluster_size'], reverse=False) + + # Get the nodes based on block serials provided + node_list, error = self.image_node.node_details_by_block_serial(self.data["blocks_serial_numbers"]) + if error: + self.exceptions.append(error) + if node_list: + sorted_imaged_nodes = sorted(node_list, key=lambda i: (i['block_serial'], i['node_position'])) + + # Gathering data for clusters with node serials + for cluster_name, cluster_info in site_cluster_info.items(): + cluster_mgmt_ips, ipmi_static_ips, error = self.get_cluster_ip_mappings(cluster_info, cluster_name) + if not error: + cluster_info["cluster_mgmt_ips"] = cluster_mgmt_ips + cluster_info["ipmi_static_ips"] = ipmi_static_ips + cluster_data, error = self.get_cluster_data(cluster_name, cluster_info, sorted_imaged_nodes) + if error: + self.exceptions.append(error) + else: + cluster_data["common_network_settings"] = common_network_settings + cluster_data_list.append(cluster_data) + else: + self.exceptions.append(error) + return cluster_data_list + + def execute(self, **kwargs): + """Run Image cluster nodes + """ + logger.debug(self.data) + self.assigned_nodes = [] + cluster_data_list = self.get_site_cluster_data() + image_cluster_script = BatchScript(parallel=True) + for cluster_data in cluster_data_list: + image_cluster_script.add(ImageClusterScript(data=self.data, cluster_data=cluster_data, imaging_obj=self.imaging)) + image_cluster_script.run() + + def verify(self): + pass + + @staticmethod + def validate_redundancy_factor(cluster_size: int, redundancy_factor: int): + """Validate the redundancy factor for a given cluster based on cluster size and redundancy factor + + Args: + cluster_size (int): Cluster size + redundancy_factor (int): Redundancy factor + """ + if redundancy_factor not in [2, 3]: + logger.error('Invalid redundancy factor {}. 
Nutanix supports redundancy factor 2, and also redundancy factor 3 only if the cluster has 5+ nodes'.format(redundancy_factor)) + sys.exit(1) + elif redundancy_factor == 3 and cluster_size < 5: + logger.error('With redundancy factor {0}, cluster size must be 5 or more and you selected {1} which is not compatible!'.format(redundancy_factor, cluster_size)) + sys.exit(1) + + @staticmethod + def get_netmask_gateway(network, node: dict): + """Get the Netmask Gateway for Management and IPMI network + + Args: + network (dict): Management & IPMI network + node (dict): Discovered Node detail + + Returns: + tuple: Netmask Gateway for management & IPMI network + """ + mgmt_netmask = network["mgmt_netmask"] if network["mgmt_netmask"] else node["mgmt_netmask"] + mgmt_gateway = network["mgmt_gateway"] if network["mgmt_gateway"] else node["mgmt_gateway"] + ipmi_netmask = network["ipmi_netmask"] if network["ipmi_netmask"] else node["ipmi_netmask"] + ipmi_gateway = network["ipmi_gateway"] if network["ipmi_gateway"] else node["ipmi_gateway"] + return mgmt_netmask, mgmt_gateway, ipmi_netmask, ipmi_gateway diff --git a/framework/scripts/python/helpers/__init__.py b/framework/scripts/python/helpers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/framework/scripts/python/helpers/batch_script.py b/framework/scripts/python/helpers/batch_script.py new file mode 100644 index 0000000..9ba3584 --- /dev/null +++ b/framework/scripts/python/helpers/batch_script.py @@ -0,0 +1,106 @@ +import concurrent.futures +import multiprocessing +from helpers.log_utils import get_logger +from scripts.python.script import Script + +logger = get_logger(__name__) + + +class BatchScript(Script): + """ + We can group scripts together and execute them in serial or parallel + """ + + def __init__(self, parallel: bool = False, **kwargs): + """ + Constructor for BatchScript. + Args: + kwargs(dict): + parallel(bool, optional): Whether to run the sub-steps in parallel, + default value is False + """ + self.script_list = [] + self._parallel = parallel + self.num_total_scripts = 0 + self.num_passed_scripts = 0 + self.pass_rate = 0.0 + super(BatchScript, self).__init__() + + def add(self, script): + """ + Add one script + + Args: + script(Operation): The script object to be added to the group. + + Returns: + None + """ + if not script: + logger.error("Script is none, returning.") + return + self.script_list.append(script) + + def add_all(self, script_list): + """ + Add a list of scripts + Args: + script_list(list): A list of scripts to be added. + + Returns: + None + """ + if script_list: + for op in script_list: + self.add(op) + + def run(self): + """ + Execute all the scripts in sequential or parallel + + Returns: + None + """ + + if self._parallel: + self._parallel_execute() + else: + self._sequential_execute() + + def _sequential_execute(self): + """ + Execute all the steps in sequential. + + Returns + None + """ + for script in self.script_list: + try: + script.run() + except Exception as e: + logger.error(e) + + def _parallel_execute(self): + """ + Execute all the scripts in parallel. 
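A minimal usage sketch of the BatchScript helper above; the member scripts are stand-ins, and any Script subclass from this change would do:

batch = BatchScript(parallel=True)
batch.add_all([EnableMicroseg(data), CreateCategoryPc(data)])
batch.add(ConnectToAz(data))
batch.run()   # with parallel=True, script.run() is mapped across a thread pool sized to the CPU count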
+ + Returns: + None + """ + # Get the number of available CPU cores + num_cores = multiprocessing.cpu_count() + + # Set the value of max_workers based on the number of CPU cores + max_workers = num_cores + with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: + for result in executor.map(lambda script: script.run(), self.script_list): + try: + logger.debug(result) + except Exception as e: + logger.error(e) + + def execute(self): + pass + + def verify(self, **kwargs): + pass diff --git a/framework/scripts/python/helpers/entity.py b/framework/scripts/python/helpers/entity.py new file mode 100644 index 0000000..2d18d1c --- /dev/null +++ b/framework/scripts/python/helpers/entity.py @@ -0,0 +1,192 @@ +import copy +import sys +from base64 import b64encode +from typing import Union, List, Dict +from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse +from helpers.rest_utils import RestAPIUtil +from helpers.general_utils import intersection +from helpers.log_utils import get_logger + +logger = get_logger(__name__) + + +class Entity: + entities_limitation = 20 + entity_type = "entities" + port = "9440" + + def __init__( + self, + ip=None, + username=None, + password=None, + resource_type=None, + session=None, + headers=None, + scheme="https" + ): + self.build_spec_methods = None + secured = True if scheme == "https" else False + self.session = session if session else RestAPIUtil( + ip, user=username, pwd=password, port=self.port, headers=headers, secured=secured) + self.resource = resource_type + + def read( + self, + uuid=None, + endpoint=None, + query=None, + timeout=30, + ): + uri = self.resource + "/{0}".format(uuid) if uuid else self.resource + if endpoint: + uri = uri + "/{0}".format(endpoint) + if query: + uri = self._build_url_with_query(uri, query) + resp = self.session.get(uri, timeout=timeout) + + if self.entity_type in resp: + resp = resp[self.entity_type] + return resp + + def create( + self, + data=None, + endpoint=None, + query=None, + jsonify=True, + timeout=30 + ): + uri = self.resource + "/{0}".format(endpoint) if endpoint else self.resource + if query: + uri = self._build_url_with_query(uri, query) + return self.session.post( + uri, + data=data, + jsonify=jsonify, + timeout=timeout + ) + + def update( + self, + data=None, + endpoint=None, + query=None, + timeout=30 + ): + uri = self.resource + "/{0}".format(endpoint) if endpoint else self.resource + if query: + uri = self._build_url_with_query(uri, query) + return self.session.put( + uri, + data=data, + timeout=timeout + ) + + def list( + self, + endpoint=None, + use_base_url=False, + data=None, + custom_filters=None, + timeout=30, + entity_type=None + ) -> Union[List, Dict]: + uri = self.resource if use_base_url else self.resource + "/list" + if endpoint: + uri = uri + "/{0}".format(endpoint) + + resp = self.session.post(uri, data=data, timeout=timeout) + + entity_type = entity_type if entity_type else self.entity_type + if custom_filters: + entities_list = self._filter_entities(resp[entity_type], custom_filters) + resp = entities_list + + if entity_type in resp: + resp = resp[entity_type] + return resp + + def upload( + self, + source, + data, + endpoint="import_file", + query=None, + timeout=30, + ): + uri = self.resource + "/{0}".format(endpoint) if endpoint else self.resource + if query: + uri = self._build_url_with_query(uri, query) + return self._upload_file( + uri, + source, + data, + timeout=timeout, + ) + + def get_spec(self, old_spec=None, params=None): + spec = 
copy.deepcopy(old_spec) or self._get_default_spec() + for param, config in params.items(): + build_spec_method = self.build_spec_methods.get(param) + if build_spec_method and config: + spec, error = build_spec_method(spec, config) + if error: + return None, error + return spec, None + + @staticmethod + def _build_headers(module, additional_headers): + headers = {"Content-Type": "application/json", "Accept": "application/json"} + if additional_headers: + headers.update(additional_headers) + usr = module.params.get("nutanix_username") + pas = module.params.get("nutanix_password") + if usr and pas: + cred = "{0}:{1}".format(usr, pas) + try: + encoded_cred = b64encode(bytes(cred, encoding="ascii")).decode("ascii") + except BaseException: + encoded_cred = b64encode(bytes(cred).encode("ascii")).decode("ascii") + auth_header = "Basic " + encoded_cred + headers.update({"Authorization": auth_header}) + return headers + + @staticmethod + def _build_url_with_query(url, query): + url = urlparse(url) + query_ = dict(parse_qsl(url.query)) + query_.update(query) + query_ = urlencode(query_) + url = url._replace(query=query_) + return urlunparse(url) + + @staticmethod + def _filter_entities(entities, custom_filters): + filtered_entities = [] + for entity in entities: + if intersection(entity, copy.deepcopy(custom_filters)): + filtered_entities.append(entity) + return filtered_entities + + # upload file in chunks to the given url + def _upload_file(self, uri, source, data, timeout=120): + headers = { + 'Accept': 'application/json' + } + kwargs = { + "data": data, + "headers": headers, + "files": {'file': ('blob', open(source, 'rb'), 'application/json')}, + "timeout": timeout, + "jsonify": False + } + try: + response = self.session.post(uri=uri, **kwargs) + return response + except Exception as e: + logger.error(e) + + @staticmethod + def _get_default_spec(): + pass diff --git a/framework/scripts/python/helpers/fc/__init__.py b/framework/scripts/python/helpers/fc/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/framework/scripts/python/helpers/fc/foundation_central.py b/framework/scripts/python/helpers/fc/foundation_central.py new file mode 100644 index 0000000..8c18307 --- /dev/null +++ b/framework/scripts/python/helpers/fc/foundation_central.py @@ -0,0 +1,10 @@ +from scripts.python.helpers.entity import Entity + + +class FoundationCentral(Entity): + __BASEURL__ = "api/fc/v1" + + def __init__(self, session, resource_type): + resource_type = self.__BASEURL__ + resource_type + super(FoundationCentral, self).__init__( + session=session, resource_type=resource_type) diff --git a/framework/scripts/python/helpers/fc/imaged_clusters.py b/framework/scripts/python/helpers/fc/imaged_clusters.py new file mode 100644 index 0000000..4edff09 --- /dev/null +++ b/framework/scripts/python/helpers/fc/imaged_clusters.py @@ -0,0 +1,159 @@ +from copy import deepcopy +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.fc.foundation_central import FoundationCentral + +__metaclass__ = type + + +class ImagedCluster(FoundationCentral): + entity_type = "imaged_clusters" + + def __init__(self, session: RestAPIUtil): + self.resource_type = "/imaged_clusters" + super(ImagedCluster, self).__init__(session, self.resource_type) + self.build_spec_methods = { + "cluster_external_ip": self._build_spec_cluster_exip, + "common_network_settings": self._build_spec_common_network_settings, + "hypervisor_iso_details": self._build_spec_hypervisor_iso_details, + "storage_node_count": 
self._build_spec_storage_node_count, + "redundancy_factor": self._build_spec_redundancy_factor, + "cluster_name": self._build_spec_cluster_name, + "aos_package_url": self._build_spec_aos_package_url, + "cluster_size": self._build_spec_cluster_size, + "aos_package_sha256sum": self._build_spec_aos_package_sha256sum, + "timezone": self._build_spec_timezone, + "nodes_list": self._build_spec_nodes_list, + "skip_cluster_creation": self._build_spec_skip_cluster_creation, + "filters": self._build_spec_filters, + } + + def _get_default_spec(self): + return deepcopy( + { + "cluster_external_ip": "", + "common_network_settings": {}, + "redundancy_factor": 2, + "cluster_name": "", + "aos_package_url": None, + "nodes_list": [], + } + ) + + def _build_spec_cluster_exip(self, payload, value): + payload["cluster_external_ip"] = value + + return payload, None + + def _build_spec_storage_node_count(self, payload, value): + payload["storage_node_count"] = value + return payload, None + + def _build_spec_redundancy_factor(self, payload, value): + payload["redundancy_factor"] = value + return payload, None + + def _build_spec_cluster_name(self, payload, value): + payload["cluster_name"] = value + return payload, None + + def _build_spec_aos_package_url(self, payload, value): + payload["aos_package_url"] = value + return payload, None + + def _build_spec_cluster_size(self, payload, value): + payload["cluster_size"] = value + return payload, None + + def _build_spec_aos_package_sha256sum(self, payload, value): + payload["aos_package_sha256sum"] = value + return payload, None + + def _build_spec_timezone(self, payload, value): + payload["timezone"] = value + return payload, None + + def _build_spec_skip_cluster_creation(self, payload, value): + payload["skip_cluster_creation"] = value + return payload, None + + def _build_spec_common_network_settings(self, payload, nsettings): + net = self._get_default_network_settings(nsettings) + payload["common_network_settings"] = net + return payload, None + + def _build_spec_hypervisor_iso_details(self, payload, value): + hiso = self._get_default_hypervisor_iso_details(value) + payload["hypervisor_iso_details"] = hiso + return payload, None + + def _build_spec_nodes_list(self, payload, node_details): + nodes_list = [] + for node in node_details: + spec = self._get_default_nodes_spec(node) + nodes_list.append(spec) + payload["nodes_list"] = node_details + return payload, None + + def _build_spec_filters(self, payload, value): + payload["filters"] = value + return payload, None + + def _get_default_hypervisor_iso_details(self, isodetails): + spec = {} + default_spec = { + "hyperv_sku": None, + "url": None, + "hypervisor_type": None, + "hyperv_product_key": None, + "sha256sum": None, + } + for k in default_spec: + v = isodetails.get(k) + if v: + spec[k] = v + return spec + + def _get_default_network_settings(self, cnsettings): + spec = {} + default_spec = { + "cvm_dns_servers": [], + "hypervisor_dns_servers": [], + "cvm_ntp_servers": [], + "hypervisor_ntp_servers": [], + } + + for k in default_spec: + v = cnsettings.get(k) + if v: + spec[k] = v + return spec + + def _get_default_nodes_spec(self, node): + spec = {} + default_spec = { + "cvm_gateway": None, + "ipmi_netmask": None, + "rdma_passthrough": False, + "imaged_node_uuid": None, + "cvm_vlan_id": None, + "hypervisor_type": None, + "image_now": True, + "hypervisor_hostname": None, + "hypervisor_netmask": None, + "cvm_netmask": None, + "ipmi_ip": None, + "hypervisor_gateway": None, + "hardware_attributes_override": {}, + 
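As an illustration of how the spec-builder pattern above is meant to be consumed, here is a minimal, hypothetical sketch that builds an imaged-cluster payload and submits it to Foundation Central; the PC address, credentials, cluster name, IPs and node entries are placeholder values, not part of this change.

from helpers.rest_utils import RestAPIUtil
from scripts.python.helpers.fc.imaged_clusters import ImagedCluster

# Placeholder Prism Central endpoint and credentials, for illustration only.
pc_session = RestAPIUtil("10.1.1.10", user="admin", pwd="secret", port="9440", headers=None, secured=True)

imaged_cluster = ImagedCluster(pc_session)
spec, error = imaged_cluster.get_spec(params={
    "cluster_name": "demo-cluster",
    "cluster_external_ip": "10.1.1.20",
    "redundancy_factor": 2,
    "aos_package_url": "http://fileserver.example.com/aos.tar.gz",
    "nodes_list": [
        {"imaged_node_uuid": "<node-uuid>", "cvm_ip": "10.1.1.31",
         "hypervisor_ip": "10.1.1.41", "hypervisor_hostname": "node-1", "image_now": True},
    ],
})
if error:
    raise Exception(f"Failed to build imaging spec: {error}")

# POSTs the spec to api/fc/v1/imaged_clusters and returns the API response.
deployment = imaged_cluster.create(data=spec)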
"cvm_ram_gb": None, + "cvm_ip": None, + "hypervisor_ip": None, + "use_existing_network_settings": False, + "ipmi_gateway": None, + } + + for k in default_spec: + if k in node: + v = node.get(k) + if v: + spec[k] = v + return spec diff --git a/framework/scripts/python/helpers/fc/imaged_nodes.py b/framework/scripts/python/helpers/fc/imaged_nodes.py new file mode 100644 index 0000000..39e947f --- /dev/null +++ b/framework/scripts/python/helpers/fc/imaged_nodes.py @@ -0,0 +1,41 @@ +from copy import deepcopy +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.fc.foundation_central import FoundationCentral + +__metaclass__ = type + + +class ImagedNode(FoundationCentral): + entity_type = "imaged_nodes" + + def __init__(self, session: RestAPIUtil): + self.resource_type = "/imaged_nodes" + super(ImagedNode, self).__init__(session, self.resource_type) + self.build_spec_methods = {"filters": self._build_spec_filters} + + def _build_spec_filters(self, payload, value): + payload["filters"] = value + return payload, None + + def _get_default_spec(self): + return deepcopy({"filters": {"node_state": ""}}) + + # Helper functions + def node_details_by_block_serial(self, block_serials: list, node_state: str = "STATE_AVAILABLE"): + spec = self._get_default_spec() + spec["filters"]["node_state"] = node_state + resp = self.list(data=spec) + node_list = [] + for node in resp: + if node["block_serial"] in block_serials: + node_list.append(node) + return node_list, None + + # Helper function + def node_details(self, node_state: str = "STATE_AVAILABLE"): + spec = self._get_default_spec() + spec["filters"]["node_state"] = node_state + resp = self.list(data=spec) + if not resp: + return None, "No available nodes registered to Foundation Central." + return resp, None diff --git a/framework/scripts/python/helpers/pc_batch_op.py b/framework/scripts/python/helpers/pc_batch_op.py new file mode 100644 index 0000000..4bcd9ac --- /dev/null +++ b/framework/scripts/python/helpers/pc_batch_op.py @@ -0,0 +1,157 @@ +import copy +import json +from typing import List, Optional +from helpers.log_utils import get_logger +from helpers.rest_utils import RestAPIUtil + +logger = get_logger(__name__) + +BATCH_TIMEOUT = 6 * 60 +MAX_BATCH_API_CALLS = 60 + + +class PcBatchOp(object): + """ + This is helper class to do V3 Batch api calls. 
+ """ + BATCH_BASE = "batch" + PAYLOAD = { + "action_on_failure": "CONTINUE", + "execution_order": "SEQUENTIAL", + "api_request_list": [] + } + + def __init__(self, session: RestAPIUtil, base_url: str, **kwargs): + """ + Default Constructor for PcBatchOp class + Args: + cluster (Cluster): Cluster object + kwargs: + base_url (str): URL_BASE of the Entity + kind (str): V3_KIND of the Entity + """ + self.session = session + # Batch APIs are failing for "api/nutanix/v3" hence adding an escape character for /v + self.base_url = "api/nutanix//v3" + self.resource_type = kwargs.get("resource_type") + self.kind = kwargs.get("kind") + + def batch(self, api_request_list: List): + """ + Call batch API + Args: + api_request_list (list): Payload for batch + Returns: + list of api_response_list + """ + api_request_chunks = [ + api_request_list[i:i + MAX_BATCH_API_CALLS] + for i in range(0, len(api_request_list), MAX_BATCH_API_CALLS) + ] + + api_response_list = [] + for request_list in api_request_chunks: + payload = copy.deepcopy(self.PAYLOAD) + + payload["api_request_list"] = request_list + logger.debug("Batch Payload: {}".format(payload)) + + batch_response = self.session.post( + uri=f"{self.base_url}/{self.BATCH_BASE}", + data=payload, + timeout=BATCH_TIMEOUT + ) + if batch_response.get('api_response_list', None): + api_response_list.extend(batch_response.get('api_response_list')) + return api_response_list + + def batch_create(self, request_payload_list: Optional[List]): + """ + Create entities using v3 batch api + + Args: + request_payload_list(list): request payload dict including spec, + metadata and api_version + + Returns: + list: List of Task UUIDs + """ + api_request_payload = { + "operation": "POST", + "path_and_params": f"{self.base_url}{self.resource_type}", + "body": { + } + } + + api_request_list = [] + if request_payload_list: + for request_payload in request_payload_list: + api_request = copy.deepcopy(api_request_payload) + + if request_payload.get("spec"): + api_request["body"]["spec"] = request_payload.get("spec", None) + api_request["body"]["metadata"] = request_payload.get("metadata", {"kind": self.kind}) + else: + api_request["body"] = request_payload + api_request_list.append(api_request) + api_response_list = self.batch(api_request_list) + + return get_task_uuid_list(api_response_list) + + def batch_update(self, entity_update_list: List): + """ + Batch update an entity + Args: + entity_update_list (list): List of dicts with uuid, spec and metadata + eg, [{ + "uuid": uuid, "spec": spec, "metadata": metadata}} + , ..] 
+ Returns: + list : Task UUID list + """ + api_request_list = [] + for entity_data in entity_update_list: + if entity_data.get('uuid') and entity_data.get('spec') and entity_data.get('metadata'): + request = { + "operation": "PUT", + "path_and_params": f"{self.base_url}{self.resource_type}/{entity_data['uuid']}", + "body": { + "spec": entity_data.get('spec'), + "metadata": entity_data.get('metadata') + } + } + api_request_list.append(request) + + api_response_list = self.batch(api_request_list) + return get_task_uuid_list(api_response_list) + + +def get_task_uuid_list(api_response_list: List): + """ + Parse the batch api response list to get the Task uuids + Args: + api_response_list(list): Batch api response list + Returns: + list : list of Task uuids + """ + task_uuid_list = [] + for response in api_response_list: + if response.get("status"): + if not response["status"].startswith("2"): + logger.error(response) + + api_response = response.get("api_response") + + # todo bug + # sometimes api_response in str + if type(api_response) == str: + try: + api_response = json.loads(api_response) + except: + raise Exception("Cannot get task list to monitor for the batch call!") + + if api_response.get('status', {}).get('execution_context', {}).get('task_uuid'): + task_uuid = api_response['status']['execution_context']['task_uuid'] + task_uuid_list.append(task_uuid) + + return task_uuid_list diff --git a/framework/scripts/python/helpers/pc_entity.py b/framework/scripts/python/helpers/pc_entity.py new file mode 100644 index 0000000..4cabfd0 --- /dev/null +++ b/framework/scripts/python/helpers/pc_entity.py @@ -0,0 +1,57 @@ +from copy import deepcopy +from typing import Optional +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.entity import Entity +from scripts.python.helpers.pc_batch_op import PcBatchOp + + +class PcEntity(Entity): + __BASEURL__ = "api/nutanix/v3" + resource_type = "" + kind = "" + V3_LIST_CHUNKSIZE = 250 + + def __init__(self, session: RestAPIUtil, **kwargs): + resource_type = self.__BASEURL__ + self.resource_type + self.batch_op = PcBatchOp(session, base_url=self.__BASEURL__, resource_type=self.resource_type, kind=self.kind, **kwargs) + super(PcEntity, self).__init__(session=session, resource_type=resource_type) + + def list(self, **kwargs): + payload = { + "kind": kwargs.pop("kind", self.kind), + "offset": kwargs.pop("offset", 0), + "filter": kwargs.pop("filter", ""), + "length": kwargs.pop("length", self.V3_LIST_CHUNKSIZE) + } + return super(PcEntity, self).list(data=payload, **kwargs) + + def get_entity_by_name(self, entity_name: str, **kwargs): + entities = self.list(**kwargs) + + for entity in entities: + if entity.get("spec", {}).get("name"): + name = entity["spec"]["name"] + elif entity.get("status", {}).get("name"): + name = entity["status"]["name"] + else: + continue + if name == entity_name: + return entity + return None + + def get_uuid_by_name(self, entity_name: Optional[str] = None, entity_data: Optional[dict] = None, **kwargs): + if not entity_data: + if not entity_name: + raise Exception("Entity name is needed to get the UUID") + entity_data = self.get_entity_by_name(entity_name, **kwargs) + if not entity_data: + return None + return entity_data["metadata"]["uuid"] + + def reference_spec(self): + return deepcopy( + { + "kind": self.kind, + "uuid": "" + } + ) diff --git a/framework/scripts/python/helpers/pe_entity_v1.py b/framework/scripts/python/helpers/pe_entity_v1.py new file mode 100644 index 0000000..97ff40b --- /dev/null +++ 
b/framework/scripts/python/helpers/pe_entity_v1.py @@ -0,0 +1,37 @@ +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.entity import Entity + + +class PeEntityV1(Entity): + __BASEURL__ = "api/nutanix/v1" + resource_type = "" + + def __init__(self, session: RestAPIUtil, proxy_cluster_uuid=None): + resource_type = self.__BASEURL__ + self.resource_type + self.proxy_cluster_uuid = proxy_cluster_uuid + super(PeEntityV1, self).__init__(session=session, resource_type=resource_type) + + """ + This method uses available fan-out API in PC. These APIs are not documented for external use but are used + by the PC UI extensively. To access these APIs use the same URL as PE with the PC IP and additional query + parameter proxyClusterUuid=all_clusters or pass the respective cluster uuid + + api_version: v1/v2 + """ + def get_proxy_endpoint(self, endpoint): + if self.proxy_cluster_uuid: + separator = "&" if "?" in endpoint else "?" + endpoint = f"{endpoint}{separator}proxyClusterUuid={self.proxy_cluster_uuid}" + return endpoint + + def read(self, **kwargs): + endpoint = self.get_proxy_endpoint(kwargs.pop("endpoint", "")) + return super(PeEntityV1, self).read(endpoint=endpoint, **kwargs) + + def create(self, **kwargs): + endpoint = self.get_proxy_endpoint(kwargs.pop("endpoint", "")) + return super(PeEntityV1, self).create(endpoint=endpoint, **kwargs) + + def update(self, **kwargs): + endpoint = self.get_proxy_endpoint(kwargs.pop("endpoint", "")) + return super(PeEntityV1, self).update(endpoint=endpoint, **kwargs) diff --git a/framework/scripts/python/helpers/pe_entity_v2.py b/framework/scripts/python/helpers/pe_entity_v2.py new file mode 100644 index 0000000..5005b39 --- /dev/null +++ b/framework/scripts/python/helpers/pe_entity_v2.py @@ -0,0 +1,43 @@ +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.entity import Entity + + +class PeEntityV2(Entity): + __BASEURL__ = "api/nutanix/v2.0" + resource_type = "" + + def __init__(self, session: RestAPIUtil, proxy_cluster_uuid=None): + resource_type = self.__BASEURL__ + self.resource_type + self.proxy_cluster_uuid = proxy_cluster_uuid + super(PeEntityV2, self).__init__(session=session, resource_type=resource_type) + + """ + This method uses available fan-out API in PC. These APIs are not documented for external use but are used + by the PC UI extensively. To access these APIs use the same URL as PE with the PC IP and additional query + parameter proxyClusterUuid=all_clusters or pass the respective cluster uuid + + api_version: v1/v2 + """ + def get_proxy_endpoint(self, endpoint): + if self.proxy_cluster_uuid: + separator = "&" if "?" in endpoint else "?" 
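A small sketch of how the proxyClusterUuid fan-out described above plays out when a PE v1 entity is driven through Prism Central; the PC address, credentials and cluster UUID are placeholders.

from helpers.rest_utils import RestAPIUtil
from scripts.python.helpers.pe_entity_v1 import PeEntityV1

# Placeholder PC session; any PeEntityV1 subclass is used the same way.
pc_session = RestAPIUtil("10.1.1.10", user="admin", pwd="secret", port="9440", headers=None, secured=True)
entity = PeEntityV1(pc_session, proxy_cluster_uuid="0005a1b2-aaaa-bbbb-cccc-000000000000")

entity.get_proxy_endpoint("pulse")
# -> "pulse?proxyClusterUuid=0005a1b2-aaaa-bbbb-cccc-000000000000"
entity.get_proxy_endpoint("containers?projection=STATS")
# -> "containers?projection=STATS&proxyClusterUuid=0005a1b2-aaaa-bbbb-cccc-000000000000"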
+ endpoint = f"{endpoint}{separator}proxyClusterUuid={self.proxy_cluster_uuid}" + return endpoint + + def read(self, **kwargs): + endpoint = self.get_proxy_endpoint(kwargs.pop("endpoint", "")) + return super(PeEntityV2, self).read(endpoint=endpoint, **kwargs) + + def create(self, **kwargs): + endpoint = self.get_proxy_endpoint(kwargs.pop("endpoint", "")) + return super(PeEntityV2, self).create(endpoint=endpoint, **kwargs) + + def update(self, **kwargs): + endpoint = self.get_proxy_endpoint(kwargs.pop("endpoint", "")) + return super(PeEntityV2, self).update(endpoint=endpoint, **kwargs) + + def get_uuid(self): + uuid = self.read().get("uuid") + if not uuid: + raise Exception(f"Could not fetch the UUID of the entity {type(self).__name__}") + return uuid diff --git a/framework/scripts/python/helpers/state_monitor/__init__.py b/framework/scripts/python/helpers/state_monitor/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/framework/scripts/python/helpers/state_monitor/application_state_monitor.py b/framework/scripts/python/helpers/state_monitor/application_state_monitor.py new file mode 100644 index 0000000..4ab1916 --- /dev/null +++ b/framework/scripts/python/helpers/state_monitor/application_state_monitor.py @@ -0,0 +1,60 @@ +from helpers.log_utils import get_logger +from scripts.python.helpers.state_monitor.state_monitor import StateMonitor +from scripts.python.helpers.v3.application import Application + +logger = get_logger(__name__) + + +class ApplicationStateMonitor(StateMonitor): + """ + The class to monitor the application state. + """ + DEFAULT_TIMEOUT_IN_SEC = 20*60 + DEFAULT_CHECK_INTERVAL_IN_SEC = 120 + + def __init__(self, session, **kwargs): + """ + The constructor for ApplicationStateMonitor + Args: + session: request session to query the API + kwargs: + expected_states(list): expected states of application + NOTE: default value is ['running'] + unexpected_states(list): unexpected states of entity + NOTE: default value is ['error'] + application_uuid(str): uuid of application + application_op(ApplicationOp): application entity op. + """ + self.session = session + self._expected_states = kwargs.get('expected_states', ['running']) + self._unexpected_states = kwargs.get('unexpected_states', ['error']) + self._application_uuid = kwargs.get('application_uuid') + + def check_status(self): + """ + Checks the state if "application_uuid" is among "expected_states" + + Returns: + True if entity in required states + False if entity is not in required states within timeout. + """ + + application_op = Application(self.session) + response = application_op.read(uuid=self._application_uuid) + + if response and response.get('status'): + status = response['status']['state'] + else: + logger.error("Error in the response from the API call") + return None, False + + if status in self._unexpected_states: + logger.error(f"State for application with uuid [{self._application_uuid}] is not expected." 
+ "\nState of this application is [{status}]") + return None, True + elif status in self._expected_states: + logger.info(f"State on Application deployment changed successfully to [{status}]") + return response, True + else: + logger.warning("Application state did not match the expected state") + return None, False diff --git a/framework/scripts/python/helpers/state_monitor/blueprint_launch_monitor.py b/framework/scripts/python/helpers/state_monitor/blueprint_launch_monitor.py new file mode 100644 index 0000000..813b9fb --- /dev/null +++ b/framework/scripts/python/helpers/state_monitor/blueprint_launch_monitor.py @@ -0,0 +1,51 @@ +from helpers.log_utils import get_logger +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.state_monitor.state_monitor import StateMonitor +from scripts.python.helpers.v3.blueprint import Blueprint + +logger = get_logger(__name__) + + +class BlueprintLaunchMonitor(StateMonitor): + """ + The class to wait for blueprint launch status to come in expected state + """ + DEFAULT_CHECK_INTERVAL_IN_SEC = 5 + DEFAULT_TIMEOUT_IN_SEC = 300 + + def __init__(self, session: RestAPIUtil, **kwargs): + """ + The constructor for BlueprintLaunchMonitor + Args: + kwargs: + session: request session to query the API + expected_state(str): expected state of blueprint launch required + blueprint_uuid(str): uuid of blueprint + request_id(str): id of launch request + """ + self.session = session + self.expected_state = kwargs.get('expected_state', 'success') + self.blueprint_uuid = kwargs.get('blueprint_uuid') + self.request_id = kwargs.get('request_id') + + def check_status(self): + """ + Checks the state of blueprint launch if expected state or not + Returns: + True if in expected state else false + """ + blueprint_op = Blueprint(self.session) + response = blueprint_op.read(uuid=self.blueprint_uuid, endpoint=f"pending_launches/{self.request_id}") + + if response and response.get('status'): + status = response['status']['state'] + else: + logger.error("Error in the response from the API call") + return None, False + + if status != self.expected_state: + logger.warning("Blueprint launch did not match the expected state") + return None, False + else: + logger.info(f"State on Blueprint launch changed successfully to [{status}]") + return response, True diff --git a/framework/scripts/python/helpers/state_monitor/pc_register_monitor.py b/framework/scripts/python/helpers/state_monitor/pc_register_monitor.py new file mode 100644 index 0000000..aaa7842 --- /dev/null +++ b/framework/scripts/python/helpers/state_monitor/pc_register_monitor.py @@ -0,0 +1,45 @@ +from helpers.log_utils import get_logger +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.state_monitor.state_monitor import StateMonitor +from scripts.python.helpers.v3.cluster import Cluster as PcCluster + +logger = get_logger(__name__) + + +class PcRegisterMonitor(StateMonitor): + """ + The class to wait for blueprint launch status to come in expected state + """ + DEFAULT_CHECK_INTERVAL_IN_SEC = 30 + DEFAULT_TIMEOUT_IN_SEC = 10 * 60 + + def __init__(self, session: RestAPIUtil, **kwargs): + """ + The constructor for PcRegisterMonitor + Args: + kwargs: + session: request session to query the API + pe_uuids(list): List of UUIDs of PE clusters to be verified + """ + self.session = session + self._pe_uuids = kwargs.get('pe_uuids') + + def check_status(self): + """ + Check whether newly registered PE's show up in PC + + Returns: + bool: True + """ + pc_cluster = PcCluster(self.session) + 
+        pc_cluster.get_pe_info_list()
+        pc_cluster_uuids = pc_cluster.name_uuid_map.values()
+
+        if not pc_cluster_uuids:
+            return pc_cluster_uuids, False
+
+        cluster_sync_complete = True
+        for pe_uuid in self._pe_uuids:
+            if pe_uuid not in pc_cluster_uuids:
+                cluster_sync_complete = False
+        return pc_cluster_uuids, cluster_sync_complete
diff --git a/framework/scripts/python/helpers/state_monitor/pc_task_monitor.py b/framework/scripts/python/helpers/state_monitor/pc_task_monitor.py
new file mode 100644
index 0000000..aeda599
--- /dev/null
+++ b/framework/scripts/python/helpers/state_monitor/pc_task_monitor.py
@@ -0,0 +1,88 @@
+from typing import List
+from helpers.log_utils import get_logger
+from helpers.rest_utils import RestAPIUtil
+from scripts.python.helpers.state_monitor.state_monitor import StateMonitor
+from scripts.python.helpers.v3.task import Task
+
+logger = get_logger(__name__)
+
+
+class PcTaskMonitor(StateMonitor):
+    """
+    The class to wait for tasks to reach the expected state
+    """
+    DEFAULT_CHECK_INTERVAL_IN_SEC = 5
+    DEFAULT_TIMEOUT_IN_SEC = 300
+
+    def __init__(self, session: RestAPIUtil, **kwargs):
+        """
+        The constructor for PcTaskMonitor
+        Args:
+            session: request PC session to query the API
+            kwargs:
+                expected_state(str): expected state of the tasks
+                task_uuid_list(list): list of task uuids to poll
+        """
+        self.session = session
+        self.expected_state = kwargs.get('expected_state', 'SUCCEEDED')
+        self.task_uuid_list = kwargs.get('task_uuid_list') or []
+        self.completed_task_list = []
+        self.failed_task_list = []
+        self.task_op = Task(self.session)
+
+    def check_status(self):
+        """
+        Check whether all the tasks have reached a terminal state
+        Returns:
+            tuple: (failed task list as a string or None, True if all tasks finished)
+        """
+        logger.info("Total Tasks: {}".format(len(self.task_uuid_list)))
+        logger.info("Completed Tasks: {}".format(len(self.completed_task_list)))
+
+        completed = False
+        response = None
+
+        if not self.task_uuid_list:
+            completed = True
+
+        self.completed_task_list = []
+        self.failed_task_list = []
+        for subset_uuid_list in self.__uuid_list_chunks(self.task_uuid_list):
+            completed_tasks = self.task_op.poll(subset_uuid_list)
+            for completed_task in completed_tasks:
+                if completed_task.get("status") == "FAILED":
+                    self.failed_task_list.append(completed_task)
+                else:
+                    # append the individual task, not the whole polled list
+                    self.completed_task_list.append(completed_task)
+
+        if len(self.completed_task_list) == len(self.task_uuid_list):
+            completed = True
+        elif len(self.completed_task_list) + len(self.failed_task_list) == len(self.task_uuid_list):
+            completed = True
+            response = f"{self.failed_task_list}"
+
+        logger.info("[{}/{}] Tasks Completed".format(len(self.completed_task_list),
+                                                     len(self.task_uuid_list)))
+        return response, completed
+
+    @property
+    def incomplete_task_uuids(self) -> List:
+        """
+        Get the uuids of tasks that have not yet completed or failed
+        Returns:
+            list
+        """
+        # assumes each polled task dict carries a "uuid" field
+        finished_uuids = {task.get("uuid") for task in self.completed_task_list + self.failed_task_list}
+        return [task_uuid for task_uuid in self.task_uuid_list if task_uuid not in finished_uuids]
+
+    @staticmethod
+    def __uuid_list_chunks(uuid_list: List, chunk_size=100):
+        """
+        Given list of uuid, return chunks of uuids
+        Args:
+            uuid_list (list): List of uuids
+            chunk_size (int): Chunk size, default 100
+        Returns:
+            generator
+        """
+        for i in range(0, len(uuid_list), chunk_size):
+            yield uuid_list[i:i + chunk_size]
diff --git a/framework/scripts/python/helpers/state_monitor/state_monitor.py b/framework/scripts/python/helpers/state_monitor/state_monitor.py
new file mode 100644
index 0000000..cc9bff2
--- /dev/null
+++ b/framework/scripts/python/helpers/state_monitor/state_monitor.py
@@ -0,0 +1,62 @@
+import time
+
+from helpers.log_utils import get_logger
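A rough usage sketch for the task monitor above, assuming a Prism Central session and a list of v3 task UUIDs (for example the ones returned by PcBatchOp.batch_create()); all values here are placeholders.

from helpers.rest_utils import RestAPIUtil
from scripts.python.helpers.state_monitor.pc_task_monitor import PcTaskMonitor

# Placeholder Prism Central session and task UUIDs to poll.
pc_session = RestAPIUtil("10.1.1.10", user="admin", pwd="secret", port="9440", headers=None, secured=True)
task_uuids = ["<task-uuid-1>", "<task-uuid-2>"]  # e.g. returned by PcBatchOp.batch_create()

monitor = PcTaskMonitor(pc_session, task_uuid_list=task_uuids)
# monitor() returns (failed-task summary or None, True) once all tasks finish,
# or (None, False) when the wait times out.
response, ok = monitor.monitor()
if not ok:
    raise Exception("Timed out waiting for the tasks to complete")
if response:
    raise Exception(f"Some tasks failed: {response}")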
+from abc import abstractmethod, ABC + +logger = get_logger(__name__) + + +class StateMonitor(ABC): + """ + Abstract class for monitor. + """ + DEFAULT_TIMEOUT_IN_SEC = 1800 + DEFAULT_CHECK_INTERVAL_IN_SEC = 5 + + def monitor(self, query_retries=True): + """ + Keep waiting until target status is matched. No Exceptions will be raise + when timed out, False is return instead. It is up to the caller to make + decision about what to do when timed out. + + Args: + query_retries(bool): False means monitor won't retry with timeout. + + Returns: + bool: True if target status is matched, False otherwise. + """ + start_time = time.time() + status_matched = False + response = {} + elapsed_time = 0 + is_timeout = False + + logger.info("Started monitoring the state...") + while not is_timeout and not status_matched: + response, status_matched = self.check_status() + if not query_retries: + return status_matched + + if not status_matched: + logger.info(f"Wait {self.DEFAULT_CHECK_INTERVAL_IN_SEC} seconds for the next check...") + time.sleep(self.DEFAULT_CHECK_INTERVAL_IN_SEC) + + elapsed_time = time.time() - start_time + if elapsed_time >= self.DEFAULT_TIMEOUT_IN_SEC: + is_timeout = True + + if status_matched: + logger.info(f"Completed {type(self).__name__} in duration: {elapsed_time:.2f} seconds") + return response, True + else: + timeout_message = f"Timed out after {elapsed_time:.2f} seconds" + logger.error(timeout_message) + return None, False + + @abstractmethod + def check_status(self): + """ + Check the status is match the expected status or not. All the subclass + must override this method + """ + pass diff --git a/framework/scripts/python/helpers/update_project_dsl.py.jinja b/framework/scripts/python/helpers/update_project_dsl.py.jinja new file mode 100644 index 0000000..75744f4 --- /dev/null +++ b/framework/scripts/python/helpers/update_project_dsl.py.jinja @@ -0,0 +1,23 @@ +from calm.dsl.builtins import Project, Provider, Ref + +NTNX_ACCOUNT = "{{ NTNX_ACCOUNT }}" +SUBNET_CLUSTER_MAPPING = {{ SUBNET_CLUSTER_MAPPING }} + + +class UpdateProvider(Project): + """ + Update the project + 1. 
Append the cluster and subnet information for the specified Nutanix account in the project + """ + + providers = [ + Provider.Ntnx( + account=Ref.Account(NTNX_ACCOUNT), + clusters=[Ref.Cluster(name=cluster_name, account_name=NTNX_ACCOUNT) + for cluster_name in SUBNET_CLUSTER_MAPPING.keys()], + subnets=[ + Ref.Subnet(name=subnet, cluster=cluster_name) + for cluster_name, subnet_list in SUBNET_CLUSTER_MAPPING.items() for subnet in subnet_list + ] + ) + ] diff --git a/framework/scripts/python/helpers/v1/__init__.py b/framework/scripts/python/helpers/v1/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/framework/scripts/python/helpers/v1/authentication.py b/framework/scripts/python/helpers/v1/authentication.py new file mode 100644 index 0000000..17a8953 --- /dev/null +++ b/framework/scripts/python/helpers/v1/authentication.py @@ -0,0 +1,51 @@ +from typing import List +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.pe_entity_v1 import PeEntityV1 +from helpers.log_utils import get_logger + +logger = get_logger(__name__) + + +class AuthN(PeEntityV1): + def __init__(self, session: RestAPIUtil): + self.resource_type = "/authconfig" + self.session = session + super(AuthN, self).__init__(session=session) + + def create_directory_services(self, **ad_params): + endpoint = "directories" + spec = { + "name": ad_params["ad_name"], + "domain": ad_params["ad_domain"], + "directoryUrl": "ldap://{}:389".format(ad_params["ad_server_ip"]), + "groupSearchType": ad_params.get("group_search_type", "NON_RECURSIVE"), + "directoryType": ad_params["directory_type"], + "connectionType": "LDAP", + "serviceAccountUsername": ad_params["service_account_username"], + "serviceAccountPassword": ad_params["service_account_password"] + } + return self.create(data=spec, endpoint=endpoint, timeout=360) + + def get_directories(self): + endpoint = "directories" + return self.read(endpoint=endpoint) + + def create_role_mapping(self, directory_name: str, role_mappings: List, cluster_info: str): + endpoint = f"directories/{directory_name}/role_mappings" + existing_role_mapping = self.read(endpoint=endpoint) + + existing_role = set([mapping["role"] for mapping in existing_role_mapping]) + for role_mapping in role_mappings: + if role_mapping.get("role_type") not in existing_role: + spec = { + "directoryName": directory_name, + "role": role_mapping["role_type"], + "entityType": role_mapping["entity_type"], + "entityValues": role_mapping["values"] + } + response = self.create(data=spec, endpoint=endpoint) + if isinstance(response, str): + raise Exception(response) + logger.info(f"Created Role Mapping type '{role_mapping['role_type']}' in the cluster {cluster_info}") + else: + logger.warning(f"Role Mapping '{role_mapping['role_type']}' already exists in the cluster {cluster_info}") diff --git a/framework/scripts/python/helpers/v1/container.py b/framework/scripts/python/helpers/v1/container.py new file mode 100644 index 0000000..725cbfc --- /dev/null +++ b/framework/scripts/python/helpers/v1/container.py @@ -0,0 +1,98 @@ +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.pe_entity_v1 import PeEntityV1 +from helpers.log_utils import get_logger +from scripts.python.helpers.v1.storage_pool import StoragePool + +logger = get_logger(__name__) + + +class Container(PeEntityV1): + def __init__(self, session: RestAPIUtil): + self.resource_type = "/containers" + self.session = session + super(Container, self).__init__(session=session) + + def create(self, **kwargs): + # check if 
already exists + container_list = self.read() + + for container in container_list: + if container.get("name") == kwargs.get("name"): + logger.warning("Container already exists!") + return + data = self.get_json_for_create(**kwargs) + response = super(Container, self).create(data=data) + if response["value"]: + logger.info(f"Creation of Storage container {kwargs.get('name')} successful!") + else: + raise Exception(f"Could not create Storage container. Error: {response}") + + def get_json_for_create(self, **kwargs): + """ + Helper function to generate container config spec(json) for creation. + + Args: + kwargs(dict): + name(str): the name to container. + storage_pool_uuid(str,optional): The uuid of the storage pool. + advertised_capacity(int, optional): The advertised capacity of the + container. + replication_factor(int, optional): The replication factor value. + compression_enabled(bool, optional): enable compression or not. + compression_delay_in_secs(int, optional): compression delay in seconds. + erasure_code(str,optional): Turn on erasure code or not. Default value + is OFF. + finger_print_on_write(str, optional): Turn on dedup or not. Default + value is OFF. + on_disk_dedup(str, optional): Turn on disk dedup or not. Default + value is OFF. + + Returns: + dict: The container config spec for creation. + + Raises: + ValueError exception will be raised when no storage pool was found. + """ + name = kwargs.pop("name") + storage_pool_uuid = kwargs.pop("storage_pool_uuid", None) + reserved_in_gb = kwargs.pop("reserved_in_gb", 0) + advertised_capacity = kwargs.pop("advertisedCapacity_in_gb", None) + replication_factor = kwargs.pop("replication_factor", None) + compression_enabled = kwargs.pop("compression_enabled", True) + compression_delay_in_secs = kwargs.pop("compression_delay_in_secs", 0) + enable_software_encryption = kwargs.pop("enable_software_encryption", False) + erasure_code = kwargs.pop("erasure_code", "OFF") + finger_print_on_write = kwargs.pop("finger_print_on_write", "OFF") + on_disk_dedup = kwargs.pop("on_disk_dedup", "OFF") + affinity_host_uuid = kwargs.pop("affinity_host_uuid", None) + + if not storage_pool_uuid: + storage_pool_list = StoragePool(self.session).read() + if not storage_pool_list: + raise ValueError("No storage pools found!") + else: + storage_pool_uuid = storage_pool_list[0].get("storagePoolUuid") + + json = { + "name": name, + "storagePoolUuid": storage_pool_uuid, + "totalExplicitReservedCapacity": int(reserved_in_gb) * 1024 * 1024 * 1024, + "advertisedCapacity": int(advertised_capacity) * 1024 * 1024 * 1024 if advertised_capacity else None, + "compressionEnabled": compression_enabled, + "compressionDelayInSecs": compression_delay_in_secs, + "erasureCode": erasure_code, + "fingerPrintOnWrite": finger_print_on_write, + "onDiskDedup": on_disk_dedup, + "nfsWhitelistAddress": [] + } + + if enable_software_encryption: + json["enableSoftwareEncryption"] = enable_software_encryption + + if replication_factor: + json["replicationFactor"] = str(replication_factor) + + if affinity_host_uuid: + json["affinityHostUuid"] = affinity_host_uuid + + return json diff --git a/framework/scripts/python/helpers/v1/eula.py b/framework/scripts/python/helpers/v1/eula.py new file mode 100644 index 0000000..2cf23e2 --- /dev/null +++ b/framework/scripts/python/helpers/v1/eula.py @@ -0,0 +1,44 @@ +import sys +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.pe_entity_v1 import PeEntityV1 +from helpers.log_utils import get_logger + +logger = get_logger(__name__) + + 
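A hypothetical day-0 use of the container helper above; the Prism Element address, credentials and sizing values are placeholders.

from helpers.rest_utils import RestAPIUtil
from scripts.python.helpers.v1.container import Container

# Placeholder Prism Element endpoint and credentials.
pe_session = RestAPIUtil("10.1.1.30", user="admin", pwd="secret", port="9440", headers=None, secured=True)

# Creates the container only if one with the same name does not already exist;
# the storage pool is discovered automatically when storage_pool_uuid is omitted.
Container(pe_session).create(
    name="SelfServiceContainer",
    replication_factor=2,
    compression_enabled=True,
    advertisedCapacity_in_gb=512,
)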
+class Eula(PeEntityV1): + """ + Class to accept End-User License Agreement (EULA) + """ + + def __init__(self, session: RestAPIUtil): + self.resource_type = "/eulas" + super(Eula, self).__init__(session=session) + + def is_eula_accepted(self): + """ + This method check whether eula enabled or not. + Returns: + (boolean): True if EULA is accepted + False if eula is not accepted + """ + response = self.read() + for entity in response: + if 'userDetailsList' in entity: + return True + return + + def accept_eula(self, username: str, company_name: str, job_title: str, cluster_info): + """Accept End-User License Agreement (EULA) + """ + endpoint = "accept" + data = { + "username": username, + "companyName": company_name, + "jobTitle": job_title + } + response = self.create(data=data, endpoint=endpoint) + if response["value"]: + logger.info(f"Accepted End-User License Agreement in {cluster_info}") + else: + raise Exception(f"Could not Accept End-User License Agreement in {cluster_info}. Error: {response}") diff --git a/framework/scripts/python/helpers/v1/multicluster.py b/framework/scripts/python/helpers/v1/multicluster.py new file mode 100644 index 0000000..e846d1b --- /dev/null +++ b/framework/scripts/python/helpers/v1/multicluster.py @@ -0,0 +1,41 @@ +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.pe_entity_v1 import PeEntityV1 +from helpers.log_utils import get_logger + +logger = get_logger(__name__) + + +class MultiCluster(PeEntityV1): + + def __init__(self, session: RestAPIUtil): + self.resource_type = "/multicluster" + super(MultiCluster, self).__init__(session=session) + + def get_cluster_external_state(self): + """ + + """ + endpoint = "cluster_external_state" + return self.read(endpoint=endpoint, timeout=90) + + def register_pe_to_pc(self, pe_ip, pc_ip, pc_username, pc_password): + logger.info("Registering PE {} to PC {}".format(pe_ip, pc_ip)) + data = { + "ipAddresses": [pc_ip], + "username": pc_username, + "password": pc_password + } + + endpoint = "add_to_multicluster" + return self.create(data=data, endpoint=endpoint, timeout=120) + + def deregister_pe_from_pc(self, pe_ip, pc_ip, pc_username, pc_password): + logger.info("De-registering PE {} from PC {}".format(pe_ip, pc_ip)) + data = { + "ipAddresses": [pc_ip], + "username": pc_username, + "password": pc_password + } + + endpoint = "remove_from_multicluster" + return self.create(data=data, endpoint=endpoint) diff --git a/framework/scripts/python/helpers/v1/pulse.py b/framework/scripts/python/helpers/v1/pulse.py new file mode 100644 index 0000000..c583e96 --- /dev/null +++ b/framework/scripts/python/helpers/v1/pulse.py @@ -0,0 +1,32 @@ +import logging + +from helpers.log_utils import get_logger +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.pe_entity_v1 import PeEntityV1 + +logger = get_logger(__name__) + + +class Pulse(PeEntityV1): + def __init__(self, session: RestAPIUtil): + self.resource_type = "/pulse" + super(Pulse, self).__init__(session=session) + + def update_pulse(self, cluster_info: str, enable: bool = True): + """Enable/disable Pulse + + Args: + enable (bool, optional): Enable or Disable Pulse in PE. Defaults to True. 
+ cluster_info + """ + data = { + "enable": enable, + "isPulsePromptNeeded": False + } + + # get current status + current_status = self.read().get("enable") + + if current_status == enable: + logger.warning(f"Pulse is already '{enable}' in the cluster {cluster_info}") + self.update(data=data) diff --git a/framework/scripts/python/helpers/v1/storage_pool.py b/framework/scripts/python/helpers/v1/storage_pool.py new file mode 100644 index 0000000..0741f5f --- /dev/null +++ b/framework/scripts/python/helpers/v1/storage_pool.py @@ -0,0 +1,12 @@ +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.pe_entity_v1 import PeEntityV1 +from helpers.log_utils import get_logger + +logger = get_logger(__name__) + + +class StoragePool(PeEntityV1): + def __init__(self, session: RestAPIUtil): + self.resource_type = "/storage_pools" + self.session = session + super(StoragePool, self).__init__(session=session) diff --git a/framework/scripts/python/helpers/v1/utils_manager.py b/framework/scripts/python/helpers/v1/utils_manager.py new file mode 100644 index 0000000..63e1576 --- /dev/null +++ b/framework/scripts/python/helpers/v1/utils_manager.py @@ -0,0 +1,29 @@ +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.pe_entity_v1 import PeEntityV1 +from helpers.log_utils import get_logger + +logger = get_logger(__name__) + + +class UtilsManager(PeEntityV1): + """ + Class to change the default system password + """ + DEFAULT_SYSTEM_PASSWORD = "Nutanix/4u" + DEFAULT_USERNAME = "admin" + + def __init__(self, session: RestAPIUtil): + self.resource_type = "/utils" + super(UtilsManager, self).__init__(session=session) + + def change_default_system_password(self, new_password, cluster_info: str): + endpoint = "change_default_system_password" + data = { + "oldPassword": self.DEFAULT_SYSTEM_PASSWORD, + "newPassword": new_password + } + response = self.create(data=data, endpoint=endpoint, timeout=120) + if response["value"]: + logger.info(f"Default System password updated with new password in {cluster_info}") + else: + raise Exception(f"Could not change the system password in {cluster_info}. 
Error: {response}") diff --git a/framework/scripts/python/helpers/v2/__init__.py b/framework/scripts/python/helpers/v2/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/framework/scripts/python/helpers/v2/cluster.py b/framework/scripts/python/helpers/v2/cluster.py new file mode 100644 index 0000000..7495769 --- /dev/null +++ b/framework/scripts/python/helpers/v2/cluster.py @@ -0,0 +1,23 @@ +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.pe_entity_v2 import PeEntityV2 +from helpers.log_utils import get_logger + +logger = get_logger(__name__) + + +class Cluster(PeEntityV2): + def __init__(self, session: RestAPIUtil): + self.resource_type = "/cluster" + self.cluster_info = {} + super(Cluster, self).__init__(session=session) + + def get_cluster_info(self): + return self.cluster_info.update(self.read()) + + def update_dsip(self, dsip: str): + cluster_config = self.read() + data = { + "cluster_external_data_services_ipaddress": dsip + } + cluster_config.update(data) + return self.update(data=cluster_config) \ No newline at end of file diff --git a/framework/scripts/python/helpers/v3/__init__.py b/framework/scripts/python/helpers/v3/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/framework/scripts/python/helpers/v3/address_group.py b/framework/scripts/python/helpers/v3/address_group.py new file mode 100644 index 0000000..9c3da9d --- /dev/null +++ b/framework/scripts/python/helpers/v3/address_group.py @@ -0,0 +1,66 @@ +from copy import deepcopy +from typing import Optional, List + +from scripts.python.helpers.pc_entity import PcEntity +from helpers.log_utils import get_logger + +logger = get_logger(__name__) + + +class AddressGroup(PcEntity): + kind = "address_group" + + def __init__(self, module): + self.resource_type = "/address_groups" + super(AddressGroup, self).__init__(module) + + def get_uuid_by_name(self, entity_name: Optional[str] = None, entity_data: Optional[dict] = None, **kwargs): + kwargs.pop("filter", None) + filter_criteria = f"name=={entity_name}" + response = self.list(filter=filter_criteria, **kwargs) + + for entity in response: + if entity.get("address_group", {}).get("name") == entity_name: + return entity.get("uuid") + + def create_address_group_spec(self, ag_info): + spec = self._get_default_spec() + # Get the name + self._build_spec_name(spec, ag_info["name"]) + # Get description + self._build_spec_desc(spec, ag_info.get("description")) + # Get ip_address_block_list + self._build_spec_subnets(spec, ag_info.get("subnets", [])) + + logger.debug(spec) + return spec + + def _get_default_spec(self): + return deepcopy({ + "name": None, + "description": "", + "ip_address_block_list": [] + }) + + @staticmethod + def _build_spec_name(payload, name): + payload["name"] = name + + @staticmethod + def _build_spec_desc(payload, desc): + payload["description"] = desc + + def _build_spec_subnets(self, payload, subnets: List): + ip_address_block_list = [] + for subnet in subnets: + ip_address_block_list.append( + self._get_ip_address_block( + subnet["network_ip"], subnet["network_prefix"] + ) + ) + payload["ip_address_block_list"] = ip_address_block_list + + @staticmethod + def _get_ip_address_block(ip, prefix): + spec = {"ip": ip, "prefix_length": prefix} + return spec diff --git a/framework/scripts/python/helpers/v3/application.py b/framework/scripts/python/helpers/v3/application.py new file mode 100644 index 0000000..e67fd22 --- /dev/null +++ b/framework/scripts/python/helpers/v3/application.py @@ -0,0 +1,10 @@ +from 
helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.pc_entity import PcEntity + + +class Application(PcEntity): + kind = "app" + + def __init__(self, session: RestAPIUtil): + self.resource_type = "/apps" + super(Application, self).__init__(session=session) diff --git a/framework/scripts/python/helpers/v3/availabilty_zone.py b/framework/scripts/python/helpers/v3/availabilty_zone.py new file mode 100644 index 0000000..14c5b74 --- /dev/null +++ b/framework/scripts/python/helpers/v3/availabilty_zone.py @@ -0,0 +1,27 @@ +from typing import Optional + +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.pc_entity import PcEntity +from helpers.log_utils import get_logger + +logger = get_logger(__name__) + + +class AvailabilityZone(PcEntity): + kind = "availability_zone" + + def __init__(self, session: RestAPIUtil): + self.resource_type = "/availability_zones" + self.session = session + super(AvailabilityZone, self).__init__(session=session) + + def get_mgmt_url_by_name(self, entity_name: Optional[str] = None, **kwargs): + filter_criteria = f"name=={entity_name}" + kwargs["filter"] = filter_criteria + entity = super(AvailabilityZone, self).get_entity_by_name(entity_name, **kwargs) + if not entity: + raise Exception(f"AZ with name {entity_name} doesn't exist!") + mgmt_url = str(entity.get("status", {}).get("resources", {}).get("management_url")) + if not mgmt_url: + raise Exception("Couldn't fetch mgmt url") + return mgmt_url diff --git a/framework/scripts/python/helpers/v3/blueprint.py b/framework/scripts/python/helpers/v3/blueprint.py new file mode 100644 index 0000000..8368444 --- /dev/null +++ b/framework/scripts/python/helpers/v3/blueprint.py @@ -0,0 +1,14 @@ +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.pc_entity import PcEntity + + +class Blueprint(PcEntity): + kind = 'blueprint' + + def __init__(self, session: RestAPIUtil): + self.resource_type = "/blueprints" + super(Blueprint, self).__init__(session=session) + + def list(self, **kwargs): + filter_criteria = kwargs.pop('filters', 'state!=DELETED') + return super(Blueprint, self).list(filter=filter_criteria, **kwargs) diff --git a/framework/scripts/python/helpers/v3/category.py b/framework/scripts/python/helpers/v3/category.py new file mode 100644 index 0000000..f3564fc --- /dev/null +++ b/framework/scripts/python/helpers/v3/category.py @@ -0,0 +1,68 @@ +from typing import List + +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.pc_entity import PcEntity +from helpers.log_utils import get_logger + +logger = get_logger(__name__) + + +class Category(PcEntity): + kind = "category" + + def __init__(self, session: RestAPIUtil): + self.resource_type = "/categories" + super(Category, self).__init__(session=session) + + def add_values(self, name: str, values: List): + """ + Add values to a given PC category + """ + self.batch_values_add(name, values) + + def get_values(self, name: str): + """ + Get the values of the category. 
+ Args: + name(str): The name of the category + + Returns: + List, for example: + ['policy_counts': '{}', + 'uuid': 'f312d033-c03e-47f3-b77b-78e7ee5e3430', + 'value': u'2', + 'entity_counts': '{}'}] + """ + endpoint = f"{name}/list" + values = self.list(use_base_url=True, endpoint=endpoint) + return values + + def categories_with_values(self): + category_entity_list = self.list() + for category in category_entity_list: + category["values"] = [value.get("value") + for value in self.get_values(category["name"])] + + return category_entity_list + + def batch_values_add(self, category_list: List, **kwargs): + requests = [] + + for category in category_list: + name = category["name"] + values = category["values"] + endpoint = name + operation = kwargs.get("operation", "PUT") + # todo + base_url = f"api/nutanix//v3{self.resource_type}/{endpoint}" + + for value in values: + requests.append( + { + "operation": operation, + "body": {"value": value, "description": name}, + "path_and_params": f"{base_url}/{value}" + } + ) + + return self.batch_op.batch(api_request_list=requests) diff --git a/framework/scripts/python/helpers/v3/cloud_trust.py b/framework/scripts/python/helpers/v3/cloud_trust.py new file mode 100644 index 0000000..9a7d739 --- /dev/null +++ b/framework/scripts/python/helpers/v3/cloud_trust.py @@ -0,0 +1,28 @@ +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.pc_entity import PcEntity + + +class CloudTrust(PcEntity): + kind = "cloud_trust" + + def __init__(self, session: RestAPIUtil): + self.resource_type = "/cloud_trusts" + super(CloudTrust, self).__init__(session=session) + + @staticmethod + def get_payload(cloud_type: str, remote_pc: str, remote_pc_username: str, remote_pc_password: str): + spec = { + "name": "", + "description": "", + "resources": { + "cloud_type": cloud_type, + "password": remote_pc_password, + "url": remote_pc, + "username": remote_pc_username + } + } + + payload = { + "spec": spec + } + return payload diff --git a/framework/scripts/python/helpers/v3/cluster.py b/framework/scripts/python/helpers/v3/cluster.py new file mode 100644 index 0000000..4784976 --- /dev/null +++ b/framework/scripts/python/helpers/v3/cluster.py @@ -0,0 +1,37 @@ +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.pc_entity import PcEntity +from helpers.log_utils import get_logger + +logger = get_logger(__name__) + + +class Cluster(PcEntity): + kind = "cluster" + + def __init__(self, session: RestAPIUtil): + self.uuid_ip_map = {} + self.name_uuid_map = {} + self.resource_type = "/clusters" + super(Cluster, self).__init__(session=session) + + def get_pe_info_list(self): + """ + Set name: uuid and ip: name mapping for all the registered clusters + """ + clusters = self.list() + + for cluster in clusters: + if "PRISM_CENTRAL" in cluster["status"]["resources"]["config"]["service_list"]: + continue + ip = cluster["status"]["resources"]["network"].get("external_ip", None) + if cluster.get("spec", {}).get("name"): + name = cluster["spec"]["name"] + elif cluster.get("status", {}).get("name"): + name = cluster["status"]["name"] + else: + continue + uuid = cluster.get("metadata", {}).get("uuid") + self.name_uuid_map[name] = uuid + self.uuid_ip_map[uuid] = ip + + return diff --git a/framework/scripts/python/helpers/v3/network.py b/framework/scripts/python/helpers/v3/network.py new file mode 100644 index 0000000..c5ba7ec --- /dev/null +++ b/framework/scripts/python/helpers/v3/network.py @@ -0,0 +1,146 @@ +from typing import List +from helpers.rest_utils import 
RestAPIUtil +from scripts.python.helpers.pc_entity import PcEntity +from helpers.log_utils import get_logger + +logger = get_logger(__name__) + + +class Network(PcEntity): + kind = "subnet" + + def __init__(self, session: RestAPIUtil): + self.resource_type = "/subnets" + self.session = session + super(Network, self).__init__(session=session) + + def batch_create_network(self, subnet_create_payload_list: List): + return self.batch_op.batch_create(request_payload_list=subnet_create_payload_list) + + def get_uuid_by_name(self, cluster_name: str, subnet_name: str, **kwargs): + filter_criteria = f"cluster_name=={cluster_name};name=={subnet_name}" + kwargs["filter"] = filter_criteria + return super(Network, self).get_uuid_by_name(subnet_name, **kwargs) + + @staticmethod + def create_subnet_payload(**kwargs): + """ + Create Subnet Payload + Args: + **kwargs: + name (str) : Name of the subnet + subnet_type (str): VLAN (PC) or OVERLAY (Xi) + vlan_id (int): Vlan ID (PC) + subnet_ip (str): Subnet ip, eg 192.168.10.0 + vpc_id (str): the vpc id for subnet + prefix_length (int): Prefix length, eg, 24 + default_gateway_ip (str): Default gateway ip, eg, 192.168.10.1 + pool_list (list): list of dicts with ip ranges, eg, + [ {"range": "192.168.10.20 192.168.10.250"}] + cluster_uuid (str) : Cluster uuid + virtual_network_reference (dict): Virtual Network reference (Xi) + eg, {"kind": "virtual_network", + "uuid": "773d9dfd-aa48-44b0-a502-60f25d002576"} + is_external (bool): True to enable - "External Connectivity for VPCs" + Defaults to False. + + Returns: + dict + """ + name = kwargs.get("name") + subnet_type = kwargs.get("subnet_type", "VLAN") + vlan_id = kwargs.get("vlan_id", None) + subnet_ip = kwargs.get("network_ip", None) + vpc_id = kwargs.get("vpc_id", None) + prefix_length = kwargs.get("network_prefix", None) + default_gateway_ip = kwargs.get("default_gateway_ip", None) + pool_list = kwargs.get("pool_list", []) + dhcp_options = kwargs.get("dhcp_options", {}) + cluster_uuid = kwargs.get("cluster_uuid", None) + dhcp_server_address = kwargs.get("dhcp_server_address", None) + virtual_network_reference = kwargs.get("virtual_network_reference", {}) + is_external = kwargs.get("is_external", False) + enable_nat = kwargs.get("enable_nat", True) + + payload = { + "spec": { + "name": name, + "resources": { + "subnet_type": subnet_type, + "ip_config": {} + } + }, + "metadata": { + "kind": "subnet", + "name": name + }, + "api_version": "3.1.0" + } + + if vlan_id: + payload["spec"]["resources"].update({"vlan_id": vlan_id}) + + if vpc_id: + payload["spec"]["resources"].update( + { + "vpc_reference": { + "kind": "vpc", + "uuid": vpc_id + }}) + payload["spec"]["resources"].update( + { + "virtual_network_reference": { + "kind": "virtual_network", + "uuid": vpc_id + }}) + elif subnet_type == "OVERLAY": + # todo need to implement + pass + + if is_external: + payload["spec"]["resources"].update({"is_external": is_external}) + + if not enable_nat: + payload["spec"]["resources"].update({"enable_nat": enable_nat}) + + if cluster_uuid: + payload["spec"].update( + { + "cluster_reference": { + "kind": "cluster", + "uuid": cluster_uuid + } + }) + + if subnet_ip: + ip_config = { + "subnet_ip": subnet_ip, + "prefix_length": prefix_length, + "default_gateway_ip": default_gateway_ip, + "dhcp_options": dhcp_options, + "pool_list": pool_list + } + + if dhcp_server_address: + ip_config["dhcp_server_address"] = {"ip": dhcp_server_address} + payload["spec"]["resources"]["ip_config"] = ip_config + return payload + + def 
create_pc_subnet_payload(self, **kwargs): + """ + Build subnet create payload for PC + Args: + **kwargs: + name (str) : Name of the subnet + vlan_id (int): Vlan ID (PC) + subnet_ip (str): Subnet ip, eg 192.168.10.0 + prefix_length (int): Prefix length, eg, 24 + default_gateway_ip (str): Default gateway ip, eg, 192.168.10.1 + pool_list (list): list of dicts with ip ranges, eg, + [ {"range": "192.168.10.20 192.168.10.250"}] + cluster_uuid (str) : Cluster UUID + + Returns: + dict + """ + return self.create_subnet_payload(**kwargs) diff --git a/framework/scripts/python/helpers/v3/protection_rule.py b/framework/scripts/python/helpers/v3/protection_rule.py new file mode 100644 index 0000000..2473458 --- /dev/null +++ b/framework/scripts/python/helpers/v3/protection_rule.py @@ -0,0 +1,174 @@ +from copy import deepcopy +from helpers.general_utils import convert_to_secs +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.pc_entity import PcEntity +from helpers.log_utils import get_logger +from scripts.python.helpers.v3.availabilty_zone import AvailabilityZone + +logger = get_logger(__name__) + + +class ProtectionRule(PcEntity): + kind = "protection_rule" + + def __init__(self, session: RestAPIUtil): + self.remote_pe_clusters = self.source_pe_clusters = None + self.resource_type = "/protection_rules" + super(ProtectionRule, self).__init__(session) + self.build_spec_methods = { + "name": self._build_spec_name, + "desc": self._build_spec_desc, + "start_time": self._build_spec_start_time, + "protected_categories": self._build_spec_protected_categories, + "schedules": self._build_spec_schedules, + } + + def get_payload(self, pr_spec: dict, source_pe_clusters: dict, remote_pe_clusters: dict): + """ + Payload for creating a Protection Rule + """ + self.source_pe_clusters = source_pe_clusters + self.remote_pe_clusters = remote_pe_clusters + spec, error = super(ProtectionRule, self).get_spec(params=pr_spec) + if error: + raise Exception("Failed generating protection-rule spec: {}".format(error)) + + return spec + + @staticmethod + def _get_default_spec(): + return deepcopy( + { + "metadata": {"kind": "protection_rule"}, + "spec": { + "resources": { + "availability_zone_connectivity_list": [], + "ordered_availability_zone_list": [], + "category_filter": { + "params": {}, + "type": "CATEGORIES_MATCH_ANY", + }, + "primary_location_list": [], + }, + "name": None, + }, + } + ) + + @staticmethod + def _build_spec_name(payload, name): + payload["spec"]["name"] = name + return payload, None + + @staticmethod + def _build_spec_desc(payload, desc): + payload["spec"]["description"] = desc + return payload, None + + @staticmethod + def _build_spec_start_time(payload, start_time): + payload["spec"]["resources"]["start_time"] = start_time + return payload, None + + @staticmethod + def _build_spec_protected_categories(payload, categories): + payload["spec"]["resources"]["category_filter"]["params"] = categories + return payload, None + + def _build_spec_schedules(self, payload, schedules): + ordered_az_list = [] + az_connectivity_list = [] + + payload["spec"]["resources"]["primary_location_list"] = [0] + + # create ordered_availability_zone_list + for schedule in schedules: + if schedule.get("source") and schedule["source"] not in ordered_az_list: + if schedule["source"].get("availability_zone"): + az_pc = AvailabilityZone(self.session) + schedule["source"]["availability_zone_url"] = az_pc.get_mgmt_url_by_name("Local AZ") + else: + raise Exception("Unknown AZ specified in the schedule!") + if 
schedule["source"].get("cluster") and schedule["source"].get("availability_zone"): + cluster = schedule["source"].pop("cluster") + pc_ip = schedule["source"].pop("availability_zone") + schedule["source"]["cluster_uuid"] = self.source_pe_clusters[pc_ip][cluster] + else: + raise Exception("Unknown cluster specified in the schedule!") + ordered_az_list.append(schedule["source"]) + + if schedule.get("destination") and schedule["destination"] not in ordered_az_list: + if schedule["destination"].get("availability_zone"): + az_pc = AvailabilityZone(self.session) + schedule["destination"]["availability_zone_url"] = az_pc.get_mgmt_url_by_name( + f"PC_{schedule['destination']['availability_zone']}") + else: + raise Exception("Unknown AZ specified in the schedule!") + + if schedule["destination"].get("cluster") and schedule["destination"].get("availability_zone"): + cluster = schedule["destination"].pop("cluster") + pc_ip = schedule["destination"].pop("availability_zone") + schedule["destination"]["cluster_uuid"] = self.remote_pe_clusters[pc_ip][cluster] + else: + raise Exception("Unknown cluster specified in the schedule!") + ordered_az_list.append(schedule["destination"]) + payload["spec"]["resources"]["ordered_availability_zone_list"] = ordered_az_list + + # create availability_zone_connectivity_list from schedules + az_conn_list = [ + { + "source_availability_zone_index": 0, + "destination_availability_zone_index": 1, + "snapshot_schedule_list": [] + }, + { + "source_availability_zone_index": 1, + "destination_availability_zone_index": 0, + "snapshot_schedule_list": [] + } + ] + for schedule in schedules: + spec = {} + az_connection_spec = deepcopy(az_conn_list) + + if schedule["protection_type"] == "ASYNC": + if ( + not (schedule.get("rpo") and schedule.get("rpo_unit")) + and schedule.get("snapshot_type") + and (schedule.get("local_retention_policy") or schedule.get("remote_retention_policy") + ) + ): + return ( + None, + "rpo, rpo_unit, snapshot_type and atleast one policy are required fields for " + "asynchronous snapshot schedule", + ) + + spec["recovery_point_objective_secs"], err = convert_to_secs( + schedule["rpo"], schedule["rpo_unit"] + ) + if err: + return None, err + + spec["snapshot_type"] = schedule["snapshot_type"] + if schedule.get("local_retention_policy"): + spec["local_snapshot_retention_policy"] = schedule[ + "local_retention_policy" + ] + if schedule.get("remote_retention_policy"): + spec["remote_snapshot_retention_policy"] = schedule[ + "remote_retention_policy" + ] + else: + if schedule.get("auto_suspend_timeout"): + spec["auto_suspend_timeout_secs"] = schedule["auto_suspend_timeout"] + spec["recovery_point_objective_secs"] = 0 + + az_connection_spec[0]["snapshot_schedule_list"] = [spec] + az_connection_spec[1]["snapshot_schedule_list"] = [spec] + az_connectivity_list.extend(az_connection_spec) + + payload["spec"]["resources"][ + "availability_zone_connectivity_list" + ] = az_connectivity_list + return payload, None diff --git a/framework/scripts/python/helpers/v3/recovery_plan.py b/framework/scripts/python/helpers/v3/recovery_plan.py new file mode 100644 index 0000000..40a2716 --- /dev/null +++ b/framework/scripts/python/helpers/v3/recovery_plan.py @@ -0,0 +1,371 @@ +from copy import deepcopy +from typing import List + +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.pc_entity import PcEntity +from helpers.log_utils import get_logger +from scripts.python.helpers.v3.availabilty_zone import AvailabilityZone +from scripts.python.helpers.v3.vm import VM 
+ +logger = get_logger(__name__) + + +class RecoveryPlan(PcEntity): + kind = "recovery_plan" + + def __init__(self, session: RestAPIUtil): + self.network_type = self.source_pe_clusters = self.primary_location_cluster_list = \ + self.recovery_location_cluster_list = None + self.resource_type = "/recovery_plans" + super(RecoveryPlan, self).__init__(session) + self.build_spec_methods = { + "name": self._build_spec_name, + "desc": self._build_spec_desc, + "primary_location": self._build_spec_primary_location, + "recovery_location": self._build_spec_recovery_location, + "stages": self._build_spec_stages, + "network_mappings": self._build_spec_network_mappings, + "floating_ip_assignments": self._build_spec_floating_ip_assignments, + } + + def get_payload(self, rp_spec: dict, source_pe_clusters: dict): + """ + Payload for creating a Recovery plan + """ + self.source_pe_clusters = source_pe_clusters + self.network_type = rp_spec.get("network_type", "NON_STRETCH") + spec, error = super(RecoveryPlan, self).get_spec(params=rp_spec) + if error: + raise Exception("Failed generating recovery-plan spec: {}".format(error)) + + return spec + + def _get_default_spec(self): + return deepcopy( + { + "api_version": "3.1.0", + "metadata": {"kind": "recovery_plan"}, + "spec": { + "resources": { + "parameters": { + "network_mapping_list": [], + "floating_ip_assignment_list": [], + "availability_zone_list": [{}, {}], + "primary_location_index": 0, + }, + "stage_list": [], + }, + "name": None, + }, + } + ) + + @staticmethod + def _build_spec_name(payload, name): + payload["spec"]["name"] = name + return payload, None + + @staticmethod + def _build_spec_desc(payload, desc): + payload["spec"]["description"] = desc + return payload, None + + def _build_spec_stages(self, payload, stages): + stage_list = [] + for stage in stages: + stage_spec = { + "stage_work": {"recover_entities": {"entity_info_list": None}} + } + + # for each stage add all vms and categories + stage_entities = [] + for vm in stage.get("vms", []): + vm_ref, err = self.get_vm_reference_spec(vm) + if err: + return None, err + vm_spec = {"any_entity_reference": vm_ref} + if vm.get("enable_script_exec"): + vm_spec["script_list"] = [ + {"enable_script_exec": vm["enable_script_exec"]} + ] + stage_entities.append(vm_spec) + + for category in stage.get("categories", []): + category_spec = {"categories": {category["key"]: category["value"]}} + if category.get("enable_script_exec"): + category_spec["script_list"] = [ + {"enable_script_exec": category["enable_script_exec"]} + ] + stage_entities.append(category_spec) + + stage_spec["stage_work"]["recover_entities"][ + "entity_info_list" + ] = stage_entities + + if stage.get("delay"): + stage_spec["delay_time_secs"] = stage["delay"] + stage_list.append(stage_spec) + + payload["spec"]["resources"]["stage_list"] = stage_list + return payload, None + + def _build_network_mapping_spec(self, config, network_type, are_network_stretched=False): + ntw_spec = {} + custom_ip_specs = [] + + # set custom IP mappings for vms + if config.get("custom_ip_config") and not are_network_stretched: + for ip_config in config["custom_ip_config"]: + vm_ref, err = self.get_vm_reference_spec(ip_config["vm"]) + if err: + return None, err + custom_ip_spec = { + "vm_reference": vm_ref, + "ip_config_list": [{"ip_address": ip_config["ip"]}], + } + custom_ip_specs.append(custom_ip_spec) + + # subnet related details for particular network + subnet_spec = {} + if config.get("gateway_ip"): + subnet_spec["gateway_ip"] = config["gateway_ip"] + if 
config.get("prefix"): + subnet_spec["prefix_length"] = int(config["prefix"]) + if config.get("external_connectivity_state"): + subnet_spec["external_connectivity_state"] = config[ + "external_connectivity_state" + ] + + # add to respective network config as per test or prod network + if network_type == "test": + if custom_ip_specs: + ntw_spec["test_ip_assignment_list"] = custom_ip_specs + ntw_spec["test_network"] = { + "name": config["name"], + } + if subnet_spec: + ntw_spec["test_network"]["subnet_list"] = [subnet_spec] + + else: + if custom_ip_specs: + ntw_spec["recovery_ip_assignment_list"] = custom_ip_specs + ntw_spec["recovery_network"] = { + "name": config["name"], + } + if subnet_spec: + ntw_spec["recovery_network"]["subnet_list"] = [subnet_spec] + + return ntw_spec + + def _build_spec_network_mappings(self, payload: dict, network_mappings: List): + + # set flag to apply these settings to all network mappings + are_network_stretched = False + if self.network_type == "STRETCH": + are_network_stretched = True + + # create primary and recovery location spec to be used in each network mappings + if self.primary_location_url: + primary_location = { + "availability_zone_url": self.primary_location_url + } + if self.primary_location_cluster_list: + primary_location["cluster_reference_list"] = self.primary_location_cluster_list + else: + primary_location_index = payload["spec"]["resources"]["parameters"][ + "primary_location_index" + ] + primary_location = payload["spec"]["resources"]["parameters"][ + "availability_zone_list" + ][primary_location_index] + + if self.recovery_location_url: + recovery_location = { + "availability_zone_url": self.recovery_location_url + } + if self.recovery_location_cluster_list: + recovery_location["cluster_reference_list"] = self.recovery_location_cluster_list + else: + recovery_location_index = ( + payload["spec"]["resources"]["parameters"]["primary_location_index"] ^ 1 + ) + recovery_location = payload["spec"]["resources"]["parameters"][ + "availability_zone_list" + ][recovery_location_index] + + network_mapping_specs = [] + for ntw in network_mappings: + spec = {} + + # add primary and recovery site networks each having test and production(also called recovery networks) + primary_site_ntw_spec = {} + recovery_site_ntw_spec = {} + if ntw["primary"].get("test"): + primary_site_ntw_spec.update( + self._build_network_mapping_spec( + ntw["primary"]["test"], "test", are_network_stretched + ) + ) + + if ntw["primary"].get("prod"): + primary_site_ntw_spec.update( + self._build_network_mapping_spec( + ntw["primary"]["prod"], "prod", are_network_stretched + ) + ) + + if ntw["recovery"].get("test"): + recovery_site_ntw_spec.update( + self._build_network_mapping_spec( + ntw["recovery"]["test"], "test", are_network_stretched + ) + ) + + if ntw["recovery"].get("prod"): + recovery_site_ntw_spec.update( + self._build_network_mapping_spec( + ntw["recovery"]["prod"], "prod", are_network_stretched + ) + ) + + primary_site_ntw_spec.update(primary_location) + recovery_site_ntw_spec.update(recovery_location) + + spec["are_networks_stretched"] = are_network_stretched + spec["availability_zone_network_mapping_list"] = [ + primary_site_ntw_spec, + recovery_site_ntw_spec, + ] + network_mapping_specs.append(spec) + + payload["spec"]["resources"]["parameters"][ + "network_mapping_list" + ] = network_mapping_specs + return payload, None + + def _build_spec_primary_location(self, payload, primary_location): + primary_location_index = payload["spec"]["resources"]["parameters"][ + 
"primary_location_index" + ] + + if primary_location.get("availability_zone"): + az_pc = AvailabilityZone(self.session) + self.primary_location_url = primary_location["url"] = az_pc.get_mgmt_url_by_name("Local AZ") + else: + raise Exception("Unknown AZ specified in the primary location!") + + spec = {"availability_zone_url": primary_location["url"]} + + if primary_location.get("cluster"): + cluster = primary_location.get("cluster") + pc_ip = primary_location.get("availability_zone") + cluster_uuid = self.source_pe_clusters[pc_ip][cluster] + self.primary_location_cluster_list = spec["cluster_reference_list"] = [{"uuid": cluster_uuid}] + + payload["spec"]["resources"]["parameters"]["availability_zone_list"][ + primary_location_index + ] = spec + return payload, None + + def _build_spec_recovery_location(self, payload, recovery_location): + recovery_location_index = ( + payload["spec"]["resources"]["parameters"]["primary_location_index"] ^ 1 + ) + + if recovery_location.get("availability_zone"): + az_pc = AvailabilityZone(self.session) + self.recovery_location_url = recovery_location["url"] = \ + az_pc.get_mgmt_url_by_name(f"PC_{recovery_location['availability_zone']}") + else: + raise Exception("Unknown AZ specified in the recovery location!") + + spec = {"availability_zone_url": recovery_location["url"]} + + if recovery_location.get("cluster"): + cluster = recovery_location.get("cluster") + pc_ip = recovery_location.get("availability_zone") + cluster_uuid = self.source_pe_clusters[pc_ip][cluster] + spec["cluster_reference_list"] = [{"uuid": cluster_uuid}] + self.recovery_location_cluster_list = spec["cluster_reference_list"] = \ + [{"uuid": recovery_location["cluster"]}] + + payload["spec"]["resources"]["parameters"]["availability_zone_list"][ + recovery_location_index + ] = spec + return payload, None + + def _build_spec_floating_ip_assignments(self, payload, floating_ip_assignments): + floating_ip_assignment_specs = [] + for config in floating_ip_assignments: + floating_ip_assignment_spec = {"availability_zone_url": config[ + "availability_zone_url" + ]} + vm_ip_assignment_specs = [] + for ip_spec in config["vm_ip_assignments"]: + ip_assignment_spec = {} + + # add vm reference + vm_ref, err = self.get_vm_reference_spec(ip_spec["vm"]) + if err: + return None, err + ip_assignment_spec["vm_reference"] = vm_ref + + # add nic info + ip_assignment_spec["vm_nic_information"] = { + "uuid": ip_spec["vm_nic_info"]["uuid"] + } + if ip_spec["vm_nic_info"].get("ip"): + ip_assignment_spec["vm_nic_information"]["ip"] = ip_spec[ + "vm_nic_info" + ]["ip"] + + # test floating ip config + if ip_spec.get("test_ip_config"): + ip_assignment_spec["test_floating_ip_config"] = { + "ip": ip_spec["test_ip_config"]["ip"] + } + if ip_spec["test_ip_config"].get("allocate_dynamically"): + ip_assignment_spec["test_floating_ip_config"][ + "should_allocate_dynamically" + ] = ip_spec["test_ip_config"]["allocate_dynamically"] + + # recovery floating ip config + if ip_spec.get("prod_ip_config"): + ip_assignment_spec["recovery_floating_ip_config"] = { + "ip": ip_spec["prod_ip_config"]["ip"] + } + if ip_spec["prod_ip_config"].get("allocate_dynamically"): + ip_assignment_spec["recovery_floating_ip_config"][ + "should_allocate_dynamically" + ] = ip_spec["prod_ip_config"]["allocate_dynamically"] + + vm_ip_assignment_specs.append(ip_assignment_spec) + + floating_ip_assignment_spec[ + "vm_ip_assignment_list" + ] = vm_ip_assignment_specs + floating_ip_assignment_specs.append(floating_ip_assignment_spec) + + 
payload["spec"]["resources"]["parameters"][ + "floating_ip_assignment_list" + ] = floating_ip_assignment_specs + return payload, None + + def get_vm_reference_spec(self, vm_config: dict): + uuid = vm_config.get("uuid", "") + name = vm_config.get("name", "") + if ("name" not in vm_config) and ("uuid" not in vm_config): + return None, "Provide name or uuid for building vm reference spec" + elif "name" not in vm_config: + vm = VM(self.session) + resp = vm.read(vm_config["uuid"]) + name = resp["status"]["name"] + elif "uuid" not in vm_config: + vm = VM(self.session) + uuid = vm.get_uuid_by_name(vm_config.get("name")) + if not uuid: + error = f"VM {0} not found.".format(name) + return Exception(error) + + vm_ref_spec = {"kind": "vm", "name": name, "uuid": uuid} + return vm_ref_spec, None diff --git a/framework/scripts/python/helpers/v3/security_rule.py b/framework/scripts/python/helpers/v3/security_rule.py new file mode 100644 index 0000000..80fe7e2 --- /dev/null +++ b/framework/scripts/python/helpers/v3/security_rule.py @@ -0,0 +1,204 @@ +from copy import deepcopy + +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.pc_entity import PcEntity +from scripts.python.helpers.v3.address_group import AddressGroup +from scripts.python.helpers.v3.service_group import ServiceGroup +from helpers.log_utils import get_logger + +logger = get_logger(__name__) + + +class SecurityPolicy(PcEntity): + kind = "network_security_rule" + + def __init__(self, session: RestAPIUtil): + self.resource_type = "/network_security_rules" + self.session = session + super(SecurityPolicy, self).__init__(session) + + def _get_default_spec(self): + return deepcopy( + { + "metadata": {"kind": "network_security_rule"}, + "spec": { + "name": None, + "resources": {"is_policy_hitlog_enabled": False}, + }, + } + ) + + def create_security_policy_spec(self, sp_info): + spec = self._get_default_spec() + # Get the name + self._build_spec_name(spec, sp_info["name"]) + # Get description + self._build_spec_desc(spec, sp_info.get("description")) + + self._build_allow_ipv6_traffic(spec, sp_info.get("allow_ipv6_traffic", False)) + + self._build_is_policy_hitlog_enabled(spec, sp_info.get("hitlog", True)) + + # App policy + self._build_app_rule(spec, sp_info.get("app_rule")) + + return spec + + @staticmethod + def _build_spec_name(payload, value): + payload["spec"]["name"] = value + return payload, None + + @staticmethod + def _build_spec_desc(payload, value): + payload["spec"]["description"] = value + return payload, None + + @staticmethod + def _build_allow_ipv6_traffic(payload, value): + payload["spec"]["resources"]["allow_ipv6_traffic"] = value + return payload, None + + @staticmethod + def _build_is_policy_hitlog_enabled(payload, value): + payload["spec"]["resources"]["is_policy_hitlog_enabled"] = value + return payload, None + + def _build_app_rule(self, payload, value): + app_rule = payload["spec"]["resources"].get("app_rule", {}) + payload["spec"]["resources"]["app_rule"] = self._build_spec_rule( + app_rule, value + ) + return payload, None + + def _build_spec_rule(self, payload, value): + rule = payload + + if value.get("target_group"): + target_group = {} + params = {} + categories = value["target_group"].get("categories", {}) + if categories.get("ADGroup"): + params["ADGroup"] = [categories["ADGroup"]] + if value["target_group"].get("default_internal_policy"): + target_group["default_internal_policy"] = value["target_group"][ + "default_internal_policy" + ] + if categories.get("AppType"): + params["AppType"] = 
[categories["AppType"]] + if categories.get("AppTier"): + params["AppTier"] = [categories.get("AppTier")] + if value["target_group"].get("default_internal_policy"): + target_group["default_internal_policy"] = value["target_group"][ + "default_internal_policy" + ] + if categories.get("apptype_filter_by_category"): + params.update(**categories["apptype_filter_by_category"]) + + target_group["filter"] = ( + payload.get("target_group", {}).get("filter") + or self._get_default_filter_spec() + ) + if params: + target_group["filter"]["params"] = params + target_group["peer_specification_type"] = "FILTER" + payload["target_group"] = target_group + + if value.get("inbounds"): + rule["inbound_allow_list"] = self._generate_bound_spec( + rule.get("inbound_allow_list", []), value["inbounds"] + ) + elif value.get("allow_all_inbounds"): + rule["inbound_allow_list"] = [{"peer_specification_type": "ALL"}] + if value.get("outbounds"): + rule["outbound_allow_list"] = self._generate_bound_spec( + rule.get("outbound_allow_list", []), value["outbounds"] + ) + elif value.get("allow_all_outbounds"): + rule["outbound_allow_list"] = [{"peer_specification_type": "ALL"}] + if value.get("policy_mode"): + rule["action"] = value["policy_mode"] + return rule + + def _generate_bound_spec(self, payload, list_of_rules): + for rule in list_of_rules: + if rule.get("rule_id"): + rule_spec = self._filter_by_uuid(rule["rule_id"], payload) + if rule.get("state") == "absent": + payload.remove(rule_spec) + continue + else: + rule_spec = {} + if rule.get("categories"): + rule_spec["filter"] = self._get_default_filter_spec() + rule_spec["filter"]["params"] = rule["categories"] + rule_spec["peer_specification_type"] = "FILTER" + elif rule.get("ip_subnet"): + rule_spec["ip_subnet"] = rule["ip_subnet"] + rule_spec["peer_specification_type"] = "IP_SUBNET" + elif rule.get("address"): + address_group = rule["address"] + + if address_group.get("uuid"): + address_group["kind"] = "address_group" + rule_spec["address_group_inclusion_list"] = [address_group] + elif address_group.get("name"): + ag = AddressGroup(self.session) + uuid = ag.get_uuid_by_name(address_group["name"]) + + if not uuid: + raise Exception(f"Cannot find the Address Group {address_group['name']}!") + + address_group["kind"] = "address_group" + address_group["uuid"] = uuid + rule_spec["address_group_inclusion_list"] = [address_group] + + rule_spec["peer_specification_type"] = "IP_SUBNET" + + if rule.get("protocol"): + self._generate_protocol_spec(rule_spec, rule["protocol"]) + if rule.get("description"): + rule_spec["description"] = rule["description"] + if not rule_spec.get("rule_id"): + payload.append(rule_spec) + return payload + + def _generate_protocol_spec(self, payload, config): + if config.get("tcp"): + payload["protocol"] = "TCP" + payload["tcp_port_range_list"] = config["tcp"] + elif config.get("udp"): + payload["protocol"] = "UDP" + payload["udp_port_range_list"] = config["udp"] + elif config.get("icmp"): + payload["protocol"] = "ICMP" + payload["icmp_type_code_list"] = config["icmp"] + elif config.get("service"): + service = config["service"] + + if service.get("uuid"): + service["kind"] = "service_group" + payload["service_group_list"] = [service] + elif service.get("name"): + sg = ServiceGroup(self.session) + uuid = sg.get_uuid_by_name(service["name"]) + + if not uuid: + raise Exception(f"Cannot find the Address Group {service['name']}!") + + service["kind"] = "service_group" + service["uuid"] = uuid + payload["service_group_list"] = [service] + + @staticmethod 
+ def _get_default_filter_spec(): + return deepcopy( + {"type": "CATEGORIES_MATCH_ALL", "kind_list": ["vm"], "params": {}} + ) + + @staticmethod + def _filter_by_uuid(uuid, items_list): + try: + return next(filter(lambda d: d.get("rule_id") == uuid, items_list)) + except Exception as e: + raise e diff --git a/framework/scripts/python/helpers/v3/service.py b/framework/scripts/python/helpers/v3/service.py new file mode 100644 index 0000000..6d4ceab --- /dev/null +++ b/framework/scripts/python/helpers/v3/service.py @@ -0,0 +1,72 @@ +from helpers.log_utils import get_logger +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.pc_entity import PcEntity + +logger = get_logger(__name__) + + +class Service(PcEntity): + kind = "service" + + def __init__(self, session: RestAPIUtil): + self.resource_type = "/services" + super(Service, self).__init__(session) + + def get_microseg_status(self): + """ + Get the service status + Returns: + str, for example, ENABLED, ENABLING + """ + return self._get_service_status("microseg") + + def get_dr_status(self): + """ + Get the service status + Returns: + str, for example, ENABLED, ENABLING + """ + return self._get_service_status("disaster_recovery") + + def enable_microseg(self): + """ + Enable microseg service + Returns: + {"task_uuid": "9063a53d-e043-4c2c-807b-fd8de3168604"} + """ + return self._enable_service("microseg") + + def enable_leap(self): + """ + Enable leap service + Returns: + {"task_uuid": "9063a53d-e043-4c2c-807b-fd8de3168604"} + """ + return self._enable_service("disaster_recovery") + + def _get_service_status(self, name: str): + """ + Get the service status + Args: + name(str): The name of the service + Returns: + str, for example, ENABLED, ENABLING + """ + endpoint = f"{name}/status" + response = self.read(endpoint=endpoint) + return response.get("service_enablement_status") + + def _enable_service(self, name: str): + """ + Enable the service + Args: + name(str): The name of the service + Returns: + dict, the api response. 
example: + {"task_uuid": "9063a53d-e043-4c2c-807b-fd8de3168604"} + """ + endpoint = name + payload = { + 'state': 'ENABLE' + } + return self.create(data=payload, endpoint=endpoint) diff --git a/framework/scripts/python/helpers/v3/service_group.py b/framework/scripts/python/helpers/v3/service_group.py new file mode 100644 index 0000000..99ebe89 --- /dev/null +++ b/framework/scripts/python/helpers/v3/service_group.py @@ -0,0 +1,93 @@ +from copy import deepcopy +from typing import Optional +from helpers.log_utils import get_logger +from scripts.python.helpers.pc_entity import PcEntity + +logger = get_logger(__name__) + + +class ServiceGroup(PcEntity): + kind = "service_group" + + def __init__(self, module): + self.resource_type = "/service_groups" + super(ServiceGroup, self).__init__(module) + + def get_uuid_by_name(self, entity_name: Optional[str] = None, entity_data: Optional[dict] = None, **kwargs): + kwargs.pop("filter", None) + filter_criteria = f"name=={entity_name}" + response = self.list(filter=filter_criteria, **kwargs) + + for entity in response: + if entity.get("service_group", {}).get("name") == entity_name: + return entity.get("uuid") + + def create_service_group_spec(self, sg_info): + spec = self._get_default_spec() + # Get the name + self._build_spec_name(spec, sg_info["name"]) + # Get description + self._build_spec_desc(spec, sg_info.get("description")) + + # Get service_list + service_details = sg_info.get("service_details", {}) + for protocol, values in service_details.items(): + self._build_spec_service_details(spec, {protocol: values}) + + logger.debug(spec) + return spec + + def _get_default_spec(self): + return deepcopy( + { + "name": None, + "is_system_defined": False, + "service_list": [], + } + ) + + @staticmethod + def _build_spec_name(payload, value): + payload["name"] = value + + @staticmethod + def _build_spec_desc(payload, value): + payload["description"] = value + + def _build_spec_service_details(self, payload, config): + + service = None + if config.get("tcp"): + service = {"protocol": "TCP"} + port_range_list = self.generate_port_range_list(config["tcp"]) + service["tcp_port_range_list"] = port_range_list + + if config.get("udp"): + service = {"protocol": "UDP"} + port_range_list = self.generate_port_range_list(config["udp"]) + service["udp_port_range_list"] = port_range_list + + if config.get("icmp"): + service = {"protocol": "ICMP", "icmp_type_code_list": config["icmp"]} + elif config.get("any_icmp"): + service = {"protocol": "ICMP", "icmp_type_code_list": []} + + if not service: + logger.error("Unsupported Protocol") + return + payload["service_list"].append(service) + + return payload, None + + @staticmethod + def generate_port_range_list(config): + port_range_list = [] + if "*" not in config: + for port in config: + port = port.split("-") + port_range_list.append( + {"start_port": int(port[0]), "end_port": int(port[-1])} + ) + else: + port_range_list.append({"start_port": 0, "end_port": 65535}) + return port_range_list diff --git a/framework/scripts/python/helpers/v3/task.py b/framework/scripts/python/helpers/v3/task.py new file mode 100644 index 0000000..5a90a3b --- /dev/null +++ b/framework/scripts/python/helpers/v3/task.py @@ -0,0 +1,37 @@ +from typing import List + +from scripts.python.helpers.pc_entity import PcEntity +from helpers.log_utils import get_logger + +logger = get_logger(__name__) + + +class Task(PcEntity): + kind = "task" + + def __init__(self, session): + self.resource_type = "/tasks" + super(Task, self).__init__(session) + + def 
poll(self, task_uuid_list: List, poll_timeout_secs=30): + """ + Call tasks/poll api to poll till the task_uuid is complete + Args: + task_uuid_list (list) : List of Task UUIDs to Poll + poll_timeout_secs (int): Poll Timeout, default to 30 Secs + + Returns: + Response + """ + endpoint = "poll" + + payload = { + "task_uuid_list": task_uuid_list, + "poll_timeout_seconds": poll_timeout_secs + } + + # Add 5 sec more for the Rest timeout, so that it doesn't bail + # before task/poll returns + return self.create(data=payload, endpoint=endpoint, timeout=poll_timeout_secs+5) + + # todo don't make other PcEntity methods available for Tasks diff --git a/framework/scripts/python/helpers/v3/vm.py b/framework/scripts/python/helpers/v3/vm.py new file mode 100644 index 0000000..2aab8f7 --- /dev/null +++ b/framework/scripts/python/helpers/v3/vm.py @@ -0,0 +1,13 @@ +from copy import deepcopy +from scripts.python.helpers.pc_entity import PcEntity +from helpers.log_utils import get_logger + +logger = get_logger(__name__) + + +class VM(PcEntity): + kind = "vm" + + def __init__(self, session): + self.resource_type = "/vms" + super(VM, self).__init__(session) diff --git a/framework/scripts/python/helpers/v4/__init__.py b/framework/scripts/python/helpers/v4/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/framework/scripts/python/image_cluster_script.py b/framework/scripts/python/image_cluster_script.py new file mode 100644 index 0000000..897a7db --- /dev/null +++ b/framework/scripts/python/image_cluster_script.py @@ -0,0 +1,138 @@ +import sys +import time +from scripts.python.script import Script +from scripts.python.helpers.fc.imaged_clusters import ImagedCluster +from helpers.log_utils import get_logger + +logger = get_logger(__name__) + + +class ImageClusterScript(Script): + """ + Foundation Central Image Cluster Script + """ + def __init__(self, data: dict, cluster_data: dict, imaging_obj: ImagedCluster = None): + """ + Args: + data (dict): proved json data + cluster_data (dict): Updated cluster data + imaging_obj (object, optional): Imaging cluster object. Defaults to None. + """ + self.data = data + self.cluster_data = cluster_data + if imaging_obj: + self.imaging = imaging_obj + else: + self.imaging = ImagedCluster(self.data["pc_session"]) + super(ImageClusterScript, self).__init__() + + def execute(self): + """ + Run Image cluste nodes in Foundation Central + """ + spec, error = self.imaging.get_spec(params=self.cluster_data) + if error: + self.exceptions.append("Failed generating Image Nodes Spec: {}".format(error)) + sys.exit(1) + logger.debug("Image Node Spec: {}".format(spec)) + resp = self.imaging.create(spec) + self.imaged_cluster_uuid = resp["imaged_cluster_uuid"] + logger.debug("imaged_cluster_uuid for cluster {}: {}".format( + self.cluster_data["cluster_name"], self.imaged_cluster_uuid)) + + def verify(self): + """ + Verify Cluster deployment status + """ + state = "" + delay = 60 + logger.info("Wait for 15 minutes to monitor cluster {} status".format(self.cluster_data["cluster_name"])) + time.sleep(15 * 60) + timeout = time.time() + (3 * 60 * 60) + while state != "COMPLETED": + response = self.imaging.read(self.imaged_cluster_uuid) + stopped = response["cluster_status"]["imaging_stopped"] + aggregate_percent_complete = response["cluster_status"][ + "aggregate_percent_complete" + ] + if stopped: + if aggregate_percent_complete < 100: + message = "Imaging/Creation stopped/failed before completion. 
See below details for deployment status:" + status = self._get_progress_error_status(response, message) + logger.error(status) + else: + message = "Imaging/Creation Completed." + status = self._get_progress_error_status(response, message) + logger.info(status) + state = "COMPLETED" + else: + state = "PENDING" + status = self._get_progress_error_status(response) + if time.time() > timeout: + message = "Failed to poll on image node progress. Reason: Timeout\nStatus: " + status = self._get_progress_error_status(response, message) + logger.info(status) + time.sleep(delay) + + def _get_progress_error_status(self, progress: dict, message: str = ""): + """Get the status of node and cluster progress + + Args: + progress (dict): Response dict + message (str, optional): Message to add while displaying status. Defaults to "". + + Returns: + str: Status of node and cluster progress + """ + return "{0}\nClusters: {1}\nNodes: {2}".format( + message, + self._get_cluster_progress_messages( + progress, "cluster_progress_details", "cluster_name" + ), + self._get_node_progress_messages( + progress, "node_progress_details", "imaged_node_uuid" + ), + ) + + def _get_cluster_progress_messages(self, progress: dict, entity_type: str, entity_name: str): + """Cluster progress messages + + Args: + progress (dict): Response dictionary + entity_type (str): Entity type to filter + entity_name (str): Entity name to filter + + Returns: + str: Cluster progress messages + """ + res = "" + cluster = progress["cluster_status"][entity_type] + if cluster is not None: + if cluster.get(entity_name): + res += "\n\tcluster_name: {0}\n".format(cluster[entity_name]) + if cluster.get("status"): + res += "\tstatus: {0}\n".format(cluster["status"]) + if cluster.get("message_list"): + res += "\tmessage: {0}\n".format("\n".join(cluster["message_list"])) + return res + + def _get_node_progress_messages(self, progress: dict, entity_type: str, entity_name: str): + """Node progress messages + + Args: + progress (dict): Response dictionary + entity_type (str): Entity type to filter + entity_name (str): Entity name to filter + + Returns: + str: Node progress messages + """ + res = "" + nodes = progress["cluster_status"][entity_type] + if nodes: + for node in nodes: + res += "\n\tnode_uuid: {0}\n".format(node[entity_name]) + res += "\tstatus: {0}\n".format(node["status"]) + if node.get("message_list"): + res += "\tmessage: {0}\n".format("\n".join(node["message_list"])) + return res diff --git a/framework/scripts/python/init_calm_dsl.py b/framework/scripts/python/init_calm_dsl.py new file mode 100644 index 0000000..8e46a0e --- /dev/null +++ b/framework/scripts/python/init_calm_dsl.py @@ -0,0 +1,33 @@ +from helpers.log_utils import get_logger +from scripts.python.script import Script +from calm.dsl.cli import set_server_details, init_db, sync_cache + +logger = get_logger(__name__) + + +class InitCalmDsl(Script): + def __init__(self, data: dict): + self.data = data + super(InitCalmDsl, self).__init__() + + def execute(self, **kwargs): + try: + logger.info("Initializing Calm DSL...") + set_server_details( + ip=self.data['pc_ip'], + port="9440", + username=self.data['pc_username'], + password=self.data['pc_password'], + project_name=self.data['project_name'], + config_file=None, + local_dir=None, + db_file=None + ) + init_db() + sync_cache() + except Exception as e: + self.exceptions.append(e) + + def verify(self, **kwargs): + logger.info(f"No verification needed for {type(self).__name__}") + pass diff --git 
a/framework/scripts/python/initial_cluster_config.py b/framework/scripts/python/initial_cluster_config.py new file mode 100644 index 0000000..a822e58 --- /dev/null +++ b/framework/scripts/python/initial_cluster_config.py @@ -0,0 +1,93 @@ +import time +from scripts.python.cluster_script import ClusterScript +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.v1.utils_manager import UtilsManager +from scripts.python.helpers.v1.eula import Eula +from scripts.python.helpers.v1.pulse import Pulse +from scripts.python.helpers.v2.cluster import Cluster as PeCluster +from helpers.log_utils import get_logger + +logger = get_logger(__name__) + + +class InitialClusterConfig(ClusterScript): + """ + Accept Eula + Enable Pulse + """ + DEFAULT_USERNAME = "admin" + DEFAULT_SYSTEM_PASSWORD = "Nutanix/4u" + + def __init__(self, data: dict, **kwargs): + super(InitialClusterConfig, self).__init__(data, **kwargs) + + @staticmethod + def change_default_password(pe_session: RestAPIUtil, new_pe_password: str, cluster_info): + default_system_password = UtilsManager(pe_session) + default_system_password.change_default_system_password(new_pe_password, cluster_info) + # Waiting for password sync + time.sleep(30) + + @staticmethod + def accept_eula(pe_session: RestAPIUtil, data: dict, cluster_info): + eula = Eula(pe_session) + + if eula.is_eula_accepted(): + logger.warning(f"Eula is already accepted for the cluster {cluster_info}") + return + eula.accept_eula(**data, cluster_info=cluster_info) + + @staticmethod + def update_pulse(pe_session: RestAPIUtil, enable_pulse: bool, cluster_info): + pulse = Pulse(session=pe_session) + pulse.update_pulse(enable=enable_pulse, cluster_info=cluster_info) + + def execute_single_cluster(self, cluster_ip: str, cluster_details: dict): + # Only for parallel runs + if self.parallel: + self.set_current_thread_name(cluster_ip) + + pe_session = cluster_details["pe_session"] + + new_pe_password = cluster_details.get("pe_password") + + if new_pe_password == self.DEFAULT_SYSTEM_PASSWORD: + logger.error(f"New Password specified is same as default password for the cluster ...") + return + + cluster_info = f"'{cluster_ip}/ {cluster_details['cluster_info']['name']}'" + + default_pe_session = RestAPIUtil(cluster_ip, user=self.DEFAULT_USERNAME, + pwd=self.DEFAULT_SYSTEM_PASSWORD, + port="9440", secured=True) + try: + self.change_default_password(default_pe_session, new_pe_password, cluster_info) + except Exception as e: + self.exceptions.append(f"Change_default_password failed for the cluster {cluster_info} with the error: {e}") + try: + self.accept_eula(pe_session, cluster_details.get("eula"), cluster_info) + except Exception as e: + self.exceptions.append(f"Accept_eula failed for the cluster {cluster_info} with the error: {e}") + try: + self.update_pulse(pe_session, cluster_details.get("enable_pulse", False), cluster_info) + except Exception as e: + self.exceptions.append(f"Update_pulse failed for the cluster {cluster_info} with the error: {e}") + + def verify(self): + # default password shouldn't work + cluster_list = [] + endpoint = "cluster" + for cluster_ip, cluster_details in self.pe_clusters.items(): + try: + default_pe_session = RestAPIUtil(cluster_ip, user=self.DEFAULT_USERNAME, + pwd=self.DEFAULT_SYSTEM_PASSWORD, + port="9440", secured=True) + cluster_obj = PeCluster(default_pe_session) + cluster_obj.read(endpoint=endpoint) + except Exception: + # if it fails, i.e default password doesn't work, password is changed + continue + cluster_list.append(cluster_ip) + + if 
cluster_list: + logger.warning(f"Password change failed for the clusters: {cluster_list}") diff --git a/framework/scripts/python/launch_calm_bp.py b/framework/scripts/python/launch_calm_bp.py new file mode 100644 index 0000000..3f31dad --- /dev/null +++ b/framework/scripts/python/launch_calm_bp.py @@ -0,0 +1,64 @@ +import logging +import json +import os +from helpers.log_utils import get_logger +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.state_monitor.application_state_monitor import ApplicationStateMonitor +from scripts.python.script import Script +from calm.dsl.cli import launch_blueprint_simple +from scripts.python.helpers.v3.application import Application + +logger = get_logger(__name__) + +class LaunchBp(Script): + def __init__(self, data: dict): + self.data = data + super(LaunchBp, self).__init__() + + def execute(self, **kwargs): + session = RestAPIUtil(self.data["pc_ip"], user=self.data["pc_username"], pwd=self.data["pc_password"], + port="9440", secured=True) + + # Get the BPs list + for bp in self.data["bp_list"]: + logging.info(f"Creating app {bp['app_name']} from the blueprint {bp['name']}") + + # open a new file for writing + with open('launch_params.py', 'w') as f: + f.write('variable_list = ') + json.dump(bp['variable_list'], f) + f.write('\n') + + try: + launch_blueprint_simple( + blueprint_name=bp['name'], + app_name=bp['app_name'], + launch_params=f"launch_params.py" + ) + except Exception as e: + raise e + finally: + if 'f' in locals(): + f.close() + # Delete the project file + os.remove(f"launch_params.py") + + # Monitoring application status + application_op = Application(session) + application_uuid = application_op.get_uuid_by_name(bp['app_name']) + + if application_uuid: + logger.info("Application is being provisioned") + app_response, status = ApplicationStateMonitor(session, + application_uuid=application_uuid).monitor() + if not status or not app_response: + raise Exception("Application deployment failed") + else: + logger.info("Application deployment successful") + else: + logger.warning("Could not fetch application uuid to monitor. 
Application might or " + "might not be running") + raise Exception("Stopped") + + def verify(self, **kwargs): + pass diff --git a/framework/scripts/python/register_pe_to_pc.py b/framework/scripts/python/register_pe_to_pc.py new file mode 100644 index 0000000..691adcf --- /dev/null +++ b/framework/scripts/python/register_pe_to_pc.py @@ -0,0 +1,88 @@ +from helpers.log_utils import get_logger +from helpers.rest_utils import RestAPIUtil +from scripts.python.helpers.state_monitor.pc_register_monitor import PcRegisterMonitor +from scripts.python.helpers.v1.multicluster import MultiCluster +from scripts.python.helpers.v2.cluster import Cluster as PeCluster +from scripts.python.cluster_script import ClusterScript + +logger = get_logger(__name__) + + +class RegisterToPc(ClusterScript): + """ + Class that takes multiple clusters and registers them to PC + """ + SYNC_TIME = 300 + + def __init__(self, data: dict, **kwargs): + self.data = data + self.pc_ip = self.data["pc_ip"] + self.pc_session = self.data["pc_session"] + self.pe_uuids = [] + super(RegisterToPc, self).__init__(data, **kwargs) + + def execute_single_cluster(self, cluster_ip: str, cluster_details: dict): + # Only for parallel runs + if self.parallel: + self.set_current_thread_name(cluster_ip) + + pe_session = cluster_details["pe_session"] + + # get Cluster UUIDs + # logger.info(f"Getting the UUID of the cluster {cluster_ip}...") + if not cluster_details.get("cluster_info", {}).get("uuid"): + cluster = PeCluster(pe_session) + cluster.get_cluster_info() + cluster_details["cluster_info"].update(cluster.cluster_info) + self.pe_uuids.append(cluster_details["cluster_info"]["uuid"]) + + try: + _ = self.register_cluster(cluster_ip, pe_session) + except Exception as e: + cluster_info = f"'{cluster_ip}/ {cluster_details['cluster_info']['name']}'" + self.exceptions.append(f"{type(self).__name__} failed for the cluster {cluster_info} " + f"with the error: {e}") + + def register_cluster(self, pe_ip: str, pe_session: RestAPIUtil) -> bool: + cluster = MultiCluster(pe_session) + + # check if cluster is already registered to a PC + response = cluster.get_cluster_external_state() + + if response: + pc_ip = "" + for data in response: + if data.get('clusterDetails'): + pc_ip = data['clusterDetails'].get("ipAddresses", [None])[0] + + if pc_ip: + logger.warning(f"Cluster {pe_ip} is already registered to a PC with IP: {pc_ip}") + return False + + response = cluster.register_pe_to_pc(pe_ip=pe_ip, + pc_ip=self.pc_ip, + pc_username=self.data["pc_username"], + pc_password=self.data["pc_password"]) + + exception_msg = f"Failed to register {pe_ip}. Got the following response for " \ + f"'add_to_multicluster' API: {response}" + if isinstance(response, dict): + value = response.get("value", None) + if not value: + self.exceptions.append(exception_msg) + elif isinstance(response, str): + if "Already added to multi-cluster" not in response: + self.exceptions.append(exception_msg) + else: + self.exceptions.append(exception_msg) + + return True + + def verify(self, **kwargs): + # Monitor PC registration - Checks given PE clusters are successfully + # registered to PC. + app_response, status = PcRegisterMonitor(self.pc_session, + pe_uuids=self.pe_uuids).monitor() + + if not status: + self.exceptions.append("Timed out. 
Registration of clusters to PC didn't happen in the prescribed timeframe") diff --git a/framework/scripts/python/script.py b/framework/scripts/python/script.py new file mode 100644 index 0000000..31e00b3 --- /dev/null +++ b/framework/scripts/python/script.py @@ -0,0 +1,43 @@ +import threading +from abc import abstractmethod, ABC +from helpers.log_utils import get_logger + +logger = get_logger(__name__) + + +class Script(ABC): + def __init__(self): + self.name = type(self).__name__ + self.exceptions = [] + self.num_total_scripts = 0 + self.num_passed_scripts = 0 + self.pass_rate = 0.0 + + def run(self, **kwargs): + current_thread = threading.current_thread() + + if current_thread != threading.main_thread(): + current_thread_name = current_thread.name.split("-")[-1] + current_thread.name = f"Thread-{current_thread_name}-{type(self).__name__}" + + self.execute(**kwargs) + self.verify(**kwargs) + + if self.exceptions: + for exception in self.exceptions: + logger.error(f"{self.name}: {exception}") + + if self.num_total_scripts != 0: + self.pass_rate = self.num_passed_scripts / self.num_total_scripts + else: + self.pass_rate = 100.0 + + return self.pass_rate + + @abstractmethod + def execute(self, **kwargs): + pass + + @abstractmethod + def verify(self, **kwargs): + pass diff --git a/framework/scripts/python/update_calm_project.py b/framework/scripts/python/update_calm_project.py new file mode 100644 index 0000000..3f9f372 --- /dev/null +++ b/framework/scripts/python/update_calm_project.py @@ -0,0 +1,49 @@ +import os +from calm.dsl.cli.projects import update_project_from_dsl +from helpers.log_utils import get_logger +from scripts.python.script import Script +from jinja2 import Template + +logger = get_logger(__name__) + + +class UpdateCalmProject(Script): + def __init__(self, data: dict): + self.data = data + super(UpdateCalmProject, self).__init__() + + def execute(self, **kwargs): + logger.info("Update the project...") + helper_directory = f"{self.data['project_root']}/framework/scripts/python/helpers" + project_update_helper = "update_project_dsl.py.jinja" + + with open(f"{helper_directory}/{project_update_helper}", 'r') as f: + template = Template(f.read()) + data = { + 'NTNX_ACCOUNT': self.data['account_name'], + 'SUBNET_CLUSTER_MAPPING': self.data['subnets'] + } + output = template.render(data) + + project_file = "update_project_dsl.py" + try: + with open(f"{helper_directory}/{project_file}", 'w') as f: + f.write(output) + + update_project_from_dsl( + self.data['project_name'], + f"{helper_directory}/{project_file}", + no_cache_update=False, + append_only=True + ) + except Exception as e: + raise e + finally: + if 'f' in locals(): + f.close() + # Delete the project file + os.remove(f"{helper_directory}/{project_file}") + + def verify(self, **kwargs): + # no verification needed for dsl + pass diff --git a/framework/scripts/python/update_dsip_pe.py b/framework/scripts/python/update_dsip_pe.py new file mode 100644 index 0000000..a5547a1 --- /dev/null +++ b/framework/scripts/python/update_dsip_pe.py @@ -0,0 +1,52 @@ +from scripts.python.helpers.v2.cluster import Cluster as PeCluster +from scripts.python.cluster_script import ClusterScript +from helpers.log_utils import get_logger + +logger = get_logger(__name__) + + +class UpdateDsip(ClusterScript): + """ + Update DSIP for the input PE clusters + """ + + def __init__(self, data: dict, **kwargs): + super(UpdateDsip, self).__init__(data, **kwargs) + + def execute_single_cluster(self, cluster_ip: str, cluster_details: dict): + # Only for parallel 
runs + if self.parallel: + self.set_current_thread_name(cluster_ip) + + if not cluster_details.get("dsip"): + logger.warning(f"DSIP is not passed in '{cluster_ip}/ {cluster_details['cluster_info']['name']}'." + f" Skipping...'") + return + + pe_session = cluster_details["pe_session"] + cluster = PeCluster(pe_session) + + current_dsip = cluster_details.get("cluster_info", {}).get("cluster_external_data_services_ipaddress") + if current_dsip: + logger.warning(f"Data services IP is already set to {current_dsip} in " + f"'{cluster_ip}/ {cluster_details['cluster_info']['name']}'") + return + + try: + response = cluster.update_dsip(cluster_details["dsip"]) + + if response["value"]: + logger.info(f"Updated cluster DSIP to {cluster_details['dsip']} in " + f"'{cluster_ip}/ {cluster_details['cluster_info']['name']}'") + else: + self.exceptions.append(f"Failed to update cluster DSIP in " + f"'{cluster_ip}/ {cluster_details['cluster_info']['name']}'") + except Exception as e: + cluster_info = f"'{cluster_ip}/ {cluster_details['cluster_info']['name']}'" + self.exceptions.append(f"{type(self).__name__} failed for the cluster {cluster_info} with the error: {e}") + return + + def verify(self): + # todo is there a way to verify these + # do we need to verify these? + pass diff --git a/framework/tests/__init__.py b/framework/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/framework/tests/unit/__init__.py b/framework/tests/unit/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/framework/tests/unit/test_data.py b/framework/tests/unit/test_data.py new file mode 100644 index 0000000..a1a87bf --- /dev/null +++ b/framework/tests/unit/test_data.py @@ -0,0 +1,73 @@ +import logging +import json + +LOGGER = logging.getLogger(__name__) + +API_HEADERS = {"Content-Type": "application/json", "Accept": "application/json"} + +REST_ARGS = { + "ip_address": "1.1.1.1", + "user": "user", + "pwd": "pwd", + "headers": API_HEADERS, + "port": 9440 +} + +REST_URI = "api/fc/v1/imaged_nodes/list" + +API_PAYLOAD = { + "filters": { + "node_state": "STATE_AVAILABLE" + } +} + +GET_RESPONSE = POST_RESPONSE = json.dumps({ + "entities": [ + { + "type": "News articles", + "id": "1", + "attributes": { + "title": "JSON:API paints my bikeshed!", + "body": "The shortest article. Ever.", + "created": "2015-05-22T14:56:29.000Z", + "updated": "2015-05-22T14:56:28.000Z" + }, + "relationships": { + "author": { + "data": { + "id": "42", + "type": "people" + } + } + } + }, + { + "type": "Journals", + "id": "2", + "attributes": { + "title": "JSON:API paints my bikeshed!", + "body": "The shortest article. Ever.", + "created": "2016-05-22T14:56:29.000Z", + "updated": "2016-05-22T14:56:28.000Z" + }, + "relationships": { + "author": { + "data": { + "id": "42", + "type": "people" + } + } + } + }, + { + "type": "Publishes", + "id": "3", + "attributes": { + "title": "JSON:API paints my bikeshed!", + "body": "The shortest article. 
Ever.", + "created": "2017-05-22T14:56:29.000Z", + "updated": "2017-05-22T14:56:28.000Z" + } + } + ] +}) diff --git a/framework/tests/unit/test_rest_utils.py b/framework/tests/unit/test_rest_utils.py new file mode 100644 index 0000000..1819fff --- /dev/null +++ b/framework/tests/unit/test_rest_utils.py @@ -0,0 +1,91 @@ +import pytest +import requests_mock +from unittest import mock +from helpers.rest_utils import * +from .test_data import REST_ARGS, REST_URI, GET_RESPONSE, POST_RESPONSE, API_HEADERS + + +@pytest.fixture() +def rest_util_obj(): + return RestAPIUtil(**REST_ARGS) + + +def raise_exceptions(rest_util_obj, req_type: str): + if req_type == "get": + obj = getattr(rest_util_obj, 'get') + elif req_type == "post": + obj = getattr(rest_util_obj, 'post') + else: + return + with pytest.raises(RestError) as e: + obj(REST_URI) + assert e.value.error == 'HTTPError' + with pytest.raises(RestError) as e: + obj(REST_URI) + assert e.value.error == 'ConnectionError' + with pytest.raises(RestError) as e: + obj(REST_URI) + assert e.value.error == 'Timeout Error' + with pytest.raises(RestError) as e: + obj(REST_URI) + assert e.value.error == 'Request Exception' + with pytest.raises(RestError) as e: + obj(REST_URI) + assert e.value.error == 'UnexpectedError' + + +@mock.patch('requests.Session.get', side_effect=[requests.exceptions.HTTPError('whoops'), + requests.exceptions.ConnectionError('Aich'), + requests.exceptions.Timeout('Oops'), + requests.exceptions.RequestException("Hehe"), + Exception("Ayyo")]) +def test_exceptions_get(_, rest_util_obj): + raise_exceptions(rest_util_obj, "get") + + +@mock.patch('requests.Session.post', side_effect=[requests.exceptions.HTTPError('whoops'), + requests.exceptions.ConnectionError('Aich'), + requests.exceptions.Timeout('Oops'), + requests.exceptions.RequestException("Hehe"), + Exception("Ayyo")]) +def test_exceptions_post(_, rest_util_obj): + raise_exceptions(rest_util_obj, "post") + + +def test_response_get_text(): + with requests_mock.Mocker() as request_mocker: + request_mocker.get(f"https://1.1.1.1:9440/{REST_URI}", text='resp') + rest_util_obj = RestAPIUtil(**REST_ARGS) + assert rest_util_obj.get(REST_URI) == 'resp' + + +def test_response_get_text_unauthorized(): + with requests_mock.Mocker() as request_mocker: + request_mocker.get(f"https://1.1.1.1:9440/{REST_URI}", text='') + rest_util_obj = RestAPIUtil(**REST_ARGS) + with pytest.raises(ResponseError): + rest_util_obj.get(REST_URI) + + +def test_response_get_text_bad_gateway(): + with requests_mock.Mocker() as request_mocker: + request_mocker.get(f"https://1.1.1.1:9440/{REST_URI}", text='') + rest_util_obj = RestAPIUtil(**REST_ARGS) + with pytest.raises(ResponseError): + rest_util_obj.get(REST_URI) + + +def test_response_get_json(): + with requests_mock.Mocker() as request_mocker: + request_mocker.get(f"https://1.1.1.1:9440/{REST_URI}", json=GET_RESPONSE, status_code=200, + headers=API_HEADERS) + rest_util_obj = RestAPIUtil(**REST_ARGS) + assert rest_util_obj.get(REST_URI) == GET_RESPONSE + + +def test_response_post_json(): + with requests_mock.Mocker() as request_mocker: + request_mocker.post(f"https://1.1.1.1:9440/{REST_URI}", json=POST_RESPONSE, status_code=200, + headers=API_HEADERS) + rest_util_obj = RestAPIUtil(**REST_ARGS) + assert rest_util_obj.post(REST_URI) == POST_RESPONSE diff --git a/releases/1.0/README.MD b/releases/1.0/README.MD new file mode 100644 index 0000000..17a2ce2 --- /dev/null +++ b/releases/1.0/README.MD @@ -0,0 +1,44 @@ +# v1.0 + +## Imaging + +- Create 1-node cluster without 
re-imaging. +- Re-image and create a cluster for 2 or more nodes. +- Fetch free nodes from the provided block serial list. +- Ability to specify the node serials per cluster for deployment. +- Assign different CVM & Host IPs from the provided static IP range. +- Configure Cluster VIPs. + +## PC and PE configurations + +In PC, we can create/configure: + +- Flow Network Security +- Categories +- Availability Zones +- Address groups +- Service groups +- Security policies +- Enable DR +- Create Protection policies +- Create Recovery plans + +In PE/Clusters, we can create/configure: + +- Change PE password +- EULA +- Pulse +- Register to PC +- Containers +- Subnets +- DSIP +- Active Directory +- Role mappings + +## Create VM workloads + +- LAMP workloads on a CentOS VM with calm-dsl, using Self-Service (NCM). + +## Create Edge-AI workload + +- AI workload for AI training and inference Day 2 actions with calm-dsl, using Self-Service (NCM).
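+
+## Example: driving the helpers from Python (illustrative)
+
+A minimal sketch, not shipped with the release, of how the helper classes under `framework/scripts/python/helpers/v3` can be used directly once Prism Central is reachable and the `framework` directory is on `PYTHONPATH`. The PC address and credentials below are placeholders, and Flow Network Security (microseg) is used only as an example service.
+
+```python
+from helpers.rest_utils import RestAPIUtil
+from scripts.python.helpers.v3.service import Service
+from scripts.python.helpers.v3.task import Task
+
+# Placeholder PC address and credentials; replace with real values.
+pc = RestAPIUtil("10.0.0.10", user="admin", pwd="<pc-password>", port="9440", secured=True)
+
+service = Service(pc)
+if service.get_microseg_status() != "ENABLED":
+    # enable_microseg() returns a dict such as {"task_uuid": "..."}.
+    resp = service.enable_microseg()
+    # Poll the enablement task until it completes (30-second poll window per call by default).
+    Task(pc).poll([resp["task_uuid"]])
+```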