diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml new file mode 100644 index 0000000..a510ea5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -0,0 +1,49 @@ +name: Report an issue/bug +description: Report an issue/bug. +body: + - type: textarea + validations: + required: true + attributes: + label: Description + description: >- + Provide a clear and concise description of what the problem is. + - type: markdown + attributes: + value: | + ## Platform / OS + - type: dropdown + validations: + required: true + attributes: + label: What Platform / OS are you running? + options: + - Linux + - Mac OS + - Windows + - Docker + - type: markdown + attributes: + value: | + ## Version + - type: input + validations: + required: true + attributes: + label: What version are you running? + - type: markdown + attributes: + value: | + # Additional Details + - type: textarea + attributes: + label: Anything in the logs or any references that might be useful? + description: For example, error messages or stack traces. + render: txt + - type: textarea + attributes: + label: Additional information + description: > + If you have any additional information for us, use the field below. + Please note that you can attach screenshots or screen recordings here by + dragging and dropping files in the field below. diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000..9f7bc6f --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,10 @@ +blank_issues_enabled: false +contact_links: + - name: I have an issue with the Nutanix APIs + url: https://support.nutanix.com + about: Please report any Nutanix API issues to Nutanix Support. + - name: I have a question about ZTF + about: Please email solutions-automation@nutanix.com for help or to ask questions regarding ZTF. + - name: I have an issue with the documentation or would like to suggest content to be added + url: https://github.com/nutanixdev/zerotouch-framework/issues + about: Please create an issue for documentation or website-related issues. diff --git a/.github/ISSUE_TEMPLATE/feature-request.yml b/.github/ISSUE_TEMPLATE/feature-request.yml new file mode 100644 index 0000000..9bbf329 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature-request.yml @@ -0,0 +1,17 @@ +name: Request a new feature +description: Request a new feature or enhancement. +body: + - type: textarea + validations: + required: true + attributes: + label: Description + description: >- + Details of the feature and what should be added. + - type: textarea + attributes: + label: Additional information + description: > + If you have any additional information for us, use the field below. + Please note that you can attach screenshots or screen recordings here by + dragging and dropping files in the field below. diff --git a/README.md b/README.md index 5e1140d..fbe0f11 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[![License: MIT](https://img.shields.io/badge/License-MIT-brightgreen.svg)](LICENSE) [![GitHub: Actions](https://img.shields.io/badge/GitHub-Actions-blue.svg?logo=github)]() +[![License: MIT](https://img.shields.io/badge/License-MIT-brightgreen.svg)](LICENSE) [![GitHub: Actions](https://img.shields.io/badge/GitHub-Actions-blue.svg?logo=github)](ACTIONS) # Zero Touch Framework @@ -17,7 +17,8 @@ hence the name Zero Touch. The tool can also be extended to manage Day-1 and Day - Set up a local web server for downloading AOS tar and AHV iso files. 
- If AOS and AHV files are downloaded over https from Web Server, it needs to have a valid cert issued by a trusted certificate authority (CA). Certificates from a custom CA are not accepted. If - we need to skip this validation for https, we need to upgrade the Foundation version on the CVMs to 5.6.0.1 and above. + we need to skip this validation for https, we need to upgrade the Foundation version on the CVMs to 5.6.0.1 and + above. - For dark-site deployments: - Web Server needs to be set up to download the images, if there are any operations related to image upload and download (PC deploy, Image Upload, OVA upload, etc.) @@ -114,7 +115,7 @@ can manage multiple **_clusters_** at the edge. inside **virtualenv**. ```sh python main.py --workflow config-pc -f config/pc-config.yml - ``` + ``` - `config-cluster` - This will configure the newly deployed **Clusters**. An example config template is provided in [config/example-configs/workflow-configs/cluster-config.yml](config/example-configs/workflow-configs/cluster-config.yml). @@ -122,7 +123,7 @@ can manage multiple **_clusters_** at the edge. inside **virtualenv**. ```sh python main.py --workflow config-cluster -f config/cluster-config.yml - ``` + ``` - `calm-vm-workloads` - This will use calm-dsl to create VM workloads on Clusters using NCM Self-Service from single or multiple calm-dsl files. An example config template is provided @@ -131,7 +132,7 @@ can manage multiple **_clusters_** at the edge. inside **virtualenv**. ```sh python main.py --workflow calm-vm-workloads -f config/create-vm-workloads.yml - ``` + ``` - `calm-edgeai-vm-workload` - This will use calm-dsl to create Edge-AI VM workload on Clusters using NCM Self-Service from single or multiple calm-dsl files. An example config template is provided @@ -153,73 +154,77 @@ the framework expects `SCRIPT`, `SCHEMA` and `FILE` parameters to run the specif optional. `SCHEMA`, if specified, verifies the correctness of the input configuration. Below is the list of supported scripts available, preceded by an illustrative invocation. 
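+For instance, a script run follows the same pattern as the workflow runs above. The `--script` flag name below is an assumption inferred from the `SCRIPT` parameter, not confirmed against the CLI; check `python main.py --help` for the exact argument names. + +```sh +python main.py --script AddNameServersPc -f config/dns_ntp_pc.yml +``` +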
-| Script | Operation | Example config | -|:-----------------------------|:---------------------------------------|:-----------------------------------------------------------------------------------------------| -| AddAdServerPe | Adds Active Directory in PE | [authentication_pe.yml](config/example-configs/script-configs/authentication_pe.yml) | -| AddAdServerPc | Adds Active Directory in PC | [add_ad_server_pc.py](config/example-configs/script-configs/authentication_pc.yml) | -| AddAdUsersOss | Adds AdUsers in Objects | [directory_services_oss.yml](config/example-configs/script-configs/directory_services_oss.yml) | -| AddDirectoryServiceOss | Adds Active Directory in Objects | [directory_services_oss.yml](config/example-configs/script-configs/directory_services_oss.yml) | -| AddNameServersPc | Adds nameservers in PC | [dns_ntp_pc.yml](config/example-configs/script-configs/dns_ntp_pc.yml) | -| AddNameServersPe | Adds nameservers in PE | [dns_ntp_pe.yml](config/example-configs/script-configs/dns_ntp_pe.yml) | -| AddNtpServersPc | Adds NTP servers in PC | [dns_ntp_pc.yml](config/example-configs/script-configs/dns_ntp_pc.yml) | -| AddNtpServersPe | Adds NTP servers in PE | [dns_ntp_pe.yml](config/example-configs/script-configs/dns_ntp_pe.yml) | -| ConnectToAz | Connects to AZs | [remote_az.yml](config/example-configs/script-configs/remote_az.yml) | -| CreateAddressGroups | Creates Address Groups in PC | [address_groups_pc.yml](config/example-configs/script-configs/address_groups_pc.yml) | -| CreateBuckets | Creates buckets in an Objectstore | [objectstore_buckets.yml](config/example-configs/script-configs/objectstore_buckets.yml) | -| CreateAppFromDsl | Creates Calm Application from calm dsl | [create-vm-workloads.yml](config/example-configs/workflow-configs/create-vm-workloads.yml) | -| CreateNcmProject | Creates Calm projects | [create-vm-workloads.yml](config/example-configs/workflow-configs/create-vm-workloads.yml) | -| CreateContainerPe | Creates Storage container in PE | [storage_container_pe.yml](config/example-configs/script-configs/storage_container_pe.yml) | -| CreateKarbonClusterPc | Creates NKE Clusters in PC | [nke_clusters.yml](config/example-configs/script-configs/nke_clusters.yml) | -| CreateObjectStore | Creates Objectstores in PC | [objectstore_buckets.yml](config/example-configs/script-configs/objectstore_buckets.yml) | -| CreateCategoryPc | Creates Categories in PC | [category_pc.yml](config/example-configs/script-configs/category_pc.yml) | -| CreateSubnetsPc | Creates subnets in PC | [subnets_pc.yml](config/example-configs/script-configs/subnets_pc.yml) | -| CreateProtectionPolicy | Creates ProtectionPolicy in PC | [protection_policy.yml](config/example-configs/script-configs/protection_policy.yml) | -| CreateRecoveryPlan | Creates RecoveryPlan in PC | [recovery_plan.yml](config/example-configs/script-configs/recovery_plan.yml) | -| CreateRoleMappingPe | Creates Role mapping in PE | [authentication_pe.yml](config/example-configs/script-configs/authentication_pe.yml) | -| CreateRoleMappingPc | Creates Role mapping in PC | [authentication_pc.yml](config/example-configs/script-configs/authentication_pc.yml) | -| CreateNetworkSecurityPolicy | Creates Security policies in PC | [security_policy.yml](config/example-configs/script-configs/security_policy.yml) | -| CreateNcmAccount | Creates NTNX PC account in NCM | [ncm_account_users.yml](config/example-configs/script-configs/ncm_account_users.yml) | -| CreateNcmUser | Creates users in NCM | 
[ncm_account_users.yml](config/example-configs/script-configs/ncm_account_users.yml) | -| CreateServiceGroups | Creates Service Groups in PC | [service_groups.yml](config/example-configs/script-configs/service_groups.yml) | -| EnableDR | Enables DR in PC | [pc_creds.yml](config/example-configs/script-configs/pc_creds.yml) | -| EnableMicrosegmentation | Enables Flow in PC | [pc_creds.yml](config/example-configs/script-configs/pc_creds.yml) | -| EnableNke | Enables Karbon/ NKE in PC | [pc_creds.yml](config/example-configs/script-configs/pc_creds.yml) | -| EnableObjects | Enables Objects in PC | [pc_creds.yml](config/example-configs/script-configs/pc_creds.yml) | -| InitCalmDsl | Initialize calm dsl | [create-vm-workloads.yml](config/example-configs/workflow-configs/create-vm-workloads.yml) | -| ChangeDefaultAdminPasswordPe | Change PE admin password | [initial_cluster_config.yml](config/example-configs/script-configs/initial_cluster_config.yml) | -| AcceptEulaPe | Accept Eula PE | [initial_cluster_config.yml](config/example-configs/script-configs/initial_cluster_config.yml) | -| UpdatePulsePe | Update Pulse PE | [initial_cluster_config.yml](config/example-configs/script-configs/initial_cluster_config.yml) | -| ChangeDefaultAdminPasswordPc | Change PC password | [initial_pc_config.yml](config/example-configs/script-configs/initial_pc_config.yml) | -| AcceptEulaPc | Accept Eula PC | [initial_pc_config.yml](config/example-configs/script-configs/initial_pc_config.yml) | -| UpdatePulsePc | Update Pulse PC | [initial_pc_config.yml](config/example-configs/script-configs/initial_pc_config.yml) | -| PcImageUpload | Uploads images to PC clusters | [pc_image.yml](config/example-configs/script-configs/pc_image.yml) | -| PcOVAUpload | Uploads OVAs to PC clusters | [pc_ova.yml](config/example-configs/script-configs/pc_ova.yml) | -| RegisterToPc | Registers clusters to PC | [register_to_pc.yml](config/example-configs/script-configs/register_to_pc.yml) | -| ShareBucket | Shares a bucket with a list of users | [objectstore_buckets.yml](config/example-configs/script-configs/objectstore_buckets.yml) | -| UpdateDsip | Updates DSIP in PE | [update_dsip.yml](config/example-configs/script-configs/update_dsip.yml) | -| EnableFC | Enables Foundation Central in FC | [pc_creds.yml](config/example-configs/script-configs/pc_creds.yml) | -| GenerateFcApiKey | Generates Foundation Central API Key | [generate_fc_api_key.yml](config/example-configs/script-configs/generate_fc_api_key.yml) | -| DeleteSubnetsPc | Delete Subnets in PC | [subnets_pc.yml](config/example-configs/script-configs/delete_subnets_pc.yml) | -| DeleteSubnetsPe | Delete Subnets in PE | [subnets_pe.yml](config/example-configs/script-configs/delete_subnets_pc.yml) | -| DeleteAdServerPc | Delete Active Directory in PC | [authentication_pc.yml](config/example-configs/script-configs/authentication_pc.yml) | -| DeleteAdServerPe | Delete Active Directory in PE | [authentication_pe.yml](config/example-configs/script-configs/authentication_pe.yml) | -| DeleteAddressGroups | Delete Address Groups in PC | [address_groups_pc.yml](config/example-configs/script-configs/address_groups_pc.yml) | -| DeleteNameServersPc | Delete Name Servers in PC | [dns_ntp_pc.yml](config/example-configs/script-configs/authentication_pc.yml) | -| DeleteNameServersPe | Delete Name Servers in PE | [dns_ntp_pe.yml](config/example-configs/script-configs/authentication_pc.yml) | -| DeleteNtpServersPc | Delete NTP Servers in PC | 
[dns_ntp_pc.yml](config/example-configs/script-configs/authentication_pc.yml) | -| DeleteNtpServersPe | Delete NTP Servers in PE | [dns_ntp_pe.yml](config/example-configs/script-configs/authentication_pc.yml) | -| DeleteCategoryPc | Delete Categories in PC | [category_pc.yml](config/example-configs/script-configs/authentication_pc.yml) | -| DeleteProtectionPolicy | Delete Protection Policies in PC | [protection_policy.yml](config/example-configs/script-configs/protection_policy.yml) | -| DeleteRecoveryPlan | Delete Recovery Plans in PC | [recovery_plan.yml](config/example-configs/script-configs/recovery_plan.yml) | -| DeleteRoleMappingPc | Delete Role Mappings in PC | [authentication_pc.yml](config/example-configs/script-configs/authentication_pc.yml) | -| DeleteRoleMappingPe | Delete Role Mappings in PE | [authentication_pc.yml](config/example-configs/script-configs/authentication_pc.yml) | -| DeleteNetworkSecurityPolicy | Delete Security Policies in PC | [security_policy.yml](config/example-configs/script-configs/security_policy.yml) | -| DeleteServiceGroups | Delete Service Groups in PC | [service_groups.yml](config/example-configs/script-configs/service_groups.yml) | -| DeleteVmPc | Delete VMs in PC | [delete_vms_pc.yml](config/example-configs/script-configs/authentication_pc.yml) | -| DeleteVmPe | Delete VMs in PE | [delete_vms_pe.yml](config/example-configs/script-configs/authentication_pc.yml) | -| DisconnectAz | Disconnects Availability Zones in PC | [remote_az.yml](config/example-configs/script-configs/remote_az.yml) | -| PcImageDelete | Delete Images in PC | [pc_image.yml](config/example-configs/script-configs/pc_image.yml) | -| PcOVADelete | Delete OVAs in PC | [pc_ova.yml](config/example-configs/script-configs/pc_ova.yml) | +| Script | Operation | Example config | +|:-----------------------------|:-----------------------------------------------|:-----------------------------------------------------------------------------------------------| +| AddAdServerPe | Adds Active Directory in PE | [authentication_pe.yml](config/example-configs/script-configs/authentication_pe.yml) | +| AddAdServerPc | Adds Active Directory in PC | [authentication_pc.yml](config/example-configs/script-configs/authentication_pc.yml) | +| AddAdUsersOss | Adds AdUsers in Objects | [directory_services_oss.yml](config/example-configs/script-configs/directory_services_oss.yml) | +| AddDirectoryServiceOss | Adds Active Directory in Objects | [directory_services_oss.yml](config/example-configs/script-configs/directory_services_oss.yml) | +| AddNameServersPc | Adds nameservers in PC | [dns_ntp_pc.yml](config/example-configs/script-configs/dns_ntp_pc.yml) | +| AddNameServersPe | Adds nameservers in PE | [dns_ntp_pe.yml](config/example-configs/script-configs/dns_ntp_pe.yml) | +| AddNtpServersPc | Adds NTP servers in PC | [dns_ntp_pc.yml](config/example-configs/script-configs/dns_ntp_pc.yml) | +| AddNtpServersPe | Adds NTP servers in PE | [dns_ntp_pe.yml](config/example-configs/script-configs/dns_ntp_pe.yml) | +| ConnectToAz | Connects to AZs | [remote_az.yml](config/example-configs/script-configs/remote_az.yml) | +| CreateAddressGroups | Creates Address Groups in PC | [address_groups_pc.yml](config/example-configs/script-configs/address_groups_pc.yml) | +| CreateBuckets | Creates buckets in an Objectstore | [objectstore_buckets.yml](config/example-configs/script-configs/objectstore_buckets.yml) | +| CreateAppFromDsl | Creates Calm Application from calm dsl | 
[create-vm-workloads.yml](config/example-configs/workflow-configs/create-vm-workloads.yml) | +| CreateNcmProject | Creates Calm projects | [create-vm-workloads.yml](config/example-configs/workflow-configs/create-vm-workloads.yml) | +| CreateContainerPe | Creates Storage container in PE | [storage_container_pe.yml](config/example-configs/script-configs/storage_container_pe.yml) | +| CreateKarbonClusterPc | Creates NKE Clusters in PC | [nke_clusters.yml](config/example-configs/script-configs/nke_clusters.yml) | +| CreateObjectStore | Creates Objectstores in PC | [objectstore_buckets.yml](config/example-configs/script-configs/objectstore_buckets.yml) | +| CreateCategoryPc | Creates Categories in PC | [category_pc.yml](config/example-configs/script-configs/category_pc.yml) | +| CreateSubnetsPc | Creates subnets in PC | [subnets_pc.yml](config/example-configs/script-configs/subnets_pc.yml) | +| CreateProtectionPolicy | Creates ProtectionPolicy in PC | [protection_policy.yml](config/example-configs/script-configs/protection_policy.yml) | +| CreateRecoveryPlan | Creates RecoveryPlan in PC | [recovery_plan.yml](config/example-configs/script-configs/recovery_plan.yml) | +| CreateRoleMappingPe | Creates Role mapping in PE | [authentication_pe.yml](config/example-configs/script-configs/authentication_pe.yml) | +| CreateRoleMappingPc | Creates Role mapping in PC | [authentication_pc.yml](config/example-configs/script-configs/authentication_pc.yml) | +| CreateNetworkSecurityPolicy | Creates Security policies in PC | [security_policy.yml](config/example-configs/script-configs/security_policy.yml) | +| CreateNcmAccount | Creates NTNX PC account in NCM | [ncm_account_users.yml](config/example-configs/script-configs/ncm_account_users.yml) | +| CreateNcmUser | Creates users in NCM | [ncm_account_users.yml](config/example-configs/script-configs/ncm_account_users.yml) | +| CreateServiceGroups | Creates Service Groups in PC | [service_groups.yml](config/example-configs/script-configs/service_groups.yml) | +| EnableDR | Enables DR in PC | [pc_creds.yml](config/example-configs/script-configs/pc_creds.yml) | +| EnableMicrosegmentation | Enables Flow in PC | [pc_creds.yml](config/example-configs/script-configs/pc_creds.yml) | +| EnableNke | Enables Karbon/ NKE in PC | [pc_creds.yml](config/example-configs/script-configs/pc_creds.yml) | +| EnableObjects | Enables Objects in PC | [pc_creds.yml](config/example-configs/script-configs/pc_creds.yml) | +| InitCalmDsl | Initialize calm dsl | [create-vm-workloads.yml](config/example-configs/workflow-configs/create-vm-workloads.yml) | +| ChangeDefaultAdminPasswordPe | Change PE admin password | [initial_cluster_config.yml](config/example-configs/script-configs/initial_cluster_config.yml) | +| AcceptEulaPe | Accept Eula PE | [initial_cluster_config.yml](config/example-configs/script-configs/initial_cluster_config.yml) | +| UpdatePulsePe | Update Pulse PE | [initial_cluster_config.yml](config/example-configs/script-configs/initial_cluster_config.yml) | +| ChangeDefaultAdminPasswordPc | Change PC password | [initial_pc_config.yml](config/example-configs/script-configs/initial_pc_config.yml) | +| AcceptEulaPc | Accept Eula PC | [initial_pc_config.yml](config/example-configs/script-configs/initial_pc_config.yml) | +| UpdatePulsePc | Update Pulse PC | [initial_pc_config.yml](config/example-configs/script-configs/initial_pc_config.yml) | +| PcImageUpload | Uploads images to PC clusters | [pc_image.yml](config/example-configs/script-configs/pc_image.yml) | +| PcOVAUpload | Uploads 
OVAs to PC clusters | [pc_ova.yml](config/example-configs/script-configs/pc_ova.yml) | +| RegisterToPc | Registers clusters to PC | [register_to_pc.yml](config/example-configs/script-configs/register_to_pc.yml) | +| ShareBucket | Shares a bucket with a list of users | [objectstore_buckets.yml](config/example-configs/script-configs/objectstore_buckets.yml) | +| UpdateDsip | Updates DSIP in PE | [update_dsip.yml](config/example-configs/script-configs/update_dsip.yml) | +| EnableFC | Enables Foundation Central in PC | [pc_creds.yml](config/example-configs/script-configs/pc_creds.yml) | +| GenerateFcApiKey | Generates Foundation Central API Key | [generate_fc_api_key.yml](config/example-configs/script-configs/generate_fc_api_key.yml) | +| DeleteSubnetsPc | Delete Subnets in PC | [delete_subnets_pc.yml](config/example-configs/script-configs/delete_subnets_pc.yml) | +| DeleteSubnetsPe | Delete Subnets in PE | [delete_subnets_pc.yml](config/example-configs/script-configs/delete_subnets_pc.yml) | +| DeleteAdServerPc | Delete Active Directory in PC | [authentication_pc.yml](config/example-configs/script-configs/authentication_pc.yml) | +| DeleteAdServerPe | Delete Active Directory in PE | [authentication_pe.yml](config/example-configs/script-configs/authentication_pe.yml) | +| DeleteAddressGroups | Delete Address Groups in PC | [address_groups_pc.yml](config/example-configs/script-configs/address_groups_pc.yml) | +| DeleteNameServersPc | Delete Name Servers in PC | [dns_ntp_pc.yml](config/example-configs/script-configs/dns_ntp_pc.yml) | +| DeleteNameServersPe | Delete Name Servers in PE | [dns_ntp_pe.yml](config/example-configs/script-configs/dns_ntp_pe.yml) | +| DeleteNtpServersPc | Delete NTP Servers in PC | [dns_ntp_pc.yml](config/example-configs/script-configs/dns_ntp_pc.yml) | +| DeleteNtpServersPe | Delete NTP Servers in PE | [dns_ntp_pe.yml](config/example-configs/script-configs/dns_ntp_pe.yml) | +| DeleteCategoryPc | Delete Categories in PC | [category_pc.yml](config/example-configs/script-configs/category_pc.yml) | +| DeleteProtectionPolicy | Delete Protection Policies in PC | [protection_policy.yml](config/example-configs/script-configs/protection_policy.yml) | +| DeleteRecoveryPlan | Delete Recovery Plans in PC | [recovery_plan.yml](config/example-configs/script-configs/recovery_plan.yml) | +| DeleteRoleMappingPc | Delete Role Mappings in PC | [authentication_pc.yml](config/example-configs/script-configs/authentication_pc.yml) | +| DeleteRoleMappingPe | Delete Role Mappings in PE | [authentication_pe.yml](config/example-configs/script-configs/authentication_pe.yml) | +| DeleteNetworkSecurityPolicy | Delete Security Policies in PC | [security_policy.yml](config/example-configs/script-configs/security_policy.yml) | +| DeleteServiceGroups | Delete Service Groups in PC | [service_groups.yml](config/example-configs/script-configs/service_groups.yml) | +| DeleteVmPc | Delete VMs in PC | [delete_vms_pc.yml](config/example-configs/script-configs/delete_vms_pc.yml) | +| DeleteVmPe | Delete VMs in PE | [delete_vms_pe.yml](config/example-configs/script-configs/delete_vms_pe.yml) | +| DisconnectAz | Disconnects Availability Zones in PC | [remote_az.yml](config/example-configs/script-configs/remote_az.yml) | +| PcImageDelete | Delete Images in PC | [pc_image.yml](config/example-configs/script-configs/pc_image.yml) | +| PcOVADelete | Delete OVAs in PC | [pc_ova.yml](config/example-configs/script-configs/pc_ova.yml) | +| CreateIdp | Create SAML2 compliant Identity Provider 
in PC | [saml_idp.yml](config/example-configs/script-configs/saml_idp.yml) | +| UpdateCvmFoundation | Update CVM Foundation Version | [update_cvm_foundation.yml](config/example-configs/script-configs/update_cvm_foundation.yml) | +| HaReservation | Enable/Disable HA Reservation in PE | [ha.yml](config/example-configs/script-configs/ha.yml) | +| RebuildCapacityReservation | Enable/Disable Rebuild Capacity Reservation in PE | [rebuild_capcity_reservation.yml](config/example-configs/script-configs/rebuild_capcity_reservation.yml) | To summarize, the input files can either be **json** or **yaml** files. You can find example configurations in [config/example-configs](config/example-configs) directory. Copy the required config file, inside [config](config) diff --git a/calm-dsl-bps/blueprints/edge_ai/linux/blueprint-lin.py b/calm-dsl-bps/blueprints/edge_ai/linux/blueprint-lin.py index 13b2701..09815d2 100644 --- a/calm-dsl-bps/blueprints/edge_ai/linux/blueprint-lin.py +++ b/calm-dsl-bps/blueprints/edge_ai/linux/blueprint-lin.py @@ -15,33 +15,34 @@ for bp in bp_input: if bp["name"] == "edge-ai-test": - #print('bp name is {}'.format(bp["name"])) + # print('bp name is {}'.format(bp["name"])) subnet_name = bp["subnet"] - #print('subnet is {}'.format(subnet_name)) + # print('subnet is {}'.format(subnet_name)) cluster_name = bp["cluster"] - #print('cluster is {}'.format(cluster_name)) + # print('cluster is {}'.format(cluster_name)) image_name = bp["image"] - #print('image is {}'.format(image_name)) + # print('image is {}'.format(image_name)) AHVProvider = get_provider("AHV_VM") ApiObj = AHVProvider.get_api_obj() acct_ref = Ref.Account(ACCOUNT_NAME) acct_data = acct_ref.compile() account_uuid = acct_data["uuid"] res_subnets = ApiObj.subnets(account_uuid=account_uuid) - #print('subnet data is {}'.format(res_subnets)) + # print('subnet data is {}'.format(res_subnets)) net_name_uuid_list = [] for entity in res_subnets.get("entities", []): - if entity['status']['cluster_reference']['name'] == cluster_name and entity['status']['name'] == subnet_name: + if entity['status']['cluster_reference']['name'] == cluster_name and entity['status'][ + 'name'] == subnet_name: x = {"name": entity['status']['name'], "uuid": entity['metadata']['uuid']} net_name_uuid_list.append(x) - #print('net list is {}'.format(net_name_uuid_list)) + # print('net list is {}'.format(net_name_uuid_list)) res_images = ApiObj.images(account_uuid=account_uuid) image_name_uuid_list = [] for entity in res_images.get("entities", []): if entity['status']['name'] == image_name: x = {"name": entity['status']['name'], "uuid": entity['metadata']['uuid']} image_name_uuid_list.append(x) - #print('image list is {}'.format(image_name_uuid_list)) + # print('image list is {}'.format(image_name_uuid_list)) else: raise Exception("Cluster, Subnet or Image not specified") @@ -56,75 +57,104 @@ # Secret Variables if file_exists(f"{bp_root_folder}/.local/edge-key"): BP_CRED_cred_os_KEY = read_local_file(f"{bp_root_folder}/.local/edge-key") - #print(BP_CRED_cred_os_KEY) + # print(BP_CRED_cred_os_KEY) else: BP_CRED_cred_os_KEY = "nutanix" if file_exists(f"{bp_root_folder}/.local/edge-key.pub"): BP_CRED_cred_os_public_KEY = read_local_file(f"{bp_root_folder}/.local/edge-key.pub") - #print(BP_CRED_cred_os_public_KEY) + # print(BP_CRED_cred_os_public_KEY) else: BP_CRED_cred_os_public_KEY = "nutanix" # Credentials -BP_CRED_cred_os = basic_cred("ubuntu",BP_CRED_cred_os_KEY,name="cred_os",type="KEY",default=True) +BP_CRED_cred_os = basic_cred("ubuntu", 
BP_CRED_cred_os_KEY, name="cred_os", type="KEY", default=True) + class VM_Provision(Service): @action def NGTTools_Tasks(): - CalmTask.Exec.ssh(name="install_NGT",filename=INSTALL_SCRIPTS_DIRECTORY + "/ngt/install_ngt.sh",target=ref(VM_Provision),) + CalmTask.Exec.ssh(name="install_NGT", filename=INSTALL_SCRIPTS_DIRECTORY + "/ngt/install_ngt.sh", + target=ref(VM_Provision), ) @action def Configure_VM(): - CalmTask.Exec.ssh(name="ssh_key_copy",filename=INSTALL_SCRIPTS_DIRECTORY + "/ssh_key_copy.sh",target=ref(VM_Provision),) - CalmTask.Exec.ssh(name="setup",filename=INSTALL_SCRIPTS_DIRECTORY + "/setup.sh",target=ref(VM_Provision),) - CalmTask.Exec.ssh(name="validate driver",filename=INSTALL_SCRIPTS_DIRECTORY + "/validate_driver.sh",target=ref(VM_Provision),) + CalmTask.Exec.ssh(name="ssh_key_copy", filename=INSTALL_SCRIPTS_DIRECTORY + "/ssh_key_copy.sh", + target=ref(VM_Provision), ) + CalmTask.Exec.ssh(name="setup", filename=INSTALL_SCRIPTS_DIRECTORY + "/setup.sh", target=ref(VM_Provision), ) + CalmTask.Exec.ssh(name="validate driver", filename=INSTALL_SCRIPTS_DIRECTORY + "/validate_driver.sh", + target=ref(VM_Provision), ) + class AHVVM_Small(Substrate): os_type = "Linux" provider_type = "AHV_VM" provider_spec = read_ahv_spec("specs/ahv-provider-spec.yaml") provider_spec_editables = read_spec(os.path.join("specs", "create_spec_editables.yaml")) - readiness_probe = readiness_probe(connection_type="SSH",disabled=False,retries="5",connection_port=22,address="@@{platform.status.resources.nic_list[0].ip_endpoint_list[0].ip}@@",delay_secs="30",credential=ref(BP_CRED_cred_os),) + readiness_probe = readiness_probe(connection_type="SSH", disabled=False, retries="5", connection_port=22, + address="@@{platform.status.resources.nic_list[0].ip_endpoint_list[0].ip}@@", + delay_secs="30", credential=ref(BP_CRED_cred_os), ) # update CPU, Memory based on environment specific configs =============================================vvvvvv - provider_spec.spec["resources"]["num_sockets"] = TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["small"]["num_sockets"] - provider_spec.spec["resources"]["num_vcpus_per_socket"] = TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["small"]["num_vcpus_per_socket"] - provider_spec.spec["resources"]["memory_size_mib"] = TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["small"]["memory_size_mib"] + provider_spec.spec["resources"]["num_sockets"] = TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["small"][ + "num_sockets"] + provider_spec.spec["resources"]["num_vcpus_per_socket"] = \ + TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["small"]["num_vcpus_per_socket"] + provider_spec.spec["resources"]["memory_size_mib"] = TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["small"][ + "memory_size_mib"] # update nic ===========================================================================================vvvvvv provider_spec.spec["resources"]["nic_list"][0]["subnet_reference"]["name"] = str(net_name_uuid_list[0]['name']) provider_spec.spec["resources"]["nic_list"][0]["subnet_reference"]["uuid"] = str(net_name_uuid_list[0]['uuid']) # update image ==========================================================================================vvvvvv - provider_spec.spec["resources"]["disk_list"][0]["data_source_reference"]["name"] = str(image_name_uuid_list[0]['name']) - provider_spec.spec["resources"]["disk_list"][0]["data_source_reference"]["uuid"] = str(image_name_uuid_list[0]['uuid']) + provider_spec.spec["resources"]["disk_list"][0]["data_source_reference"]["name"] = str( + 
image_name_uuid_list[0]['name']) + provider_spec.spec["resources"]["disk_list"][0]["data_source_reference"]["uuid"] = str( + image_name_uuid_list[0]['uuid']) + class AHVVM_Medium(AHVVM_Small): provider_spec = read_ahv_spec("specs/ahv-provider-spec.yaml") provider_spec_editables = read_spec(os.path.join("specs", "create_spec_editables.yaml")) - readiness_probe = readiness_probe(connection_type="SSH",disabled=False,retries="5",connection_port=22,address="@@{platform.status.resources.nic_list[0].ip_endpoint_list[0].ip}@@",delay_secs="30",credential=ref(BP_CRED_cred_os),) + readiness_probe = readiness_probe(connection_type="SSH", disabled=False, retries="5", connection_port=22, + address="@@{platform.status.resources.nic_list[0].ip_endpoint_list[0].ip}@@", + delay_secs="30", credential=ref(BP_CRED_cred_os), ) # update CPU, Memory based on environment specific configs =============================================vvvvvv - provider_spec.spec["resources"]["num_sockets"] = TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["medium"]["num_sockets"] - provider_spec.spec["resources"]["num_vcpus_per_socket"] = TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["medium"]["num_vcpus_per_socket"] - provider_spec.spec["resources"]["memory_size_mib"] = TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["medium"]["memory_size_mib"] + provider_spec.spec["resources"]["num_sockets"] = TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["medium"][ + "num_sockets"] + provider_spec.spec["resources"]["num_vcpus_per_socket"] = \ + TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["medium"]["num_vcpus_per_socket"] + provider_spec.spec["resources"]["memory_size_mib"] = TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["medium"][ + "memory_size_mib"] # update nic ===========================================================================================vvvvvv provider_spec.spec["resources"]["nic_list"][0]["subnet_reference"]["name"] = str(net_name_uuid_list[0]['name']) provider_spec.spec["resources"]["nic_list"][0]["subnet_reference"]["uuid"] = str(net_name_uuid_list[0]['uuid']) # update image ==========================================================================================vvvvvv - provider_spec.spec["resources"]["disk_list"][0]["data_source_reference"]["name"] = str(image_name_uuid_list[0]['name']) - provider_spec.spec["resources"]["disk_list"][0]["data_source_reference"]["uuid"] = str(image_name_uuid_list[0]['uuid']) + provider_spec.spec["resources"]["disk_list"][0]["data_source_reference"]["name"] = str( + image_name_uuid_list[0]['name']) + provider_spec.spec["resources"]["disk_list"][0]["data_source_reference"]["uuid"] = str( + image_name_uuid_list[0]['uuid']) + class AHVVM_Large(AHVVM_Small): provider_spec = read_ahv_spec("specs/ahv-provider-spec.yaml") provider_spec_editables = read_spec(os.path.join("specs", "create_spec_editables.yaml")) - readiness_probe = readiness_probe(connection_type="SSH",disabled=False,retries="5",connection_port=22,address="@@{platform.status.resources.nic_list[0].ip_endpoint_list[0].ip}@@",delay_secs="30",credential=ref(BP_CRED_cred_os),) + readiness_probe = readiness_probe(connection_type="SSH", disabled=False, retries="5", connection_port=22, + address="@@{platform.status.resources.nic_list[0].ip_endpoint_list[0].ip}@@", + delay_secs="30", credential=ref(BP_CRED_cred_os), ) # update CPU, Memory based on environment specific configs =============================================vvvvvv - provider_spec.spec["resources"]["num_sockets"] = 
TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["large"]["num_sockets"] - provider_spec.spec["resources"]["num_vcpus_per_socket"] = TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["large"]["num_vcpus_per_socket"] - provider_spec.spec["resources"]["memory_size_mib"] = TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["large"]["memory_size_mib"] + provider_spec.spec["resources"]["num_sockets"] = TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["large"][ + "num_sockets"] + provider_spec.spec["resources"]["num_vcpus_per_socket"] = \ + TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["large"]["num_vcpus_per_socket"] + provider_spec.spec["resources"]["memory_size_mib"] = TSHIRT_SPECS["linux-os"]["global"]["tshirt_sizes"]["large"][ + "memory_size_mib"] # update nic ===========================================================================================vvvvvv provider_spec.spec["resources"]["nic_list"][0]["subnet_reference"]["name"] = str(net_name_uuid_list[0]['name']) provider_spec.spec["resources"]["nic_list"][0]["subnet_reference"]["uuid"] = str(net_name_uuid_list[0]['uuid']) # update image ==========================================================================================vvvvvv - provider_spec.spec["resources"]["disk_list"][0]["data_source_reference"]["name"] = str(image_name_uuid_list[0]['name']) - provider_spec.spec["resources"]["disk_list"][0]["data_source_reference"]["uuid"] = str(image_name_uuid_list[0]['uuid']) + provider_spec.spec["resources"]["disk_list"][0]["data_source_reference"]["name"] = str( + image_name_uuid_list[0]['name']) + provider_spec.spec["resources"]["disk_list"][0]["data_source_reference"]["uuid"] = str( + image_name_uuid_list[0]['uuid']) + class AHV_Package_Sml(Package): services = [ref(VM_Provision)] @@ -134,12 +164,15 @@ def __install__(): VM_Provision.NGTTools_Tasks(name="Install NGT") VM_Provision.Configure_VM(name="Configure VM") + class AHV_Package_Med(AHV_Package_Sml): services = [ref(VM_Provision)] + class AHV_Package_Lrg(AHV_Package_Sml): services = [ref(VM_Provision)] + class AHV_Deployment_Sml(Deployment): min_replicas = "1" max_replicas = "100" @@ -147,6 +180,7 @@ class AHV_Deployment_Sml(Deployment): packages = [ref(AHV_Package_Sml)] substrate = ref(AHVVM_Small) + class AHV_Deployment_Medium(Deployment): min_replicas = "1" max_replicas = "100" @@ -154,6 +188,7 @@ class AHV_Deployment_Medium(Deployment): packages = [ref(AHV_Package_Med)] substrate = ref(AHVVM_Medium) + class AHV_Deployment_Large(Deployment): min_replicas = "1" max_replicas = "100" @@ -161,106 +196,187 @@ class AHV_Deployment_Large(Deployment): packages = [ref(AHV_Package_Lrg)] substrate = ref(AHVVM_Large) + class Common(Profile): - os_cred_public_key = CalmVariable.Simple.Secret(BP_CRED_cred_os_public_KEY,label="OS Cred Public Key",is_hidden=True,description="SSH public key for OS CRED user.") - NFS_PATH = CalmVariable.Simple("",label="NFS Share Path",regex="^(?:[0-9]{1,3}\.){3}[0-9]{1,3}:(\/[a-zA-Z0-9_-]+)+$",validate_regex=True,is_mandatory=True,is_hidden=False,runtime=True,description="Enter the path to your IP NFS share. 
For example 10.10.10.10:/sharename") - NFS_MOUNT_POINT = CalmVariable.Simple("/mnt/data",label="NFS Mount Point",is_mandatory=False,is_hidden=True,runtime=False,description="Local NFS Mount Point") - WORKER = CalmVariable.Simple("1",label="",is_mandatory=False,is_hidden=False,runtime=True,description="") - NVIDIA_DRIVER_VERSION = CalmVariable.WithOptions.Predefined.string(["515.86.01"],label="Please select the NVidia driver version to be used.",default="515.86.01",is_mandatory=True,is_hidden=False,runtime=True,description="",) - NFS_WORKING_DIRECTORY = CalmVariable.WithOptions(["nai-dl-bench-"],label="AI Training Working Directory",default="nai-dl-bench-",is_mandatory=True,is_hidden=False,runtime=True,description="",) + os_cred_public_key = CalmVariable.Simple.Secret(BP_CRED_cred_os_public_KEY, label="OS Cred Public Key", + is_hidden=True, description="SSH public key for OS CRED user.") + NFS_PATH = CalmVariable.Simple("", label="NFS Share Path", + regex="^(?:[0-9]{1,3}\.){3}[0-9]{1,3}:(\/[a-zA-Z0-9_-]+)+$", validate_regex=True, + is_mandatory=True, is_hidden=False, runtime=True, + description="Enter the path to your IP NFS share. For example 10.10.10.10:/sharename") + NFS_MOUNT_POINT = CalmVariable.Simple("/mnt/data", label="NFS Mount Point", is_mandatory=False, is_hidden=True, + runtime=False, description="Local NFS Mount Point") + WORKER = CalmVariable.Simple("1", label="", is_mandatory=False, is_hidden=False, runtime=True, description="") + NVIDIA_DRIVER_VERSION = CalmVariable.WithOptions.Predefined.string(["515.86.01"], + label="Please select the NVidia driver version to be used.", + default="515.86.01", is_mandatory=True, + is_hidden=False, runtime=True, description="", ) + NFS_WORKING_DIRECTORY = CalmVariable.WithOptions(["nai-dl-bench-"], label="AI Training Working Directory", + default="nai-dl-bench-", is_mandatory=True, is_hidden=False, + runtime=True, description="", ) @action def NaiDlBench_Data_Setup(name="NAI DL Bench Data Setup"): - CalmTask.Exec.ssh(name="NaiDlBench_Data_Setup",filename=DAY2_SCRIPTS_DIRECTORY + "/nai_dl_bench_data.sh",target=ref(VM_Provision),) - NAI_DL_BENCH_VERSION = CalmVariable.WithOptions.Predefined.string(["0.2.3"],label="Please select the version to be used.",default="0.2.3",is_mandatory=True,is_hidden=False,runtime=True,description="",) + CalmTask.Exec.ssh(name="NaiDlBench_Data_Setup", filename=DAY2_SCRIPTS_DIRECTORY + "/nai_dl_bench_data.sh", + target=ref(VM_Provision), ) + NAI_DL_BENCH_VERSION = CalmVariable.WithOptions.Predefined.string(["0.2.3"], + label="Please select the version to be used.", + default="0.2.3", is_mandatory=True, + is_hidden=False, runtime=True, + description="", ) @action def AITraining(name="AI Training"): - CalmTask.Exec.ssh(name="AI Training",filename=DAY2_SCRIPTS_DIRECTORY + "/ai_training.sh",target=ref(VM_Provision),) - NAI_DL_BENCH_VERSION = CalmVariable.WithOptions.Predefined.string(["0.2.3"],label="Please select the version to be used.",default="0.2.3",is_mandatory=True,is_hidden=False,runtime=True,description="",) - AI_TRAINING_OUTPUT_FOLDER = CalmVariable.Simple("training-output",label="AI Training Output Folder",is_mandatory=True,is_hidden=False,runtime=True,description="",) - AI_TRAINING_OUTPUT_FILE = CalmVariable.Simple("resnet.pth",label="AI Training Output File",is_mandatory=True,is_hidden=False,runtime=True,description="",) - EXTRA_PARAMS = CalmVariable.Simple("",label="AI Training Optional Parameters",is_mandatory=False,is_hidden=False,runtime=True,description="OPTIONAL - Leave blank if not needed - Enter any 
extra parameters needed, e.g., --quiet, etc.",) + CalmTask.Exec.ssh(name="AI Training", filename=DAY2_SCRIPTS_DIRECTORY + "/ai_training.sh", + target=ref(VM_Provision), ) + NAI_DL_BENCH_VERSION = CalmVariable.WithOptions.Predefined.string(["0.2.3"], + label="Please select the version to be used.", + default="0.2.3", is_mandatory=True, + is_hidden=False, runtime=True, + description="", ) + AI_TRAINING_OUTPUT_FOLDER = CalmVariable.Simple("training-output", label="AI Training Output Folder", + is_mandatory=True, is_hidden=False, runtime=True, + description="", ) + AI_TRAINING_OUTPUT_FILE = CalmVariable.Simple("resnet.pth", label="AI Training Output File", is_mandatory=True, + is_hidden=False, runtime=True, description="", ) + EXTRA_PARAMS = CalmVariable.Simple("", label="AI Training Optional Parameters", is_mandatory=False, + is_hidden=False, runtime=True, + description="OPTIONAL - Leave blank if not needed - Enter any extra parameters needed, e.g., --quiet, etc.", ) @action def AIBatchInference(name="AI Batch Inference"): - CalmTask.Exec.ssh(name="AI Batch Inference",filename=DAY2_SCRIPTS_DIRECTORY + "/ai_inference.sh",target=ref(VM_Provision),) - NAI_DL_BENCH_VERSION = CalmVariable.WithOptions.Predefined.string(["0.2.3"],label="Please select the version to be used.",default="0.2.3",is_mandatory=True,is_hidden=False,runtime=True,description="",) - AI_TRAINING_OUTPUT_FOLDER = CalmVariable.Simple("",label="AI Training Output Folder",is_mandatory=False,is_hidden=False,runtime=True,description="OPTIONAL - Leave blank if not needed",) - AI_TRAINING_OUTPUT_FOLDER_DEFAULT = CalmVariable.Simple("training-output",label="AI Training Output Folder Default Value",is_mandatory=False,is_hidden=True,runtime=False,description="Default value for the AI Training Output Folder",) - AI_TRAINING_OUTPUT_FILE = CalmVariable.Simple("",label="AI Training Output File",is_mandatory=False,is_hidden=False,runtime=True,description="OPTIONAL - Leave blank if not needed",) - AI_TRAINING_OUTPUT_FILE_DEFAULT = CalmVariable.Simple("resnet.pth",label="AI Training Output File Default Value",is_mandatory=False,is_hidden=True,runtime=False,description="Default value for the AI Training Output File",) - EXTRA_PARAMS = CalmVariable.Simple("",label="AI Inference Optional Extra Parameters",is_mandatory=False,is_hidden=False,runtime=True,description="OPTIONAL - Leave blank if not needed - Enter any extra parameters needed, e.g., --quiet, etc.",) - AI_MODEL_NAME = CalmVariable.Simple("",label="AI Training Model Name",is_mandatory=False,is_hidden=False,runtime=True,description="OPTIONAL - Leave blank if not needed - Enter the AI model name if not using the default value",) - AI_MODEL_NAME_DEFAULT = CalmVariable.Simple("resnet50",label="AI Training Model Name Default Value",is_mandatory=False,is_hidden=True,runtime=False,description="Default value for the AI model name",) + CalmTask.Exec.ssh(name="AI Batch Inference", filename=DAY2_SCRIPTS_DIRECTORY + "/ai_inference.sh", + target=ref(VM_Provision), ) + NAI_DL_BENCH_VERSION = CalmVariable.WithOptions.Predefined.string(["0.2.3"], + label="Please select the version to be used.", + default="0.2.3", is_mandatory=True, + is_hidden=False, runtime=True, + description="", ) + AI_TRAINING_OUTPUT_FOLDER = CalmVariable.Simple("", label="AI Training Output Folder", is_mandatory=False, + is_hidden=False, runtime=True, + description="OPTIONAL - Leave blank if not needed", ) + AI_TRAINING_OUTPUT_FOLDER_DEFAULT = CalmVariable.Simple("training-output", + label="AI Training Output Folder Default Value", + 
is_mandatory=False, is_hidden=True, runtime=False, + description="Default value for the AI Training Output Folder", ) + AI_TRAINING_OUTPUT_FILE = CalmVariable.Simple("", label="AI Training Output File", is_mandatory=False, + is_hidden=False, runtime=True, + description="OPTIONAL - Leave blank if not needed", ) + AI_TRAINING_OUTPUT_FILE_DEFAULT = CalmVariable.Simple("resnet.pth", + label="AI Training Output File Default Value", + is_mandatory=False, is_hidden=True, runtime=False, + description="Default value for the AI Training Output File", ) + EXTRA_PARAMS = CalmVariable.Simple("", label="AI Inference Optional Extra Parameters", is_mandatory=False, + is_hidden=False, runtime=True, + description="OPTIONAL - Leave blank if not needed - Enter any extra parameters needed, e.g., --quiet, etc.", ) + AI_MODEL_NAME = CalmVariable.Simple("", label="AI Training Model Name", is_mandatory=False, is_hidden=False, + runtime=True, + description="OPTIONAL - Leave blank if not needed - Enter the AI model name if not using the default value", ) + AI_MODEL_NAME_DEFAULT = CalmVariable.Simple("resnet50", label="AI Training Model Name Default Value", + is_mandatory=False, is_hidden=True, runtime=False, + description="Default value for the AI model name", ) @action def AIStartInferenceService(name="AI Start Inference Service"): - CalmTask.Exec.ssh(name="AI Start Inference Service",filename=DAY2_SCRIPTS_DIRECTORY + "/ai_inference_start.sh",target=ref(VM_Provision),) - NAI_DL_BENCH_VERSION = CalmVariable.WithOptions.Predefined.string(["0.2.3"],label="Please select the version to be used.",default="0.2.3",is_mandatory=True,is_hidden=False,runtime=True,description="",) - AI_TRAINING_OUTPUT_FOLDER = CalmVariable.Simple("",label="AI Training Output Folder",is_mandatory=False,is_hidden=False,runtime=True,description="OPTIONAL - Leave blank if not needed",) - AI_TRAINING_OUTPUT_FOLDER_DEFAULT = CalmVariable.Simple("training-output",label="AI Training Output Folder Default Value",is_mandatory=False,is_hidden=True,runtime=False,description="Default value for the AI Training Output Folder",) - AI_TRAINING_OUTPUT_FILE = CalmVariable.Simple("",label="AI Training Output File",is_mandatory=False,is_hidden=False,runtime=True,description="OPTIONAL - Leave blank if not needed",) - AI_TRAINING_OUTPUT_FILE_DEFAULT = CalmVariable.Simple("resnet.pth",label="AI Training Output File Default Value",is_mandatory=False,is_hidden=True,runtime=False,description="Default value for the AI Training Output File",) - EXTRA_PARAMS = CalmVariable.Simple("",label="AI Inference Optional Extra Parameters",is_mandatory=False,is_hidden=False,runtime=True,description="OPTIONAL - Leave blank if not needed - Enter any extra parameters needed, e.g., --quiet, etc.",) - AI_MODEL_NAME = CalmVariable.Simple("",label="AI Training Model Name",is_mandatory=False,is_hidden=False,runtime=True,description="OPTIONAL - Leave blank if not needed - Enter the AI model name if not using the default value",) - AI_MODEL_NAME_DEFAULT = CalmVariable.Simple("resnet50",label="AI Training Model Name Default Value",is_mandatory=False,is_hidden=True,runtime=False,description="Default value for the AI model name",) + CalmTask.Exec.ssh(name="AI Start Inference Service", filename=DAY2_SCRIPTS_DIRECTORY + "/ai_inference_start.sh", + target=ref(VM_Provision), ) + NAI_DL_BENCH_VERSION = CalmVariable.WithOptions.Predefined.string(["0.2.3"], + label="Please select the version to be used.", + default="0.2.3", is_mandatory=True, + is_hidden=False, runtime=True, + description="", ) + 
AI_TRAINING_OUTPUT_FOLDER = CalmVariable.Simple("", label="AI Training Output Folder", is_mandatory=False, + is_hidden=False, runtime=True, + description="OPTIONAL - Leave blank if not needed", ) + AI_TRAINING_OUTPUT_FOLDER_DEFAULT = CalmVariable.Simple("training-output", + label="AI Training Output Folder Default Value", + is_mandatory=False, is_hidden=True, runtime=False, + description="Default value for the AI Training Output Folder", ) + AI_TRAINING_OUTPUT_FILE = CalmVariable.Simple("", label="AI Training Output File", is_mandatory=False, + is_hidden=False, runtime=True, + description="OPTIONAL - Leave blank if not needed", ) + AI_TRAINING_OUTPUT_FILE_DEFAULT = CalmVariable.Simple("resnet.pth", + label="AI Training Output File Default Value", + is_mandatory=False, is_hidden=True, runtime=False, + description="Default value for the AI Training Output File", ) + EXTRA_PARAMS = CalmVariable.Simple("", label="AI Inference Optional Extra Parameters", is_mandatory=False, + is_hidden=False, runtime=True, + description="OPTIONAL - Leave blank if not needed - Enter any extra parameters needed, e.g., --quiet, etc.", ) + AI_MODEL_NAME = CalmVariable.Simple("", label="AI Training Model Name", is_mandatory=False, is_hidden=False, + runtime=True, + description="OPTIONAL - Leave blank if not needed - Enter the AI model name if not using the default value", ) + AI_MODEL_NAME_DEFAULT = CalmVariable.Simple("resnet50", label="AI Training Model Name Default Value", + is_mandatory=False, is_hidden=True, runtime=False, + description="Default value for the AI model name", ) @action def AIStopInferenceService(name="AI Stop Inference Service"): - CalmTask.Exec.ssh(name="AI Stop Inference Service",filename=DAY2_SCRIPTS_DIRECTORY + "/ai_inference_stop.sh",target=ref(VM_Provision),) + CalmTask.Exec.ssh(name="AI Stop Inference Service", filename=DAY2_SCRIPTS_DIRECTORY + "/ai_inference_stop.sh", + target=ref(VM_Provision), ) + class AHV_Small(Common): deployments = [AHV_Deployment_Sml] @action def Scaleout(name="Scale Out"): - increase_count = CalmVariable.Simple("1",label="",is_mandatory=False,is_hidden=False,runtime=True,description="",) - CalmTask.Scaling.scale_out("@@{increase_count}@@", name="ScaleOut",target=ref(AHV_Deployment_Sml),) + increase_count = CalmVariable.Simple("1", label="", is_mandatory=False, is_hidden=False, runtime=True, + description="", ) + CalmTask.Scaling.scale_out("@@{increase_count}@@", name="ScaleOut", target=ref(AHV_Deployment_Sml), ) @action def Scalein(name="Scale In"): - decrease_count = CalmVariable.Simple("1",label="",is_mandatory=False,is_hidden=False,runtime=True,description="",) - CalmTask.Scaling.scale_in("@@{decrease_count}@@", name="ScaleIn",target=ref(AHV_Deployment_Sml),) + decrease_count = CalmVariable.Simple("1", label="", is_mandatory=False, is_hidden=False, runtime=True, + description="", ) + CalmTask.Scaling.scale_in("@@{decrease_count}@@", name="ScaleIn", target=ref(AHV_Deployment_Sml), ) + class AHV_Medium(Common): deployments = [AHV_Deployment_Medium] @action def Scaleout(name="Scale Out"): - increase_count = CalmVariable.Simple("1",label="",is_mandatory=False,is_hidden=False,runtime=True,description="",) - CalmTask.Scaling.scale_out("@@{increase_count}@@", name="ScaleOut",target=ref(AHV_Deployment_Medium),) + increase_count = CalmVariable.Simple("1", label="", is_mandatory=False, is_hidden=False, runtime=True, + description="", ) + CalmTask.Scaling.scale_out("@@{increase_count}@@", name="ScaleOut", target=ref(AHV_Deployment_Medium), ) @action def 
Scalein(name="Scale In"): - decrease_count = CalmVariable.Simple("1",label="",is_mandatory=False,is_hidden=False,runtime=True,description="",) - CalmTask.Scaling.scale_in("@@{decrease_count}@@", name="ScaleIn",target=ref(AHV_Deployment_Medium),) + decrease_count = CalmVariable.Simple("1", label="", is_mandatory=False, is_hidden=False, runtime=True, + description="", ) + CalmTask.Scaling.scale_in("@@{decrease_count}@@", name="ScaleIn", target=ref(AHV_Deployment_Medium), ) + class AHV_Large(Common): deployments = [AHV_Deployment_Large] @action def Scaleout(name="Scale Out"): - increase_count = CalmVariable.Simple("1",label="",is_mandatory=False,is_hidden=False,runtime=True,description="",) - CalmTask.Scaling.scale_out("@@{increase_count}@@", name="ScaleOut",target=ref(AHV_Deployment_Large),) + increase_count = CalmVariable.Simple("1", label="", is_mandatory=False, is_hidden=False, runtime=True, + description="", ) + CalmTask.Scaling.scale_out("@@{increase_count}@@", name="ScaleOut", target=ref(AHV_Deployment_Large), ) @action def Scalein(name="Scale In"): - decrease_count = CalmVariable.Simple("1",label="",is_mandatory=False,is_hidden=False,runtime=True,description="",) - CalmTask.Scaling.scale_in("@@{decrease_count}@@", name="ScaleIn",target=ref(AHV_Deployment_Large),) + decrease_count = CalmVariable.Simple("1", label="", is_mandatory=False, is_hidden=False, runtime=True, + description="", ) + CalmTask.Scaling.scale_in("@@{decrease_count}@@", name="ScaleIn", target=ref(AHV_Deployment_Large), ) -class Linux(Blueprint): +class Linux(Blueprint): services = [VM_Provision] packages = [AHV_Package_Sml, AHV_Package_Med, AHV_Package_Lrg] substrates = [AHVVM_Small, AHVVM_Medium, AHVVM_Large] - profiles = [AHV_Small, AHV_Medium, AHV_Large ] + profiles = [AHV_Small, AHV_Medium, AHV_Large] credentials = [BP_CRED_cred_os] + Linux.__doc__ = read_file('mp_meta/bp-description.md') + def main(): print(Linux.json_dumps(pprint=True)) + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/config/example-configs/pod-configs/pod-config.yml b/config/example-configs/pod-configs/pod-config.yml index e97a91c..62637b0 100644 --- a/config/example-configs/pod-configs/pod-config.yml +++ b/config/example-configs/pod-configs/pod-config.yml @@ -148,9 +148,12 @@ pod: - name: objectstore01 domain: eng.company.com cluster: cluster-01 # Cluster reference - # DHCP subnet - network: vlan110 - # 4 statis IPs + # Storage Network + storage_network: vlan110 + # Public Network + public_network: vlan10 + # First 2 Static IPs should be storage network static IPs. + # Rest of the Static IPs will be assigned as Public static IPs. static_ip_list: - ip1 - ip2 diff --git a/config/example-configs/pod-configs/pod-deploy.yml b/config/example-configs/pod-configs/pod-deploy.yml index 34fb521..68eaa80 100644 --- a/config/example-configs/pod-configs/pod-deploy.yml +++ b/config/example-configs/pod-configs/pod-deploy.yml @@ -53,13 +53,15 @@ pod: cvm_ram: 12 # cvm_ram is Gigabytes. Minimum 12, no maximum. Default set it to 12. node_details: - node_serial: node-serial-1 - cvm_ip: valid-cvm-ip - host_ip: valid-host-ip - ipmi_ip: valid-ipmi-ip + cvm_ip: valid-cvm-ip # Optional. New CVM IP + host_ip: valid-host-ip # Optional. New Hypervisor Host IP + ipmi_ip: valid-ipmi-ip # Optional. New IPMI IP + hypervisor_hostname: hostname-01 # Optional. Used to set up the new hypervisor hostname. In case of IPAM, hypervisor_hostname will be used to create the host record - node_serial: node-serial-2 cvm_ip: valid-cvm-ip host_ip: valid-host-ip - node_serial: node-serial-3 + hypervisor_hostname: hostname-03 redundancy_factor: 2 # Nutanix supports RF2, and RF3 only if the cluster has 5+ nodes - cluster_name: site01-cluster-01 cluster_size: 3 # Cluster size will be the number of nodes for deployment diff --git a/config/example-configs/pod-configs/pod-management-config.yml b/config/example-configs/pod-configs/pod-management-config.yml index c3ccfd4..98610d6 100644 --- a/config/example-configs/pod-configs/pod-management-config.yml +++ b/config/example-configs/pod-configs/pod-management-config.yml @@ -1,5 +1,5 @@ --- -# This configuration is used to facilitate Initial PC configurations includind enabling FC +# This configuration is used to facilitate Initial PC configurations including enabling FC # Global variables, which can be inherited in blocks pc_creds: &pc_creds diff --git a/config/example-configs/script-configs/authentication_pc.yml b/config/example-configs/script-configs/authentication_pc.yml index 9cb29e4..527f156 100644 --- a/config/example-configs/script-configs/authentication_pc.yml +++ b/config/example-configs/script-configs/authentication_pc.yml @@ -58,7 +58,7 @@ pc_directory_services: # type: list # required: false # dependencies: -# - directory_services.ad_name +# - ad_name # schema: # type: dict # schema: @@ -80,6 +80,8 @@ pc_directory_services: # values: # required: true # type: list +# schema: +# type: string # diff --git a/config/example-configs/script-configs/authentication_pe.yml b/config/example-configs/script-configs/authentication_pe.yml index 75f97b4..d9a9229 100644 --- a/config/example-configs/script-configs/authentication_pe.yml +++ b/config/example-configs/script-configs/authentication_pe.yml @@ -85,7 +85,7 @@ clusters: # type: list # required: false # dependencies: -# - directory_services.ad_name +# - ad_name # schema: # type: dict # schema: @@ -107,6 +107,8 @@ clusters: # values: # required: true # type: list +# schema: +# type: string # diff --git a/config/example-configs/script-configs/objectstore_buckets.yml b/config/example-configs/script-configs/objectstore_buckets.yml index d1e4557..dd75cdf 100644 --- a/config/example-configs/script-configs/objectstore_buckets.yml +++ b/config/example-configs/script-configs/objectstore_buckets.yml @@ -8,8 +8,12 @@ objects: - name: objectstore01 domain: eng.company.com cluster: cluster01 - network: subnet01 - # Specify 4 static IPs + # Storage Network + storage_network: subnet01 + # Public Network + public_network: subnet02 + # First 2 Static IPs should be storage network static IPs. + # Rest of the Static IPs will be assigned as Public static IPs. static_ip_list: - ip1 - ip2 @@ -28,8 +32,12 @@ objects: - name: objectstore02 domain: eng.company.com cluster: cluster01 - network: subnet01 - # Specify 4 static IPs + # Storage Network + storage_network: subnet01 + # Public Network + public_network: subnet02 + # First 2 Static IPs should be storage network static IPs. + # Rest of the Static IPs will be assigned as Public static IPs. 
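+    # e.g. with the four IPs listed below, ip1 and ip2 would go to the storage network, and ip3 and ip4 would become public IPs (an illustrative reading of the two comments above).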
static_ip_list: - ip1 - ip2 @@ -68,12 +76,17 @@ objects: # cluster: # type: string # required: true -# network: +# storage_network: +# type: string +# required: true +# public_network: # type: string # required: true # static_ip_list: # type: list # required: true +# schema: +# type: string # num_worker_nodes: # type: integer # required: true diff --git a/config/example-configs/script-configs/saml_idp.yml b/config/example-configs/script-configs/saml_idp.yml new file mode 100644 index 0000000..8bf249e --- /dev/null +++ b/config/example-configs/script-configs/saml_idp.yml @@ -0,0 +1,79 @@ +pc_ip: valid-pc-ip +# Specify a user with permission to make API calls. +pc_credential: pc_user # credential reference from "vaults" in global.yml file + +#Create +saml_idp_configs: + - name: IDP1 + username_attr: "username_attribute" #Optional + email_attr: "email_attribute" #Optional + groups_attr: "groups_attribute" #Optional + groups_delim: "groups_delimiter" #Optional + # Either of metadata_url or metadata_path or idp_properties need to be specified + metadata_url: "IdentityProviderURL" + metadata_path: "path_where_idp_metadata_is_stored" # e.g. "config/idp.xml" if it is stored in config directory + idp_properties: + idp_url: "idp_url" # Required + login_url: "login_url" # Required + logout_url: "logout_url" # Required + error_url: "error_url" # Optional + certificate: "certificate" # copy the details here, don't specify the path + - name: IDP2 + username_attr: "username_attribute" #Optional + email_attr: "email_attribute" #Optional + groups_attr: "groups_attribute" #Optional + groups_delim: "groups_delimiter" #Optional + # Either of metadata_url or metadata_path or idp_properties need to be specified + metadata_url: "IdentityProviderURL" + metadata_path: "path_where_idp_metadata_is_stored" # e.g. 
"config/idp.xml" if it is stored in config directory + idp_properties: + idp_url: "idp_url" # Required + login_url: "login_url" # Required + logout_url: "logout_url" # Required + error_url: "error_url" # Optional + certificate: "certificate" # copy the details here, don't specify the path + +######################################## SCHEMA DOCUMENTATION ######################################## +### ---CREATE SCHEMA --- ### + +# saml_idp_configs: +# type: list +# schema: +# type: dict +# schema: +# name: +# type: string +# required: true +# empty: false +# username_attr: +# type: string +# email_attr: +# type: string +# groups_attr: +# type: string +# groups_delim: +# type: string +# metadata_path: +# type: string +# metadata_url: +# type: string +# idp_properties: +# type: dict +# schema: +# idp_url: +# required: true +# type: string +# login_url: +# required: true +# type: string +# logout_url: +# required: true +# type: string +# error_url: +# type: string +# certificate: +# required: true +# type: string +# + +#################################################################################################### diff --git a/config/example-configs/script-configs/subnets_pc.yml b/config/example-configs/script-configs/subnets_pc.yml index 4bc66aa..8447be5 100644 --- a/config/example-configs/script-configs/subnets_pc.yml +++ b/config/example-configs/script-configs/subnets_pc.yml @@ -106,6 +106,8 @@ clusters: # schema: # domain_name_server_list: # type: list +# schema: +# type: string # domain_search_list: # type: list # domain_name: diff --git a/config/example-configs/script-configs/subnets_pe.yml b/config/example-configs/script-configs/subnets_pe.yml index a82d480..e09210a 100644 --- a/config/example-configs/script-configs/subnets_pe.yml +++ b/config/example-configs/script-configs/subnets_pe.yml @@ -101,6 +101,8 @@ clusters: # schema: # domain_name_server_list: # type: list +# schema: +# type: string # domain_search_list: # type: list # domain_name: diff --git a/config/example-configs/script-configs/update_cvm_foundation.yml b/config/example-configs/script-configs/update_cvm_foundation.yml new file mode 100644 index 0000000..e5ae58f --- /dev/null +++ b/config/example-configs/script-configs/update_cvm_foundation.yml @@ -0,0 +1,26 @@ +cvm_creds: &cvm_creds + # Specify a user with permission to make API calls. + cvm_credential: cvm_credential # credential reference from "vaults" in global.yml file +foundation: &foundation + foundation_build_url: http://url-path-to-foundation-tar/ # url path to download the foundation tar file + foundation_version: "5.x" # Foundation version to be updated to in CVM +nameserver: &nameserver + nameserver: valid-nameserver # Optional. Provide a nameserver, if nameserver needs to be added to /etc/resolv.conf to download the tar file from the webserver +cvm_downgrade: &cvm_downgrade + downgrade: True # Optional, set to downgrade as True when the foundation version needs to be downgraded. + +cvms: + valid-cvm01-ip: + # Use global cvm creds + <<: *cvm_creds + # Use global nameserver + <<: *nameserver + # Use global foundation config + <<: *foundation + valid-cvm02-ip: + cvm_credential: cvm_credential # credential reference from "vaults" in global.yml file + nameserver: valid-nameserver # Optional. Provide a nameserver, if nameserver needs to be added to /etc/resolv.conf to download the tar file from the webserver + # Use global cvm_downgrade. This is optional, set to downgrade as True when the foundation version needs to be downgraded. 
+ <<: *cvm_downgrade + foundation_build_url: http://url-path-to-foundation-tar/ # url path to download the foundation tar file + foundation_version: "5.x" # Foundation version to be updated to in CVM diff --git a/config/example-configs/workflow-configs/cluster-config.yml b/config/example-configs/workflow-configs/cluster-config.yml index 2418cce..dfcd73e 100644 --- a/config/example-configs/workflow-configs/cluster-config.yml +++ b/config/example-configs/workflow-configs/cluster-config.yml @@ -70,6 +70,15 @@ ntp_servers: &ntp_servers - 1.us.pool.ntp.org - 2.us.pool.ntp.org +# Not supported for single node cluster +ha_reservation: &ha_reservation + enable_failover: true + num_host_failure_to_tolerate: 1 + +# Not supported for 1 and 2 node cluster +rebuild_capacity_reservation: &rebuild_capacity_reservation + enable_rebuild_reservation: true + # configure the below clusters clusters: valid-site01-cluster-01-ip: @@ -92,6 +101,11 @@ clusters: # NTP, DNS servers <<: *ntp_servers <<: *name_servers + # Use global HA Reservation config + ha_reservation: + <<: *ha_reservation + # Use global Rebuild capacity Reservation config + <<: *rebuild_capacity_reservation valid-site02-cluster-02-ip: name: cluster-02 # Optional if name is already provided above @@ -136,3 +150,7 @@ clusters: domain_name_server_list: [ 10.10.10.10 ] domain_search_list: [ eng.company.com ] domain_name: eng.company.com + ha_reservation: + enable_failover: true + num_host_failure_to_tolerate: 0 + enable_rebuild_reservation: false diff --git a/config/example-configs/workflow-configs/pc-config.yml b/config/example-configs/workflow-configs/pc-config.yml index 4b8a18d..4564ae9 100644 --- a/config/example-configs/workflow-configs/pc-config.yml +++ b/config/example-configs/workflow-configs/pc-config.yml @@ -31,6 +31,37 @@ enable_pulse: true # Use globally declared ad_config <<: *ad_config +# IDP config +saml_idp_configs: + - name: IDP1 + username_attr: "username_attribute" #Optional + email_attr: "email_attribute" #Optional + groups_attr: "groups_attribute" #Optional + groups_delim: "groups_delimiter" #Optional + # Either of metadata_url or metadata_path or idp_properties need to be specified + metadata_url: "IdentityProviderURL" + metadata_path: "path_where_idp_metadata_is_stored" # e.g. "config/idp.xml" if it is stored in config directory + idp_properties: + idp_url: "idp_url" # Required + login_url: "login_url" # Required + logout_url: "logout_url" # Required + error_url: "error_url" # Optional + certificate: "certificate" # copy the details here, don't specify the path + - name: IDP2 + username_attr: "username_attribute" #Optional + email_attr: "email_attribute" #Optional + groups_attr: "groups_attribute" #Optional + groups_delim: "groups_delimiter" #Optional + # Either of metadata_url or metadata_path or idp_properties need to be specified + metadata_url: "IdentityProviderURL" + metadata_path: "path_where_idp_metadata_is_stored" # e.g. 
"config/idp.xml" if it is stored in config directory + idp_properties: + idp_url: "idp_url" # Required + login_url: "login_url" # Required + logout_url: "logout_url" # Required + error_url: "error_url" # Optional + certificate: "certificate" # copy the details here, don't specify the path + name_servers_list: - valid-name-server1 - valid-name-server1 @@ -50,9 +81,12 @@ objects: - name: objectstore01 domain: eng.company.com cluster: cluster-01 # Cluster reference - # DHCP subnet - network: vlan110 - # 4 statis IPs + # Storage Network + storage_network: vlan110 + # Public Network + public_network: vlan10 + # First 2 Static IPs should be storage network static IPs. + # Rest of the Static IPs will be assigned as Public static IPs. static_ip_list: - ip1 - ip2 diff --git a/config/global.yml b/config/global.yml index 9598029..4785957 100644 --- a/config/global.yml +++ b/config/global.yml @@ -4,11 +4,11 @@ vaults: metadata: host: cyberark-host.domain.com appId: testapp - user: administrator safe: Test - port: 11194 - password_path: .local/password - cert_file: .local/crt + port: 11194 # Optional + endpoint: AIMWebService + # Place the cert, key files in the .local directory + cert_file: .local/cert cert_key: .local/key # Cyberark API is used to get the password for below usernames credentials: @@ -33,6 +33,7 @@ vaults: address: service_account_cred remote_pc_credentials: username: admin + address: admin_cred infoblox_user: username: infoblox_user address: infoblox_cred diff --git a/framework/helpers/doc_generator_for_eg_configs.py b/framework/helpers/doc_generator_for_eg_configs.py index ada496c..d52fe02 100644 --- a/framework/helpers/doc_generator_for_eg_configs.py +++ b/framework/helpers/doc_generator_for_eg_configs.py @@ -1,26 +1,44 @@ +import os import yaml - +from pathlib import Path from framework.helpers.schema import * + example_config_directory_location = "config/example-configs" +scripts_folder = "script-configs" +workflow_folder = "workflow-configs" +pod_configs = "pod-configs" schema_example_config_map = { - f"{example_config_directory_location}/address_groups_pc.yml": [ADDRESS_GROUP_CREATE_SCHEMA, ADDRESS_GROUP_DELETE_SCHEMA], - f"{example_config_directory_location}/authentication_pc.yml": [AD_CREATE_SCHEMA, AD_DELETE_SCHEMA], - f"{example_config_directory_location}/authentication_pe.yml": [AD_CREATE_SCHEMA, AD_DELETE_SCHEMA], - f"{example_config_directory_location}/category_pc.yml": [CATEGORIES_CREATE_SCHEMA, CATEGORIES_DELETE_SCHEMA], - f"{example_config_directory_location}/objectstore_buckets.yml": [OBJECTS_CREATE_SCHEMA, OBJECTS_DELETE_SCHEMA], - f"{example_config_directory_location}/protection_policy.yml": [PROTECTION_RULES_CREATE_SCHEMA, PROTECTION_RULES_DELETE_SCHEMA], - f"{example_config_directory_location}/recovery_plan.yml": [RECOVERY_PLAN_CREATE_SCHEMA, RECOVERY_PLAN_DELETE_SCHEMA], - f"{example_config_directory_location}/remote_az.yml": [REMOTE_AZS_CONNECT_SCHEMA, REMOTE_AZS_DISCONNECT_SCHEMA], - f"{example_config_directory_location}/security_policy.yml": [SECURITY_POLICIES_CREATE_SCHEMA, SECURITY_POLICIES_DELETE_SCHEMA], - f"{example_config_directory_location}/service_groups.yml": [SERVICE_GROUP_CREATE_SCHEMA, SERVICE_GROUP_DELETE_SCHEMA], - f"{example_config_directory_location}/storage_container_pe.yml": [CONTAINERS_CREATE_SCHEMA, CONTAINERS_DELETE_SCHEMA], - f"{example_config_directory_location}/subnets_pe.yml": [NETWORKS_CREATE_SCHEMA, NETWORKS_DELETE_SCHEMA], - f"{example_config_directory_location}/subnets_pc.yml": [NETWORKS_CREATE_SCHEMA, NETWORKS_DELETE_SCHEMA], - 
f"{example_config_directory_location}/pc_image.yml": [IMAGE_UPLOAD_SCHEMA, IMAGE_DELETE_SCHEMA], - f"{example_config_directory_location}/pc_ova.yml": [OVA_UPLOAD_SCHEMA, OVA_DELETE_SCHEMA] + f"{example_config_directory_location}/{scripts_folder}/address_groups_pc.yml": [ADDRESS_GROUP_CREATE_SCHEMA, + ADDRESS_GROUP_DELETE_SCHEMA], + f"{example_config_directory_location}/{scripts_folder}/authentication_pc.yml": [AD_CREATE_SCHEMA, AD_DELETE_SCHEMA], + f"{example_config_directory_location}/{scripts_folder}/authentication_pe.yml": [AD_CREATE_SCHEMA, AD_DELETE_SCHEMA], + f"{example_config_directory_location}/{scripts_folder}/category_pc.yml": [CATEGORIES_CREATE_SCHEMA, + CATEGORIES_DELETE_SCHEMA], + f"{example_config_directory_location}/{scripts_folder}/objectstore_buckets.yml": [OBJECTS_CREATE_SCHEMA, + OBJECTS_DELETE_SCHEMA], + f"{example_config_directory_location}/{scripts_folder}/protection_policy.yml": [PROTECTION_RULES_CREATE_SCHEMA, + PROTECTION_RULES_DELETE_SCHEMA], + f"{example_config_directory_location}/{scripts_folder}/recovery_plan.yml": [RECOVERY_PLAN_CREATE_SCHEMA, + RECOVERY_PLAN_DELETE_SCHEMA], + f"{example_config_directory_location}/{scripts_folder}/remote_az.yml": [REMOTE_AZS_CONNECT_SCHEMA, + REMOTE_AZS_DISCONNECT_SCHEMA], + f"{example_config_directory_location}/{scripts_folder}/security_policy.yml": [SECURITY_POLICIES_CREATE_SCHEMA, + SECURITY_POLICIES_DELETE_SCHEMA], + f"{example_config_directory_location}/{scripts_folder}/service_groups.yml": [SERVICE_GROUP_CREATE_SCHEMA, + SERVICE_GROUP_DELETE_SCHEMA], + f"{example_config_directory_location}/{scripts_folder}/storage_container_pe.yml": [CONTAINERS_CREATE_SCHEMA, + CONTAINERS_DELETE_SCHEMA], + f"{example_config_directory_location}/{scripts_folder}/subnets_pe.yml": [NETWORKS_CREATE_SCHEMA, + NETWORKS_DELETE_SCHEMA], + f"{example_config_directory_location}/{scripts_folder}/subnets_pc.yml": [NETWORKS_CREATE_SCHEMA, + NETWORKS_DELETE_SCHEMA], + f"{example_config_directory_location}/{scripts_folder}/pc_image.yml": [IMAGE_UPLOAD_SCHEMA, IMAGE_DELETE_SCHEMA], + f"{example_config_directory_location}/{scripts_folder}/pc_ova.yml": [OVA_UPLOAD_SCHEMA, OVA_DELETE_SCHEMA], + f"{example_config_directory_location}/{scripts_folder}/saml_idp.yml": [IDP_CREATE_SCHEMA] } + def remove_validators_from_schema(json_schema): if isinstance(json_schema, dict): schema = {} @@ -32,16 +50,18 @@ def remove_validators_from_schema(json_schema): return [remove_validators_from_schema(item) for item in json_schema] else: return json_schema - + + def generate_commented_yaml_from_schema(json_schema): json_without_validators = remove_validators_from_schema(json_schema) yaml_data = yaml.dump(json_without_validators, sort_keys=False) commented_yaml_str = "\n".join(f"# {line}" for line in yaml_data.split("\n")) return str(commented_yaml_str) + def delete_previous_comments(filename): # Read the file into memory - start_string = ('#'*40 + " SCHEMA DOCUMENTATION "+ '#'*40) + start_string = ('#' * 40 + " SCHEMA DOCUMENTATION " + '#' * 40) with open(filename, 'r') as file: lines = file.readlines() @@ -55,16 +75,18 @@ def delete_previous_comments(filename): # Truncate the file from the line containing the start string onwards with open(filename, 'w') as file: - file.writelines(lines[:(i-2)]) + file.writelines(lines[:(i - 2)]) + - if __name__ == "__main__": - - for file_path,schema in schema_example_config_map.items(): + project_root = Path(__file__).parent.parent.parent + + for file_path, schema in schema_example_config_map.items(): + file_path = 
os.path.join(project_root, file_path) delete_previous_comments(file_path) - to_be_appended_text = ("\n\n"+"#"*40 + " SCHEMA DOCUMENTATION "+ "#"*40) - if isinstance(schema,list): + to_be_appended_text = ("\n\n" + "#" * 40 + " SCHEMA DOCUMENTATION " + "#" * 40) + if isinstance(schema, list): headers = iter(["CREATE", "DELETE", "CLUSTERS"]) for json_schema in schema: to_be_appended_text += ("\n") @@ -74,6 +96,6 @@ def delete_previous_comments(filename): else: to_be_appended_text += generate_commented_yaml_from_schema(schema) - to_be_appended_text += ("#"*100 + "\n") + to_be_appended_text += ("#" * 100 + "\n") with open(file_path, 'a') as file: file.write(to_be_appended_text) diff --git a/framework/helpers/general_utils.py b/framework/helpers/general_utils.py index 79fbb90..7bd38fc 100644 --- a/framework/helpers/general_utils.py +++ b/framework/helpers/general_utils.py @@ -2,7 +2,7 @@ from email.mime.base import MIMEBase from email import encoders from pathlib import Path -import json5 as json +import json5 import smtplib from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText @@ -50,7 +50,7 @@ def construct_include(loader: Loader, node: yaml.Node) -> Any: if extension in ('yaml', 'yml'): return yaml.load(f, Loader) elif extension in ('json',): - return json.load(f) + return json5.load(f) else: return ''.join(f.readlines()) @@ -65,7 +65,7 @@ def get_json_file_contents(file: str) -> dict: logger.info(f"Reading contents of the file: [{file}]") with open(file, 'r') as f: try: - return json.load(f) + return json5.load(f) except Exception as e: raise JsonError(str(e)) @@ -335,7 +335,7 @@ def get_subnet_mask(subnet: str): def send_mail_helper(subject: str, body: str, from_mail: str, to_mail: str, smtp_host: str, - attachment_path: str = "", port: str = 25): + attachment_path: str = "", port: int = 25): # Create message container - the correct MIME type is multipart/alternative. 
msg = MIMEMultipart('alternative') msg['Subject'] = subject diff --git a/framework/helpers/helper_functions.py b/framework/helpers/helper_functions.py index d35d293..fe4dec2 100644 --- a/framework/helpers/helper_functions.py +++ b/framework/helpers/helper_functions.py @@ -1,9 +1,10 @@ import os import pathlib import sys +from time import sleep from typing import Optional, Dict from .general_utils import validate_schema, get_json_file_contents, copy_file_util, enforce_data_arg, \ - get_yml_file_contents, delete_file_util, create_log_dir_push_logs + get_yml_file_contents, create_log_dir_push_logs from .rest_utils import RestAPIUtil from framework.scripts.python.helpers.ipam.ipam import IPAM from .log_utils import get_logger @@ -64,9 +65,9 @@ # print("Yay I am able to access the data I defined in previous function") # print(data["test"]) -def get_file_path(file_name, file_path): +def get_file_path(data: Dict, file_name: str, file_path: str): try: - final_path = os.path.join('.local', file_path) + final_path = os.path.join(data['project_root'], file_path) if os.path.exists(final_path): return final_path else: @@ -84,26 +85,30 @@ def get_creds_from_vault(data: dict): if data.get('vault_to_use') == 'cyberark': cark_data = data.get('vaults').get('cyberark').get('metadata') - fetch_pwd = CyberArk(cark_data.get('host'), cark_data.get('port'), - get_file_path('crt_file', cark_data.get('crt_file')), - get_file_path('crt_key', cark_data.get('crt_key'))) - - auth_token = fetch_pwd.generate_auth_token(cark_data.get('user'), - get_file_path('cyberark password', cark_data.get('password_path'))) + fetch_pwd = CyberArk(host=cark_data.get('host'), + port=cark_data.get("port", ""), + cert_file=get_file_path(data, 'cert_file', cark_data.get('cert_file')), + cert_key=get_file_path(data, 'cert_key', cark_data.get('cert_key'))) if not data.get('vaults').get('cyberark').get('credentials'): raise Exception("Credential details cannot be empty. 
Kindly add the required details.") for user_type, user_info in data.get('vaults').get('cyberark').get('credentials').items(): try: - user_pwd = fetch_pwd.fetch_creds(auth_token, user_info['username'], - cark_data.get('appId'), cark_data.get('safe'), - user_info.get('address')) + username, user_pwd = fetch_pwd.fetch_creds(user_info['username'], + cark_data.get('appId'), cark_data.get('safe'), + user_info.get('address'), + cark_data.get('endpoint') or "AIMWebService") except Exception as e: - logger.warning(f'Failed to fetch password for {user_type}:{e}') - user_pwd = None - data.get('vaults').get('cyberark').get('credentials').get(user_type).update({'password': user_pwd}) - fetch_pwd.session_log_off() + logger.warning(e) + continue + data.get('vaults').get('cyberark').get('credentials').get(user_type).update({ + 'username': username, + 'password': user_pwd + }) + + # sleep for 5 seconds to avoid any issues + sleep(5) @enforce_data_arg @@ -273,6 +278,7 @@ def create_pc_objects(data: dict, global_data: Optional[Dict] = None): global_data = global_data if global_data else {} + # todo use read_creds after setting data as either global or data # vault to use can either be in data or global data if 'vaults' not in data: # check in global data @@ -294,8 +300,8 @@ def create_pc_objects(data: dict, global_data: Optional[Dict] = None): if data.get("pc_credential") or global_data.get("pc_credential"): pc_user = data.get("pc_credential") or global_data.get("pc_credential") - if not cred_details.get(pc_user, {}).get('username') and not cred_details.get(pc_user, {}).get('password'): - raise Exception(f"PC credentials not specified for the user {pc_user} in 'global.yml'") + if not cred_details.get(pc_user, {}).get('username') or not cred_details.get(pc_user, {}).get('password'): + raise Exception(f"PC credentials not specified for the user {pc_user!r} in 'global.yml'") data["pc_session"] = RestAPIUtil(data["pc_ip"], user=cred_details[pc_user]['username'], @@ -349,6 +355,7 @@ def create_pe_objects(data: dict, global_data: Optional[Dict] = None): global_data = global_data if global_data else {} + # todo use read_creds # vault to use can either be in data or global data if 'vaults' not in data: # check in global data @@ -380,8 +387,8 @@ def create_pe_objects(data: dict, global_data: Optional[Dict] = None): if "pe_credential" in cluster_details or "pe_credential" in data: pe_cred = cluster_details.get("pe_credential") or data.get("pe_credential") - if not cred_details.get(pe_cred, {}).get('username') and not cred_details.get(pe_cred, {}).get('password'): - raise Exception(f"PE credentials not specified for the user {pe_cred} in 'global.yml'") + if not cred_details.get(pe_cred, {}).get('username') or not cred_details.get(pe_cred, {}).get('password'): + raise Exception(f"PE credentials not specified for the user {pe_cred!r} in 'global.yml'") pe_session = RestAPIUtil(cluster_ip, user=cred_details[pe_cred]['username'], @@ -468,9 +475,11 @@ def create_ipam_object(data: dict, global_data: Optional[Dict] = None): @enforce_data_arg def read_creds(data: dict, credential: str) -> (str, str): + # todo read_creds should not access data. We should have details of data in main file itself vault = data.get("vault_to_use") - username, password = (data.get("vaults", {}).get(vault, {}).get("credentials", {}). - get(credential, {"username": None, "password": None}).values()) + credential = (data.get("vaults", {}).get(vault, {}).get("credentials", {}). 
+ get(credential, {"username": None, "password": None})) + username, password = credential.get("username"), credential.get("password") if not username or not password: raise Exception(f"Credentials for the service account {credential!r} not found in the vault") diff --git a/framework/helpers/schema.py b/framework/helpers/schema.py index b3b0aff..9b66377 100644 --- a/framework/helpers/schema.py +++ b/framework/helpers/schema.py @@ -1,5 +1,5 @@ from framework.helpers.general_utils import validate_ip, contains_whitespace, validate_domain, validate_subnet, \ - validate_dsip + validate_dsip, validate_ip_list """ We are using a popular Python library "cerberus" to define the json/ yml schema @@ -177,6 +177,7 @@ 'node_serial': { 'type': 'string', 'required': True, + 'validator': contains_whitespace }, 'cvm_ip': { 'type': 'string', @@ -192,6 +193,11 @@ 'type': 'string', 'required': False, 'validator': validate_ip + }, + 'hypervisor_hostname': { + 'type': 'string', + 'required': False, + 'validator': contains_whitespace } } } @@ -245,6 +251,20 @@ 'enable_pulse': {'type': 'boolean'} } +HA_RESERVATION_SCHEMA = { + 'ha_reservation': { + 'type': 'dict', + 'schema': { + 'enable_failover': {'type': 'boolean', 'required': True}, + 'num_host_failure_to_tolerate': {'type': 'integer', 'required': True} + } + } +} + +REBUILD_RESERVATION_SCHEMA = { + 'enable_rebuild_reservation': {'type': 'boolean'} +} + AD_CREATE_SCHEMA = { 'directory_services': { 'type': 'dict', @@ -344,6 +364,65 @@ }} } +IDP_CREATE_SCHEMA = { + 'saml_idp_configs': { + 'type': 'list', + 'schema': { + 'type': 'dict', + 'schema': { + 'name': { + 'type': 'string', + 'required': True, + 'empty': False + }, + 'username_attr': { + 'type': 'string', + }, + 'email_attr': { + 'type': 'string', + }, + 'groups_attr': { + 'type': 'string', + }, + 'groups_delim': { + 'type': 'string', + }, + 'metadata_path': { + 'type': 'string', + }, + 'metadata_url': { + 'type': 'string', + }, + 'idp_properties': { + 'type': 'dict', + 'schema': { + 'idp_url': { + 'required': True, + 'type': 'string' + }, + 'login_url': { + 'required': True, + 'type': 'string' + }, + 'logout_url': { + 'required': True, + 'type': 'string' + }, + 'error_url': { + 'type': 'string' + }, + 'certificate': { + 'required': True, + 'type': 'string' + } + + } + } + } + } + } +} + CONTAINERS_CREATE_SCHEMA = { 'containers': { 'type': 'list', @@ -1053,6 +1132,8 @@ **AD_CREATE_SCHEMA, **NETWORKS_CREATE_SCHEMA, **CONTAINERS_CREATE_SCHEMA, + **HA_RESERVATION_SCHEMA, + **REBUILD_RESERVATION_SCHEMA, 'ncm_subnets': { 'type': 'list', 'schema': { @@ -1189,7 +1270,8 @@ 'name': {'type': 'string', 'required': True}, 'domain': {'type': 'string', 'required': True, 'validator': validate_domain}, 'cluster': {'type': 'string', 'required': True}, - 'network': {'type': 'string', 'required': True}, + 'storage_network': {'type': 'string', 'required': True}, + 'public_network': {'type': 'string', 'required': True}, 'static_ip_list': {'type': 'list', 'required': True, 'schema': {'type': 'string', 'validator': validate_ip}}, 'num_worker_nodes': {'type': 'integer', 'required': True}, @@ -1238,6 +1320,7 @@ }, 'pc_credential': CREDENTIAL_SCHEMA, **AD_CREATE_SCHEMA, + **IDP_CREATE_SCHEMA, **EULA_SCHEMA, **PULSE_SCHEMA, **DNS_SCHEMA, @@ -1787,3 +1870,21 @@ } } } + +CVMF_UPDATE_SCHEMA = { + 'cvms': { + 'type': 'dict', + 'required': True, + 'keyschema': {'type': 'string', 'validator': validate_ip}, + 'valueschema': { + 'type': 'dict', + 'schema': { + 'cvm_credential': {'type': 'string', 'validator': contains_whitespace, 
'required': True}, + 'foundation_build_url': {'type': 'string', 'required': True, 'validator': contains_whitespace,}, + 'foundation_version': {'type': 'string', 'required': True, 'validator': contains_whitespace,}, + 'downgrade': {'type': 'boolean', 'required': False}, + 'nameserver': {'type': 'string', 'required': False} + } + }, + } +} diff --git a/framework/helpers/vault_utils.py b/framework/helpers/vault_utils.py index b1f3154..fb50fb0 100644 --- a/framework/helpers/vault_utils.py +++ b/framework/helpers/vault_utils.py @@ -1,7 +1,6 @@ """ Function to authenticate with the cyber ark instance using certificate and key """ -import sys from framework.helpers.log_utils import get_logger from framework.helpers.rest_utils import RestAPIUtil @@ -9,52 +8,25 @@ class CyberArk: - def __init__(self, host: str, port: str, cert_file: str, cert_key: str): + def __init__(self, host: str, cert_file: str, cert_key: str, port: str = "", ): self.cert_file = cert_file self.cert_key = cert_key self.logger = logger self.session = RestAPIUtil(ip_address=host, user=None, pwd=None, port=port) - def generate_auth_token(self, cyber_ark_user, password_path): - """ - Generate authenticate token for cyberark for a session - params: - cyber_ark_user: session username - password_path: file path of the password.txt file - """ - cyber_ark_pwd = open(password_path).read() - uri = "PasswordVault/API/Auth/CyberArk/Logon" - data = {"username": cyber_ark_user, "password": cyber_ark_pwd} - try: - if self.cert_file: - return self.session.post(uri, data=data, verify=self.cert_file, cert=(self.cert_file, self.cert_key)) - else: - return self.session.post(uri, data=data) - except Exception as e: - logger.error(f"Failed to generate authentication token for CyberArk. Error is: {e}") - sys.exit() - - def fetch_creds(self, auth_token, username, app_id, safe, address): + def fetch_creds(self, username: str, app_id: str, safe: str, address: str, endpoint: str): """ Fetch password from the vault. params: - auth_token: token generated by generate_auth_token func username: account name app_id: application id safe: safe name where the account is saved address: address of the entity - return: password of the account + return: username, password of the account """ - uri = f'AIMWebService/api/Accounts?AppId={app_id}&Query=Safe={safe};UserName={username};Address={address}' - data = {'Authorization': auth_token} + uri = f"{endpoint}/api/Accounts?AppID={app_id}&Query=Safe={safe};UserName={username};Address={address}" try: - user_pwd_details = self.session.get(uri, data=data, verify=self.cert_file, cert=(self.cert_file, self.cert_key)) - return user_pwd_details.get('Content') + user_pwd_details = self.session.get(uri, verify=self.cert_file, cert=(self.cert_file, self.cert_key)) + return user_pwd_details.get("UserName"), user_pwd_details.get("Content") except Exception as e: - logger.error(f"Failed to fetch password for user: {username}. Error is: {e}") - - def session_log_off(self): - """ - log off from the active session - """ - self.session.post('PasswordVault/API/Auth/LogOff') + raise Exception(f"Failed to fetch password for user: {username!r}. 
Error is: {e}") diff --git a/framework/scripts/python/__init__.py b/framework/scripts/python/__init__.py index c5a5e21..e6cf6e7 100644 --- a/framework/scripts/python/__init__.py +++ b/framework/scripts/python/__init__.py @@ -4,6 +4,7 @@ from .ncm.project.create_calm_project import CreateNcmProject from .objects.configure_objects import OssConfig from .pc.configure_pc import PcConfig +from .pc.create.create_identity_provider import CreateIdp from .pc.upload.pc_image_upload import PcImageUpload from .pc.upload.pc_ova_upload import PcOVAUpload from .pe.configure_cluster import ClusterConfig @@ -79,6 +80,7 @@ from .pe.update.rebuild_capacity_reservation import RebuildCapacityReservation from .pe.delete.delete_rolemapping_pe import DeleteRoleMappingPe from .objects.objectstore.delete_objectstore import DeleteObjectStore +from .cvm.update_cvm_foundation import UpdateCvmFoundation __all__ = ["AddAdServerPe", "PodConfig", "ConnectToAz", "CreateBp", "CreateCategoryPc", "CreateContainerPe", "CreateServiceGroups", "CreateRoleMappingPe", "CreateNetworkSecurityPolicy", @@ -97,4 +99,4 @@ "PcOVADelete", "DeleteVmPc", "DeleteVmPe", "DisconnectAz", "AcceptEulaPc", "ChangeDefaultAdminPasswordPc", "PowerOnVmPc", "UpdatePulsePc", "PcOVADelete", "DeleteVmPc", "DisconnectAz", "ChangeDefaultAdminPasswordPe", "UpdatePulsePe", "AcceptEulaPe", "PcConfig", "ClusterConfig", "OssConfig", "DeployPC", "DeleteSubnetsPc", - "DeleteSubnetsPe", "PcImageUpload", "PcOVAUpload"] + "DeleteSubnetsPe", "PcImageUpload", "PcOVAUpload", "CreateIdp", "UpdateCvmFoundation"] diff --git a/framework/scripts/python/configure_pod.py b/framework/scripts/python/configure_pod.py index 18e5142..a0a8e98 100644 --- a/framework/scripts/python/configure_pod.py +++ b/framework/scripts/python/configure_pod.py @@ -70,7 +70,8 @@ def execute(self): if edge_site.get("nke_clusters", []): edge_site["pc_session"] = block["pc_session"] self.nke_scripts = self.nke_scripts or BatchScript(parallel=True) - self.nke_scripts.add(CreateKarbonClusterPc(edge_site, log_file=f"{block_name}_pc_ops.log")) + self.nke_scripts.add(CreateKarbonClusterPc(edge_site, global_data=self.data, + log_file=f"{block_name}_pc_ops.log")) # configure PC services/ entities self.block_batch_scripts[block_name].add(PcConfig(data=deepcopy(block), global_data=self.data, diff --git a/framework/scripts/python/cvm/cvm_script.py b/framework/scripts/python/cvm/cvm_script.py new file mode 100644 index 0000000..434ba55 --- /dev/null +++ b/framework/scripts/python/cvm/cvm_script.py @@ -0,0 +1,59 @@ +import threading +import multiprocessing +import concurrent.futures +from abc import abstractmethod +from typing import Dict +from framework.helpers.log_utils import get_logger +from framework.scripts.python.script import Script + +logger = get_logger(__name__) + + +class CvmScript(Script): + def __init__(self, data: Dict, parallel: bool = True, **kwargs): + self.data = data + self.cvms = self.data.get("cvms", {}) + self.parallel = parallel + super(CvmScript, self).__init__(**kwargs) + self.results["cvms"] = {} + # Set the value of max_workers based on the number of CPU cores + self.max_workers = multiprocessing.cpu_count() + 4 + + def execute(self, **kwargs): + if self.parallel: + try: + with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers) as executor: + executor.map(self.execute_single_cvm, self.cvms.keys(), self.cvms.values()) + except Exception as e: + self.exceptions.append(e) + else: + try: + for cvm_ip, cvm_details in self.cvms.items(): + self.execute_single_cvm(cvm_ip, 
cvm_details) + except Exception as e: + self.exceptions.append(e) + + def verify(self, **kwargs): + if self.parallel: + with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers) as executor: + executor.map(self.verify_single_cvm, self.cvms.keys(), self.cvms.values()) + else: + try: + for cvm_ip, cvm_details in self.cvms.items(): + self.verify_single_cvm(cvm_ip, cvm_details) + except Exception as e: + self.exceptions.append(e) + + @abstractmethod + def execute_single_cvm(self, cvm_ip: str, cvm_details: Dict): + pass + + @abstractmethod + def verify_single_cvm(self, cvm_ip: str, cvm_details: Dict): + pass + + def set_current_thread_name(self, cvm_ip: str): + current_thread = threading.current_thread() + + if current_thread != threading.main_thread(): + current_thread.name = f"Thread-{type(self).__name__}-{cvm_ip}" diff --git a/framework/scripts/python/cvm/update_cvm_foundation.py b/framework/scripts/python/cvm/update_cvm_foundation.py new file mode 100644 index 0000000..b734cac --- /dev/null +++ b/framework/scripts/python/cvm/update_cvm_foundation.py @@ -0,0 +1,65 @@ +from framework.scripts.python.cvm.cvm_script import CvmScript +from framework.helpers.log_utils import get_logger +from framework.scripts.python.helpers.ssh_cvm import SSHCVM +from framework.helpers.helper_functions import read_creds + +logger = get_logger(__name__) + + +class UpdateCvmFoundation(CvmScript): + """ + Update CVM foundation + """ + def __init__(self, data: dict, **kwargs): + super(UpdateCvmFoundation, self).__init__(data, **kwargs) + self.logger = self.logger or logger + + def execute_single_cvm(self, cvm_ip, cvm_details): + """ + Args: + cvm_ip (str): CVM IP + cvm_details: + cvm_credential (str): CVM credential to fetch from vault in global.yml + foundation_build_url (str): Foundation URL to download the tar file from + foundation_version (str): Foundation version to download the tar file from + nameserver (str, Optional): Optional. Nameserver to add to /etc/resolv.conf + downgrade (boolean, Optional): Optional. True if Downgrade the foundation version + """ + try: + if not cvm_details.get("cvm_credential"): + self.exceptions.append(f"{cvm_ip}: CVM Credentials are not provided") + return + cvm_username, cvm_password = (read_creds(data=self.data, credential=cvm_details["cvm_credential"])) + ssh_cvm = SSHCVM(cvm_ip, cvm_username, cvm_password) + # Update namerser if provided + if cvm_details.get("nameserver"): + status, error = ssh_cvm.update_resolv_conf(cvm_details["nameserver"]) + # Not going to exit if nameserver update fails, proceed with downloading file. 
+ if not status: + self.logger.warning(f"{cvm_ip}: Failed to update nameserver in resolv.conf {error}") + + # Update the CVM Foundation + status, error_message = ssh_cvm.update_cvm_foundation(foundation_url_path=cvm_details["foundation_build_url"], + downgrade=cvm_details.get("downgrade", False)) + if not status: + self.exceptions.append(f"{cvm_ip}: Failed to update CVM foundation: {error_message}") + except Exception as e: + self.exceptions.append(f"Exception occured while updating CVM Foundation: {e}") + + def verify_single_cvm(self, cvm_ip, cvm_details): + self.results["cvms"] = {"UpdateCvmFoundation": {}} + try: + self.results["cvms"]["UpdateCvmFoundation"][cvm_ip] = "Can't Verify" + cvm_username, cvm_password = (read_creds(data=self.data, credential=cvm_details["cvm_credential"])) + ssh_cvm = SSHCVM(cvm_ip, cvm_username, cvm_password) + status, output = ssh_cvm.get_foundation_version() + self.logger.debug(output) + if not status: + self.logger.error(f"{cvm_ip} - Failed to fetch CVM foundation version: {output}") + return + if cvm_details["foundation_version"] in output: + self.results["cvms"]["UpdateCvmFoundation"][cvm_ip] = "PASS" + else: + self.results["cvms"]["UpdateCvmFoundation"][cvm_ip] = "FAIL" + except Exception as e: + self.logger.error(e) diff --git a/framework/scripts/python/helpers/objects/objectstore.py b/framework/scripts/python/helpers/objects/objectstore.py index 22ee0ec..19534ff 100644 --- a/framework/scripts/python/helpers/objects/objectstore.py +++ b/framework/scripts/python/helpers/objects/objectstore.py @@ -64,8 +64,10 @@ def get_payload(self, **kwargs) -> Dict: cluster(str): The PE cluster or cluster_uuid(str): The PE cluster uuid static_ip_list(list): The list static ips, minimal 6 IPs - network(str): The network name - (or) network_uuid(str): The uuid of the network, much have IPAM enabled + storage_network(str): The Storage network name + (or) storage_network_uuid(str): The uuid of the network, must have IPAM enabled + public_network(str): The Public network name + (or) public_network_uuid(str): The uuid of the network, must have IPAM enabled num_worker_nodes(int, optional): Number of worker nodes required description(str, optional): The description num_cpu(int, optional): The num of vCpus for objectstore VMs @@ -76,23 +78,31 @@ def get_payload(self, **kwargs) -> Dict: """ static_ip_list = kwargs.get("static_ip_list", []) cluster_uuid = kwargs.get("cluster_uuid") - network_uuid = kwargs.get("network_uuid") + storage_network_uuid = kwargs.get("storage_network_uuid") + public_network_uuid = kwargs.get("public_network_uuid") - if not cluster_uuid and not network_uuid: - if not kwargs.get("cluster") and not kwargs.get("network"): - raise Exception("Cluster name, Network name has to be passed") + if not cluster_uuid: + if not kwargs.get("cluster"): + raise Exception("Cluster name has to be passed") cluster_obj = PcCluster(session=self.session) cluster_uuid = cluster_obj.get_uuid_by_name(kwargs["cluster"]) - - network_obj = Network(session=self.session) - network_uuid = network_obj.get_uuid_by_name(cluster_name=kwargs["cluster"], - subnet_name=kwargs["network"]) - - if not cluster_uuid and not network_uuid: + network_obj = Network(session=self.session) + if not storage_network_uuid: + if not kwargs.get("storage_network") and not kwargs.get("cluster"): + raise Exception("Storage Network name, Cluster name has to be passed") + storage_network_uuid = network_obj.get_uuid_by_name(cluster_name=kwargs["cluster"], + subnet_name=kwargs["storage_network"]) + if not 
public_network_uuid: + if not kwargs.get("public_network") and not kwargs.get("cluster"): + raise Exception("Public Network name, Cluster name has to be passed") + public_network_uuid = network_obj.get_uuid_by_name(cluster_name=kwargs["cluster"], + subnet_name=kwargs["public_network"]) + + if not cluster_uuid and not storage_network_uuid and not public_network_uuid: raise Exception("Invalid Cluster or Network name specified!") - if len(static_ip_list) < 4: - raise Exception("Provide at-least 4 static IPs") + if len(static_ip_list) < 3: + raise Exception("Provide at-least 3 static IPs") payload = \ { "api_version": "3.0", @@ -117,11 +127,11 @@ def get_payload(self, **kwargs) -> Dict: "buckets_infra_network_reference": { "kind": "subnet", - "uuid": network_uuid + "uuid": storage_network_uuid }, "client_access_network_reference": { "kind": "subnet", - "uuid": network_uuid + "uuid": public_network_uuid }, "aggregate_resources": { @@ -141,8 +151,10 @@ def create(self, **kwargs) -> Dict: cluster(str): The PE cluster (or) cluster_uuid(str): The PE cluster uuid static_ip_list(list): The list static ips, minimal 6 IPs - network(str): The network name - (or) network_uuid(str): The uuid of the network, must have IPAM enabled + storage_network(str): The Storage network name + (or) storage_network_uuid(str): The uuid of the network, must have IPAM enabled + public_network(str): The Public network name + (or) public_network_uuid(str): The uuid of the network, must have IPAM enabled num_worker_nodes(int, optional): Number of worker nodes required description(str, optional): The description num_cpu(int, optional): The num of vCpus for objectstore VMs @@ -153,23 +165,31 @@ def create(self, **kwargs) -> Dict: """ static_ip_list = kwargs.get("static_ip_list", []) cluster_uuid = kwargs.get("cluster_uuid") - network_uuid = kwargs.get("network_uuid") + storage_network_uuid = kwargs.get("storage_network_uuid") + public_network_uuid = kwargs.get("public_network_uuid") - if not cluster_uuid and not network_uuid: - if not kwargs.get("cluster") and not kwargs.get("network"): - raise Exception("Cluster name, Network name has to be passed") + if not cluster_uuid: + if not kwargs.get("cluster"): + raise Exception("Cluster name has to be passed") cluster_obj = PcCluster(session=self.session) cluster_uuid = cluster_obj.get_uuid_by_name(kwargs["cluster"]) - - network_obj = Network(session=self.session) - network_uuid = network_obj.get_uuid_by_name(cluster_name=kwargs["cluster"], - subnet_name=kwargs["network"]) - - if not cluster_uuid and not network_uuid: + network_obj = Network(session=self.session) + if not storage_network_uuid: + if not kwargs.get("storage_network") and not kwargs.get("cluster"): + raise Exception("Storage Network name, Cluster name has to be passed") + storage_network_uuid = network_obj.get_uuid_by_name(cluster_name=kwargs["cluster"], + subnet_name=kwargs["storage_network"]) + if not public_network_uuid: + if not kwargs.get("public_network") and not kwargs.get("cluster"): + raise Exception("Public Network name, Cluster name has to be passed") + public_network_uuid = network_obj.get_uuid_by_name(cluster_name=kwargs["cluster"], + subnet_name=kwargs["public_network"]) + + if not cluster_uuid and not storage_network_uuid and not public_network_uuid: raise Exception("Invalid Cluster or Network name specified!") - if len(static_ip_list) < 4: - raise Exception("Provide at-least 4 static IPs") + if len(static_ip_list) < 3: + raise Exception("Provide at-least 3 static IPs") payload = \ { "api_version": "3.0", 
@@ -194,11 +214,11 @@ def create(self, **kwargs) -> Dict: "buckets_infra_network_reference": { "kind": "subnet", - "uuid": network_uuid + "uuid": storage_network_uuid }, "client_access_network_reference": { "kind": "subnet", - "uuid": network_uuid + "uuid": public_network_uuid }, "aggregate_resources": { diff --git a/framework/scripts/python/helpers/pc_entity.py b/framework/scripts/python/helpers/pc_entity.py index ab58906..6db21c1 100644 --- a/framework/scripts/python/helpers/pc_entity.py +++ b/framework/scripts/python/helpers/pc_entity.py @@ -1,3 +1,4 @@ +import json from copy import deepcopy from typing import Optional from framework.helpers.rest_utils import RestAPIUtil @@ -57,3 +58,32 @@ def reference_spec(self): "uuid": "" } ) + + @staticmethod + def get_task_uuid(api_response: dict) -> str: + """ + Parse the api response to get the Task uuid + Args: + api_response(Dict): api response + Returns: + str : uuid + """ + task_uuid = None + # todo bug + # sometimes api_response in str + if isinstance(api_response, str): + try: + api_response = json.loads(api_response) + except Exception as e: + raise Exception(f"Cannot get task uuid from the response: {api_response}: {e}") + + if api_response and api_response.get('status', {}).get('execution_context', {}).get('task_uuid'): + task_uuid = api_response['status']['execution_context']['task_uuid'] + # In some cases only task_uuid is returned in response + elif api_response and api_response.get('task_uuid', {}): + task_uuid = api_response["task_uuid"] + + if not task_uuid: + raise Exception(f"Cannot get task uuid from the response: {api_response}") + + return task_uuid diff --git a/framework/scripts/python/helpers/ssh_cvm.py b/framework/scripts/python/helpers/ssh_cvm.py index d9d414b..ad27e0e 100644 --- a/framework/scripts/python/helpers/ssh_cvm.py +++ b/framework/scripts/python/helpers/ssh_cvm.py @@ -264,14 +264,14 @@ def download_files(self, url_list: List, timeout=300): Returns: dict: The command output """ - self.logger.info(f"Downloading files from URL(s): {url_list}") + self.logger.info(f"{self.cvm_ip} Downloading files from URL(s): {url_list}") status = False try: ssh_obj = self.get_ssh_connection(self.cvm_ip, self.cvm_username, self.cvm_password) cmd = ";".join(["wget -c --timestamp --no-check-certificate {}".format(url) for url in url_list]) cmd = "source /etc/profile;{}".format(cmd) out, err = self.execute_command(ssh_obj=ssh_obj, command=cmd, timeout=timeout) - self.logger.info(out) + self.logger.info(f"{self.cvm_ip} Download file(s) output: {out}") if err: return status, out + err return True, None @@ -411,3 +411,177 @@ def enable_replication_ports(self, ports: Optional[list] = None) -> (str, str): time.sleep(10) return receive, "Operation, timed out or failed!" 
+ + def upgrade_cvm_foundation(self, tar_file_name: str): + """Upgrade CVM Foundation version + + Args: + tar_file_name (str): Foundation tar file + + Returns: + (bool, str): True is Downgrade is successful, else False, Error message if any + """ + # Execute update command + ssh_obj = self.get_ssh_connection(self.cvm_ip, self.cvm_username, self.cvm_password) + cmd = f"source /etc/profile;~/foundation/bin/foundation_upgrade -t {tar_file_name}" + out, err = self.execute_command(ssh_obj=ssh_obj, command=cmd, timeout=300) + self.logger.debug(f"{self.cvm_ip}: {out}") + + # Check for successful message in the output + if "Successfully completed upgrading foundation" in out: + # Remove the tar file from home directory + self.logger.info(f"{self.cvm_ip}: Remove file {tar_file_name}") + rm_cmd = f"source /etc/profile;rm {tar_file_name}" + self.execute_command(ssh_obj=ssh_obj, command=rm_cmd) + return True, None + return False, out + err + + def downgrade_cvm_foundation(self, tar_file_name: str): + """Downgrade CVM Foundation version to lower version + + Args: + tar_file_name (str): Foundation tar file + + Returns: + (bool, str): True is Downgrade is successful, else False, Error message if any + """ + ssh_obj = self.get_ssh_connection(self.cvm_ip, self.cvm_username, self.cvm_password) + foundation_bkp = "source /etc/profile;cp -rf foundation foundation_ztf_bkp" + delete_foundation_folder = "source /etc/profile;/bin/rm -rf foundation" + tar_foundation = f"source /etc/profile;tar -xf {tar_file_name}" + delete_foundationd_bkp = "source /etc/profile;/bin/rm -rf foundation_ztf_bkp" + timeout = 60 + # Backup foundation folder + self.logger.info(f"{self.cvm_ip}: Backing up foundation folder...") + out, err = self.execute_command(ssh_obj=ssh_obj, command=foundation_bkp, timeout=120) + foundation_backup_path = "/home/nutanix/foundation_ztf_bkp" + if not self.file_exists(foundation_backup_path): + return False, f"{self.cvm_ip}: Failed to backup foundation folder: {out} {err}" + + # Remove foundation folder + self.logger.info(f"{self.cvm_ip}: Removing foundation folder...") + out, err = self.execute_command(ssh_obj=ssh_obj, command=delete_foundation_folder, timeout=timeout) + foundation_path = "/home/nutanix/foundation" + if self.file_exists(foundation_path): + return False, f"{self.cvm_ip}: Failed to remove foundation folder: {out} {err}" + + # Untar foundation tar file + self.logger.info(f"{self.cvm_ip}: untar {tar_file_name} file...") + out, err = self.execute_command(ssh_obj=ssh_obj, command=tar_foundation, timeout=timeout) + foundation_path = "/home/nutanix/foundation" + if not self.file_exists(foundation_path): + self.logger.error(f"{self.cvm_ip}: Failed to downgrade foundation version. " \ + "Foundation folder was not created. 
Proceeding to restore the old version") + # Restore backup foundation folder if foundation folder is not created + restore_foundation_bkp = "source /etc/profile;cp -rf foundation_ztf_bkp foundation" + out, err = self.execute_command(ssh_obj=ssh_obj, command=restore_foundation_bkp, timeout=timeout) + if self.file_exists(foundation_path): + self.logger.warning("Restored old foundation version as Foundation downgrade failed") + # Delete foundation backup folder once restored + self.logger.info(f"{self.cvm_ip}: Deleting foundation backup folder") + out, err = self.execute_command(ssh_obj=ssh_obj, command=delete_foundationd_bkp, timeout=timeout) + if self.file_exists(foundation_backup_path): + self.logger.warning(f"{self.cvm_ip}: Failed to delete foundation backup folder") + else: + self.logger.error(f"{self.cvm_ip}: Restoring old foundation version failed. Please check the CVM node to restore the backup.") + return False, f"{self.cvm_ip}: Failed to downgrade Foundation version, as foundation folder was not created: {out} {err}" + else: + # Delete foundation backup folder + self.logger.info(f"{self.cvm_ip}: Deleting foundation backup folder") + out, err = self.execute_command(ssh_obj=ssh_obj, command=delete_foundationd_bkp, timeout=timeout) + if self.file_exists(foundation_backup_path): + self.logger.warning(f"{self.cvm_ip}: Failed to delete foundation backup folder") + return True, None + + def update_cvm_foundation(self, foundation_url_path: str, downgrade: bool = False) -> (str, str): + """ + Update CVM Foundation + Args: + foundation_url_path(str): Url to download Foundation tar file + + Returns: + (boolean, str): True if foundation update is successful else Fasle, Error message if any + """ + status = False + try: + # Downloading files for update + self.download_files(url_list=[foundation_url_path]) + tar_file_name = foundation_url_path.split('/')[-1] + file_path = f"/home/nutanix/{tar_file_name}" + if not self.file_exists(file_path): + return status, f"{self.cvm_ip}: Failed to download files to CVM. File {file_path} does not exists." 
+ + # Execute update command + ssh_obj = self.get_ssh_connection(self.cvm_ip, self.cvm_username, self.cvm_password) + if not downgrade: + update_status, error = self.upgrade_cvm_foundation(tar_file_name) + else: + update_status, error = self.downgrade_cvm_foundation(tar_file_name) + if not update_status: + return update_status, error + + # Delete foundation tar file + self.logger.info(f"{self.cvm_ip}: Deleting foundation {tar_file_name} file") + self.execute_command(ssh_obj=ssh_obj, command=f"source /etc/profile;/bin/rm {tar_file_name}") + if self.file_exists(file_path): + self.logger.warning(f"{self.cvm_ip}: Failed to delete foundation file {tar_file_name}") + + # Stop Foundation and restart genesis + int_chan = self.get_interactive_shell(ssh_obj) + stop_foundation = self.stop_foundation(int_chan) + if stop_foundation: + if not self.restart_genesis(int_chan): + return status, f"{self.cvm_ip}: Failed to restart genesis" + else: + return status, f"{self.cvm_ip}: Failed to stop foundation" + return True, None + except Exception as e: + return status, e + + def get_foundation_version(self) -> (str, str): + """Get the CVM Foundation version + + Returns: + (boolean, str): True if successfully fected the foundation version else Fasle, Error message if any + """ + status = False + try: + ssh_obj = self.get_ssh_connection(self.cvm_ip, self.cvm_username, self.cvm_password) + cmd = "source /etc/profile;cat ~/foundation/foundation_version" + out, err = self.execute_command(ssh_obj=ssh_obj, command=cmd) + self.logger.info(f"{self.cvm_ip} Foundation version is {out}") + if err: + return status, err + return True, out + except Exception as e: + return status, e + + def update_resolv_conf(self, nameserver: str) -> (str, str): + """Update the /etc/resolv.conf file + + Args: + nameserver (str): Nameserver to add to the /etc/resolv.conf + + Returns: + (boolean, str): True if resolv.conf update is successful else Fasle, Error message if any + """ + status = False + try: + ssh_obj = self.get_ssh_connection(self.cvm_ip, self.cvm_username, self.cvm_password) + cat_resolv_config_cmd = f"source /etc/profile;sudo cat /etc/resolv.conf | grep {nameserver}" + out, err = self.execute_command(ssh_obj=ssh_obj, command=cat_resolv_config_cmd) + if nameserver in out or nameserver in err: + self.logger.info(f"{self.cvm_ip}: Nameserver {nameserver} already exists in /etc/resolv.conf") + return True, None + time.sleep(2) + self.logger.info(f"{self.cvm_ip} - Updating nameserver {nameserver} in /etc/resolv.conf") + # in-order to edit resolv.conf, chattr -i must be executed before editing & reverted back to chattr +i after everything + cmds = ["source /etc/profile;sudo chattr -i /etc/resolv.conf", + f"source /etc/profile;sudo sed -i '$ a\\nameserver {nameserver}' /etc/resolv.conf", + "source /etc/profile;sudo chattr +i /etc/resolv.conf"] + for cmd in cmds: + self.execute_command(ssh_obj=ssh_obj, command=cmd) + time.sleep(2) + return True, None + except Exception as e: + return status, e diff --git a/framework/scripts/python/helpers/v3/identity_provider.py b/framework/scripts/python/helpers/v3/identity_provider.py new file mode 100644 index 0000000..a347d7c --- /dev/null +++ b/framework/scripts/python/helpers/v3/identity_provider.py @@ -0,0 +1,69 @@ +from framework.helpers.rest_utils import RestAPIUtil +from ..pc_entity import PcEntity + + +class IdentityProvider(PcEntity): + kind = "identity_provider" + version = "3.1" + + def __init__(self, session: RestAPIUtil): + self.resource_type = "/identity_providers" + 
super(IdentityProvider, self).__init__(session=session) + + # The create_idp method creates a new identity provider with the given parameters. + def create_idp(self, name: str, idp_metadata: str = None, idp_properties: dict = None, **kwargs): + """ + Create Identity Provider + Args: + name(str): Name of the Identity Provider + idp_metadata(str): IDP Metadata + idp_properties(dict): IDP Config + + Returns: + The json response returned by API. + """ + payload = self.get_payload(name, idp_metadata, idp_properties, **kwargs) + return self.create(data=payload) + + def get_payload(self, name: str, idp_metadata: str = None, idp_properties: dict = None, **kwargs): + """ + Generated payload to create an Identity Provider + Args: + name(str): Name of the Identity Provider + idp_metadata(str): IDP Metadata + idp_properties(dict): IDP Config + + Returns: + payload + """ + + # The payload for the API request is created with the given parameters + payload = { + "spec": { + "name": name, + }, + "metadata": {"kind": self.kind}, + "api_version": self.version + } + + if "username_attr" in kwargs: + payload["spec"]["username_attr"] = kwargs["username_attr"] + if "email_attr" in kwargs: + payload["spec"]["email_attr"] = kwargs["email_attr"] + if "groups_attr" in kwargs: + payload["spec"]["groups_attr"] = kwargs["groups_attr"] + if "groups_delim" in kwargs: + payload["spec"]["groups_delim"] = kwargs["groups_delim"] + + # If idp_metadata is provided, it is added to the payload + if idp_metadata is not None: + payload["spec"]["resources"] = { + "idp_metadata": idp_metadata + } + # If idp_properties is provided, it is added to the payload + elif idp_properties is not None: + payload["spec"]["resources"] = { + "idp_properties": idp_properties + } + + return payload diff --git a/framework/scripts/python/nke/create_nke_clusters.py b/framework/scripts/python/nke/create_nke_clusters.py index d8f1f08..166fc9d 100644 --- a/framework/scripts/python/nke/create_nke_clusters.py +++ b/framework/scripts/python/nke/create_nke_clusters.py @@ -1,3 +1,4 @@ +from copy import deepcopy from typing import Dict from framework.helpers.log_utils import get_logger from framework.scripts.python.helpers.karbon.karbon_clusters import KarbonCluster, KarbonClusterV1 @@ -14,10 +15,11 @@ class CreateKarbonClusterPc(Script): Class that creates NKE Clusters in PC """ - def __init__(self, data: Dict, **kwargs): + def __init__(self, data: Dict, global_data: Dict = None, **kwargs): self.task_uuid_list = [] self.response = None self.data = data + self.global_data = deepcopy(global_data) if global_data else {} self.pc_session = self.data["pc_session"] super(CreateKarbonClusterPc, self).__init__(**kwargs) self.logger = self.logger or logger @@ -28,9 +30,12 @@ def execute(self, **kwargs): if not self.data.get("nke_clusters"): self.logger.warning(f"Skipping NKE Clusters creation in {self.data['pc_ip']!r}") return - + if not self.data.get("vaults"): + self.data["vaults"] = self.global_data.get("vaults") + if not self.data.get("vault_to_use"): + self.data["vault_to_use"] = self.global_data.get("vault_to_use") karbon_cluster = KarbonCluster(self.pc_session) - karbon_cluster_v1 = KarbonClusterV1(self.pc_session, ) + karbon_cluster_v1 = KarbonClusterV1(self.pc_session, self.data) existing_clusters_list = karbon_cluster.list() existing_clusters_name_list = [existing_cluster.get("cluster_metadata", {}).get("name") for existing_cluster in existing_clusters_list] diff --git a/framework/scripts/python/objects/configure_objects.py 
diff --git a/framework/scripts/python/nke/create_nke_clusters.py b/framework/scripts/python/nke/create_nke_clusters.py
index d8f1f08..166fc9d 100644
--- a/framework/scripts/python/nke/create_nke_clusters.py
+++ b/framework/scripts/python/nke/create_nke_clusters.py
@@ -1,3 +1,4 @@
+from copy import deepcopy
 from typing import Dict
 from framework.helpers.log_utils import get_logger
 from framework.scripts.python.helpers.karbon.karbon_clusters import KarbonCluster, KarbonClusterV1
@@ -14,10 +15,11 @@ class CreateKarbonClusterPc(Script):
     Class that creates NKE Clusters in PC
     """
 
-    def __init__(self, data: Dict, **kwargs):
+    def __init__(self, data: Dict, global_data: Dict = None, **kwargs):
         self.task_uuid_list = []
         self.response = None
         self.data = data
+        self.global_data = deepcopy(global_data) if global_data else {}
         self.pc_session = self.data["pc_session"]
         super(CreateKarbonClusterPc, self).__init__(**kwargs)
         self.logger = self.logger or logger
@@ -28,9 +30,12 @@ def execute(self, **kwargs):
         if not self.data.get("nke_clusters"):
             self.logger.warning(f"Skipping NKE Clusters creation in {self.data['pc_ip']!r}")
             return
-
+        if not self.data.get("vaults"):
+            self.data["vaults"] = self.global_data.get("vaults")
+        if not self.data.get("vault_to_use"):
+            self.data["vault_to_use"] = self.global_data.get("vault_to_use")
         karbon_cluster = KarbonCluster(self.pc_session)
-        karbon_cluster_v1 = KarbonClusterV1(self.pc_session, )
+        karbon_cluster_v1 = KarbonClusterV1(self.pc_session, self.data)
         existing_clusters_list = karbon_cluster.list()
         existing_clusters_name_list = [existing_cluster.get("cluster_metadata", {}).get("name")
                                        for existing_cluster in existing_clusters_list]
diff --git a/framework/scripts/python/objects/configure_objects.py b/framework/scripts/python/objects/configure_objects.py
index 74e476c..10dfc49 100644
--- a/framework/scripts/python/objects/configure_objects.py
+++ b/framework/scripts/python/objects/configure_objects.py
@@ -34,8 +34,10 @@ def execute(self):
 
         objects_batch_scripts = BatchScript(results_key=self.results_key)
 
+        if "enable_objects" in self.data and self.data["enable_objects"] is True:
+            objects_batch_scripts.add(EnableObjects(self.data, log_file=self.log_file))
+
         objects_batch_scripts.add_all([
-            EnableObjects(self.data, log_file=self.log_file),
             CreateObjectStore(self.data, log_file=self.log_file),
             AddDirectoryServiceOss(self.data, log_file=self.log_file),
             AddAdUsersOss(self.data, log_file=self.log_file),
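The gating added above (and the similar `enable_*` checks in configure_pc.py below) only schedules EnableObjects when the flag is explicitly true, not merely present. A tiny standalone illustration of the difference:

```python
data = {"enable_objects": False}

# Presence check alone would wrongly schedule EnableObjects for an explicit false:
print("enable_objects" in data)                                       # True
# The stricter check used in the diff respects the explicit false:
print("enable_objects" in data and data["enable_objects"] is True)    # False
```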
diff --git a/framework/scripts/python/pc/configure_pc.py b/framework/scripts/python/pc/configure_pc.py
index ca9b6e1..97fb3ff 100644
--- a/framework/scripts/python/pc/configure_pc.py
+++ b/framework/scripts/python/pc/configure_pc.py
@@ -1,6 +1,7 @@
-import time
 from copy import deepcopy
 from typing import Optional, Dict
+from framework.scripts.python.pc.create.create_identity_provider import CreateIdp
+from framework.scripts.python.objects.configure_objects import OssConfig
 from framework.scripts.python.pc.other_ops.accept_eula import AcceptEulaPc
 from framework.scripts.python.pc.other_ops.change_default_system_password import ChangeDefaultAdminPasswordPc
 from framework.scripts.python.pc.other_ops.update_pulse_pc import UpdatePulsePc
@@ -21,7 +22,7 @@
 from framework.scripts.python.pc.create.add_ntp_server_pc import AddNtpServersPc
 from framework.scripts.python.pc.create.create_rolemapping_pc import CreateRoleMappingPc
 from framework.helpers.log_utils import get_logger
-from framework.helpers.helper_functions import read_creds, create_pc_objects
+from framework.helpers.helper_functions import create_pc_objects
 
 logger = get_logger(__name__)
 
@@ -47,16 +48,6 @@ def execute(self):
         if not self.data.get("vault_to_use"):
             self.data["vault_to_use"] = self.global_data.get("vault_to_use")
 
-        # block_credential = self.data.get("pc_credential")
-        # # get credentials from the payload
-        # if block_credential:
-        #     try:
-        #         self.data["pc_username"], self.data["pc_password"] = read_creds(data=self.data,
-        #                                                                         credential=block_credential)
-        #     except Exception as e:
-        #         self.exceptions.append(e)
-        #         return
-
         # Get PC session
         if not self.data.get("pc_session"):
             create_pc_objects(self.data, global_data=self.global_data)
@@ -75,17 +66,18 @@ def execute(self):
         if "pc_directory_services" in self.data or "directory_services" in self.data:
             # Add Auth -> needs PC config
             pc_batch_scripts.add(AddAdServerPc(self.data, log_file=self.log_file))
-            time.sleep(10)
+        if "pc_saml_idp_configs" in self.data or "saml_idp_configs" in self.data:
+            pc_batch_scripts.add(CreateIdp(self.data, log_file=self.log_file))
         # Add Role-mappings -> needs AddAdServer
         # Add NTP servers -> InitialPcConfig
         # Add Name servers -> InitialPcConfig
 
         pc_enable_scripts = BatchScript(parallel=True)
-        if "enable_microsegmentation" in self.data:
+        if "enable_microsegmentation" in self.data and self.data["enable_microsegmentation"] is True:
             pc_enable_scripts.add(EnableMicrosegmentation(self.data, log_file=self.log_file))
-        if "enable_dr" in self.data:
+        if "enable_dr" in self.data and self.data["enable_dr"] is True:
             pc_enable_scripts.add(EnableDR(self.data, log_file=self.log_file))
-        if "enable_nke" in self.data:
+        if "enable_nke" in self.data and self.data["enable_nke"] is True:
             pc_enable_scripts.add(EnableNke(self.data, log_file=self.log_file))
         if "remote_azs" in self.data:
             pc_enable_scripts.add(ConnectToAz(self.data, log_file=self.log_file))
@@ -129,6 +121,13 @@ def execute(self):
         # create RP -> needs CreateProtectionPolicy
         if "recovery_plans" in self.data:
             pc_batch_scripts.add(CreateRecoveryPlan(self.data, log_file=self.log_file))
+
+        if "objects" in self.data or "enable_objects" in self.data:
+            # Create objects
+            if self.data.get("objects", {}).get("objectstores"):
+                pc_batch_scripts.add(OssConfig(data=deepcopy(self.data), global_data=self.data,
+                                               results_key="objects",
+                                               log_file="objects_ops.log"))
         self.results.update(pc_batch_scripts.run())
         self.data["json_output"] = self.results
     except Exception as e:
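Note the OssConfig wiring above hands the objects step a `deepcopy` of the PC config while keeping `self.data` itself as the shared `global_data`. A small standalone illustration of why the copy matters (the dict contents are made up):

```python
from copy import deepcopy

pc_data = {"objects": {"objectstores": [{"name": "oss1"}]}}   # made-up config
objects_data = deepcopy(pc_data)          # the objects step may mutate this freely
objects_data["objects"]["objectstores"][0]["name"] = "oss1-renamed"
print(pc_data["objects"]["objectstores"][0]["name"])          # prints: oss1
```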
diff --git a/framework/scripts/python/pc/create/create_identity_provider.py b/framework/scripts/python/pc/create/create_identity_provider.py
new file mode 100644
index 0000000..87db7f9
--- /dev/null
+++ b/framework/scripts/python/pc/create/create_identity_provider.py
@@ -0,0 +1,127 @@
+import os
+from copy import deepcopy
+from typing import Dict
+from urllib.request import urlopen
+from framework.helpers.log_utils import get_logger
+from framework.scripts.python.helpers.state_monitor.pc_task_monitor import PcTaskMonitor
+from framework.scripts.python.helpers.v3.identity_provider import IdentityProvider
+from framework.scripts.python.script import Script
+
+logger = get_logger(__name__)
+
+
+class CreateIdp(Script):
+    """
+    Class that creates an Identity Provider in PC
+    """
+
+    def __init__(self, data: Dict, **kwargs):
+        self.data = data
+        self.pc_session = self.data["pc_session"]
+        self.idp_configs = self.data.get("saml_idp_configs")
+        super(CreateIdp, self).__init__(**kwargs)
+        self.logger = self.logger or logger
+
+    def execute(self, **kwargs):
+        try:
+            if self.idp_configs:
+                idp_payload_list = []
+                idp_op = IdentityProvider(self.pc_session)
+
+                for idp_config in deepcopy(self.idp_configs):
+                    self.logger.info(f"Adding Identity Providers in {self.data['pc_ip']!r}")
+
+                    if not (idp_name := idp_config.pop("name", "")):
+                        self.exceptions.append("Need IDP Name")
+                        return
+
+                    idp_properties = idp_config.pop("idp_properties", {})
+                    idp_metadata = None
+
+                    if (not idp_config.get("metadata_url") and not idp_properties and
+                            not idp_config.get("metadata_path")):
+                        self.exceptions.append("Need either Metadata URL or Metadata Path or IDP Properties")
+                        continue
+
+                    if idp_config.get("metadata_url"):
+                        metadata_url = idp_config.pop("metadata_url")
+                        try:
+                            idp_metadata = urlopen(metadata_url).read()
+                            idp_metadata = idp_metadata.decode("utf-8")
+                        except Exception as e:
+                            self.exceptions.append(f"Failed to read metadata from "
+                                                   f"{metadata_url!r} with the error: {e}")
+                            continue
+                    elif idp_config.get("metadata_path"):
+                        metadata_path = idp_config.pop("metadata_path")
+                        try:
+                            metadata_path = os.path.join(self.data["project_root"], metadata_path)
+                            if os.path.isfile(metadata_path):
+                                with open(metadata_path, "r") as file:
+                                    idp_metadata = file.read()
+                            else:
+                                self.exceptions.append(f"Metadata file {metadata_path!r} not found")
+                                continue
+                        except Exception as e:
+                            self.exceptions.append(f"Failed to read metadata from {metadata_path!r} with the error: "
+                                                   f"{e}")
+                            continue
+                    else:
+                        idp_list = idp_op.list()
+                        idp_url_list = [idp["spec"]["resources"]["idp_url"] for idp in idp_list
+                                        if idp.get("spec", {}).get("resources", {}).get("idp_url")]
+                        if idp_properties.get("idp_url") in idp_url_list:
+                            self.exceptions.append(f"IDP with URL {idp_properties.get('idp_url')!r} already exists")
+                            continue
+
+                    payload = idp_op.get_payload(name=idp_name, idp_metadata=idp_metadata,
+                                                 idp_properties=idp_properties, **idp_config)
+                    idp_payload_list.append(payload)
+
+                if not idp_payload_list:
+                    self.logger.warning(f"No IDPs to create, skipping adding Identity Providers in "
+                                        f"{self.data['pc_ip']!r}")
+                    return
+
+                self.logger.info(f"Trigger batch create API for Identity Providers in {self.data['pc_ip']!r}")
+                task_uuid_list = idp_op.batch_op.batch_create(request_payload_list=idp_payload_list)
+
+                # Monitor the tasks
+                if task_uuid_list:
+                    app_response, status = PcTaskMonitor(self.pc_session,
+                                                         task_uuid_list=task_uuid_list).monitor()
+
+                    if app_response:
+                        self.exceptions.append(f"Some tasks have failed. {app_response}")
+
+                    if not status:
+                        self.exceptions.append(
+                            "Timed out. Creation of Identity Providers in PC didn't happen in the prescribed timeframe")
+            else:
+                self.logger.info(f"No IDP config found, skipping adding Identity Providers in {self.data['pc_ip']!r}")
+        except Exception as e:
+            self.exceptions.append(e)
+
+    def verify(self, **kwargs):
+        if not self.idp_configs:
+            return
+
+        # Initial status
+        self.results["CreateIdentityProviders"] = {}
+
+        idp_op = IdentityProvider(self.pc_session)
+        idp_list = []
+        idp_name_list = []
+
+        for idp_config in self.idp_configs:
+            # Initial status
+            self.results["CreateIdentityProviders"][idp_config['name']] = "CAN'T VERIFY"
+
+            idp_list = idp_list or idp_op.list()
+            idp_name_list = idp_name_list or [idp.get("spec", {}).get("name")
+                                              for idp in idp_list if
+                                              idp.get("spec", {}).get("name")]
+            if idp_config['name'] in idp_name_list:
+                self.results["CreateIdentityProviders"][idp_config['name']] = "PASS"
+            else:
+                self.results["CreateIdentityProviders"][idp_config['name']] = "FAIL"
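For reference, the `metadata_url` branch above reduces to a plain urllib fetch of the IdP's SAML metadata; a standalone sketch with a placeholder URL:

```python
from urllib.request import urlopen

# Placeholder URL; a real SAML IdP exposes its EntityDescriptor XML at an endpoint like this.
metadata_url = "https://idp.example.com/app/metadata.xml"
idp_metadata = urlopen(metadata_url).read().decode("utf-8")
```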
node_info["hypervisor_hostname"] = hypervisor_hostname node_info["cvm_gateway"] = network["host_gateway"] node_info["cvm_netmask"] = get_subnet_mask(subnet=network["host_subnet"]) node_info["ipmi_ip"] = ipmi_ip diff --git a/framework/scripts/python/pe/configure_cluster.py b/framework/scripts/python/pe/configure_cluster.py index 473f46e..b3b3889 100644 --- a/framework/scripts/python/pe/configure_cluster.py +++ b/framework/scripts/python/pe/configure_cluster.py @@ -15,6 +15,8 @@ from framework.scripts.python.script import Script from framework.scripts.python.pe.create.add_ntp_server_pe import AddNtpServersPe from framework.scripts.python.pe.create.add_name_server_pe import AddNameServersPe +from framework.scripts.python.pe.update.ha_reservation import HaReservation +from framework.scripts.python.pe.update.rebuild_capacity_reservation import RebuildCapacityReservation from framework.helpers.log_utils import get_logger from framework.helpers.helper_functions import create_pe_objects @@ -57,11 +59,13 @@ def execute(self): AcceptEulaPe(self.data, log_file=self.log_file), UpdatePulsePe(self.data, log_file=self.log_file), AddAdServerPe(self.data, log_file=self.log_file), - # OpenRepPort(self.data, log_file=self.log_file), + OpenRepPort(self.data, log_file=self.log_file), CreateContainerPe(self.data, log_file=self.log_file), AddNtpServersPe(self.data, log_file=self.log_file), AddNameServersPe(self.data, log_file=self.log_file), - CreateSubnetPe(self.data, log_file=self.log_file) + CreateSubnetPe(self.data, log_file=self.log_file), + HaReservation(self.data, log_file=self.log_file), + RebuildCapacityReservation(self.data, log_file=self.log_file) ]) if not self.data.get("skip_pc_registration") and self.data.get("pc_ip") and self.data.get("pc_credential"): main_cluster_batch_scripts.add(RegisterToPc(self.data, log_file=self.log_file)) diff --git a/framework/scripts/python/pe/update/ha_reservation.py b/framework/scripts/python/pe/update/ha_reservation.py index ff3a7dc..221d523 100644 --- a/framework/scripts/python/pe/update/ha_reservation.py +++ b/framework/scripts/python/pe/update/ha_reservation.py @@ -28,19 +28,19 @@ def execute_single_cluster(self, cluster_ip: str, cluster_details: Dict): f" Skipping...'") return - pe_session = cluster_details["pe_session"] - cluster = PeCluster(pe_session) - cluster.get_cluster_info() - cluster_details["cluster_info"].update(cluster.cluster_info) - cluster_info = f"{cluster_ip}/ {cluster_details['cluster_info']['name']}" if ( - 'name' in cluster_details['cluster_info']) else f"{cluster_ip}" + try: + pe_session = cluster_details["pe_session"] + cluster = PeCluster(pe_session) + cluster.get_cluster_info() + cluster_details["cluster_info"].update(cluster.cluster_info) + cluster_info = f"{cluster_ip}/ {cluster_details['cluster_info']['name']}" if ( + 'name' in cluster_details['cluster_info']) else f"{cluster_ip}" - if cluster_details['cluster_info']["num_nodes"] == 1: - self.logger.warning(f"HA reservation is not supported for single node cluster '{cluster_ip}/ {cluster_details['cluster_info']['name']}'." - f" Skipping...'") - return + if cluster_details['cluster_info']["num_nodes"] == 1: + self.logger.warning(f"HA reservation is not supported for single node cluster '{cluster_ip}/ {cluster_details['cluster_info']['name']}'." 
diff --git a/framework/scripts/python/pe/update/rebuild_capacity_reservation.py b/framework/scripts/python/pe/update/rebuild_capacity_reservation.py
index 14f0b18..d7239ef 100644
--- a/framework/scripts/python/pe/update/rebuild_capacity_reservation.py
+++ b/framework/scripts/python/pe/update/rebuild_capacity_reservation.py
@@ -27,26 +27,26 @@ def execute_single_cluster(self, cluster_ip: str, cluster_details: Dict):
                                 f" Skipping...'")
             return
 
-        pe_session = cluster_details["pe_session"]
-        cluster = PeCluster(pe_session)
-        cluster.get_cluster_info()
-        cluster_details["cluster_info"].update(cluster.cluster_info)
-        cluster_info = f"{cluster_ip}/ {cluster_details['cluster_info']['name']}" if (
-            'name' in cluster_details['cluster_info']) else f"{cluster_ip}"
+        try:
+            pe_session = cluster_details["pe_session"]
+            cluster = PeCluster(pe_session)
+            cluster.get_cluster_info()
+            cluster_details["cluster_info"].update(cluster.cluster_info)
+            cluster_info = f"{cluster_ip}/ {cluster_details['cluster_info']['name']}" if (
+                'name' in cluster_details['cluster_info']) else f"{cluster_ip}"
 
-        # Check if the cluster size is equal to or than 2, as rebuild is not supported for 1 and 2 node clusters.
-        if cluster_details["cluster_info"]["num_nodes"] <= 2:
-            self.logger.warning("Rebuild reservation feature can not be enabled for 1 and 2 node cluster "
-                                f"'{cluster_ip}/ {cluster_details['cluster_info']['name']}'. Skipping...'")
-            return
+            # Check if the cluster size is equal to or less than 2, as rebuild is not supported for 1 and 2 node clusters.
+            if cluster_details["cluster_info"]["num_nodes"] <= 2:
+                self.logger.warning("Rebuild reservation feature cannot be enabled for 1 and 2 node cluster "
+                                    f"'{cluster_ip}/ {cluster_details['cluster_info']['name']}'. Skipping...")
+                return
 
-        # Check if enable_rebuild_reservation is already in required state
-        if cluster_details["cluster_info"]["enable_rebuild_reservation"] == cluster_details["enable_rebuild_reservation"]:
-            self.logger.warning(f"Enable Rebuild is already {cluster_details['enable_rebuild_reservation']} "
-                                f"for '{cluster_ip}/ {cluster_details['cluster_info']['name']}'")
-            return
+            # Check if enable_rebuild_reservation is already in the required state
+            if cluster_details["cluster_info"]["enable_rebuild_reservation"] == cluster_details["enable_rebuild_reservation"]:
+                self.logger.warning(f"Enable Rebuild is already {cluster_details['enable_rebuild_reservation']} "
+                                    f"for '{cluster_ip}/ {cluster_details['cluster_info']['name']}'")
+                return
 
-        try:
             self.logger.info(f"Updating Rebuild Reservation in {cluster_info!r}")
             rebuild_op = Cluster(session=pe_session)
             response = rebuild_op.update_rebuild_reservation(cluster_details.get("enable_rebuild_reservation"))
diff --git a/framework/scripts/python/script.py b/framework/scripts/python/script.py
index bab90ac..b3fc788 100644
--- a/framework/scripts/python/script.py
+++ b/framework/scripts/python/script.py
@@ -27,7 +27,7 @@ def run(self, **kwargs):
             self.verify(**kwargs)
         except Exception as e:
             self.logger.debug(e)
-            self.logger.info(f"Exception occurred during the verification of {type(self).__name__!r}")
+            self.logger.info(f"Exception occurred during the verification of {type(self).__name__!r}: {e}")
 
         if self.exceptions:
             for exception in self.exceptions:
diff --git a/main.py b/main.py
index 407e123..0c65c26 100644
--- a/main.py
+++ b/main.py
@@ -70,7 +70,7 @@ def main():
         scripts = [ClusterConfig]
     elif workflow_type == "deploy-pc":
         schema = DEPLOY_PC_CONFIG_SCHEMA
-        pre_run_actions += [create_pc_objects]
+        pre_run_actions += [create_pe_objects]
         post_run_actions.insert(0, generate_html_from_json)
         scripts = [DeployPC]
     elif workflow_type == "config-pc":
@@ -116,7 +116,7 @@ def main():
             logger.error("Invalid Script specified. Specify the correct Script and try again")
             raise ModuleNotFoundError(e)
 
-        pre_run_actions = [get_input_data, validate_input_data]
+        pre_run_actions = [get_input_data, get_creds_from_vault, validate_input_data]
         post_run_actions = [save_logs, generate_html_from_json]
 
     # Check if schema is valid
@@ -169,4 +169,4 @@ def main():
     # Call the main function
     debug = args.debug
     ConfigureRootLogger(debug)
-    main()
\ No newline at end of file
+    main()
diff --git a/releases/1.3.1/README.md b/releases/1.3.1/README.md
new file mode 100644
index 0000000..aced6af
--- /dev/null
+++ b/releases/1.3.1/README.md
@@ -0,0 +1,26 @@
+# v1.3.1
+
+## CVM Foundation version enhancement
+
+- An additional script has been included to modify the CVM Foundation version (either upgrade or downgrade)
+
+## Create Identity Provider (IDP) in IAM
+
+- We now facilitate the setup of any IDP compliant with SAML 2.0 in Prism Central
+
+## Hypervisor hostname Configuration in Imaging
+
+- The "hypervisor_hostname" can now be defined in [pod-deploy.yml](../../config/example-configs/pod-configs/pod-deploy.yml)
+  to configure the Hypervisor hostname during Imaging
+
+## Objects
+
+- It is now possible to define "storage_network" and "public_network" separately when creating an Objectstore
+
+## CyberArk
+
+- The CyberArk integration has been enhanced to support the CyberArk AIM API
+
+## Cluster Configuration workflow
+
+- HA reservation and Rebuild Capacity Reservation are now supported in the Cluster Configuration workflow
\ No newline at end of file
diff --git a/requirements/prod.txt b/requirements/prod.txt
index 16a6c12..9b3500d 100644
--- a/requirements/prod.txt
+++ b/requirements/prod.txt
@@ -166,6 +166,8 @@
 wcwidth==0.2.13
     # via
     #   asciimatics
     #   prompt-toolkit
+wheel==0.43.0
+    # via cx-freeze
 zipp==3.18.1
     # via importlib-metadata