From 461cd55685397ed7a597d33eb63484925b81d182 Mon Sep 17 00:00:00 2001 From: Pierre Fenoll Date: Thu, 1 Sep 2022 21:29:15 +0200 Subject: [PATCH] reproduce #570 Signed-off-by: Pierre Fenoll --- openapi3/issue570_test.go | 27 + ...ithub.io,api-doc-internal-6.0,openapi.json | 79790 ++++++++++++++++ 2 files changed, 79817 insertions(+) create mode 100644 openapi3/issue570_test.go create mode 100644 openapi3/testdata/https:,,rubrikinc.github.io,api-doc-internal-6.0,openapi.json diff --git a/openapi3/issue570_test.go b/openapi3/issue570_test.go new file mode 100644 index 000000000..12ad733e9 --- /dev/null +++ b/openapi3/issue570_test.go @@ -0,0 +1,27 @@ +package openapi3 + +import ( + // "net/url" + "testing" + + "github.com/stretchr/testify/require" +) + +// func TestIssue570FromURIFIXMEReplaceMe(t *testing.T) { +// uri, err := url.Parse("https://rubrikinc.github.io/api-doc-internal-6.0/openapi.json") +// require.NoError(t, err) +// https://github.com/getkin/kin-openapi/pull/571 +// loader := NewLoader() +// doc, err := loader.LoadFromURI(uri) +// require.NoError(t, err) +// err = doc.Validate(loader.Context) +// require.NoError(t, err) +// } + +func TestIssue570TODOMinimizeMe(t *testing.T) { + loader := NewLoader() + doc, err := loader.LoadFromFile("testdata/https:,,rubrikinc.github.io,api-doc-internal-6.0,openapi.json") + require.NoError(t, err) + err = doc.Validate(loader.Context) + require.NoError(t, err) +} diff --git a/openapi3/testdata/https:,,rubrikinc.github.io,api-doc-internal-6.0,openapi.json b/openapi3/testdata/https:,,rubrikinc.github.io,api-doc-internal-6.0,openapi.json new file mode 100644 index 000000000..43dd39328 --- /dev/null +++ b/openapi3/testdata/https:,,rubrikinc.github.io,api-doc-internal-6.0,openapi.json @@ -0,0 +1,79790 @@ +{ + "swagger": "2.0", + "info": { + "version": "internal", + "title": "Rubrik INTERNAL REST API", + "description": "Copyright © 2017-2021 Rubrik Inc.\n\n# Introduction\n\nThis is the INTERNAL REST API for Rubrik. We don't guarantee support or backward compatibility. Use at your own risk.\n\n# Changelog\n\n Revisions are listed with the most recent revision first.\n ### Changes to Internal API in Rubrik version 6.0\n ## Breaking changes:\n * Renamed field `node` to `nodeId` for object `NetworkInterface` used by\n `GET /cluster/{id}/network_interface`.\n * Removed `compliance24HourStatus` in `DataSourceTableRequest` for\n `POST /report/data_source/table`.\n Use `complianceStatus`, `awaitingFirstFull`, and `snapshotRange`\n as replacements.\n * Changed the sort_by attribute of `GET /vcd/vapp` to use\n `VcdVappObjectSortAttribute`.\n This attribute no longer uses the `VappCount` or `ConnectionStatus`\n parameters from the previously used `VcdHierarchyObjectSortAttribute`.\n\n ## Feature additions/improvements:\n * Added the `GET /sla_domain/{id}/protected_objects` endpoint to return\n objects explicitly protected by the SLA Domain with direct assignments.\n * Added new field `nodeName` for object `NetworkInterface` used by\n `GET /cluster/{id}/network_interface`.\n * Added the `POST /cluster/{id}/remove_nodes` endpoint to trigger a bulk\n node removal job.\n * Added new optional field `numChannels` to `ExportOracleDbConfig` object\n specifying the number of channels used during Oracle clone or same-host\n recovery.\n * Added new optional fields `forceFull` to the object\n `HypervVirtualMachineSummary` used by `GET /hyperv/vm`. 
This field is also\n used in `HypervVirtualMachineDetail` used by `GET /hyperv/vm/{id}` and\n `PATCH /hyperv/vm/{id}`.\n * Added the `GET /cluster/{id}/bootstrap_config` endpoint to enable Rubrik CDM\n to retrieve Rubrik cluster configuration information for the cluster nodes.\n * Added new optional field clusterUuid to the ClusterConfig object used\n by `POST /cluster/{id}/bootstrap` and `POST /cluster/{id}/setupnetwork`.\n * Added new optional fields `dataGuardGroupId` and `dataGuardGroupName` to\n the object `OracleHierarchyObjectSummary` used by\n `GET /oracle/hierarchy/{id}`, `GET /oracle/hierarchy/{id}/children`, and\n `GET /oracle/hierarchy/{id}/descendants`.\n * Added new optional fields `dataGuardGroupId` and `dataGuardGroupName` to\n the object `OracleDbSummary` used by `GET /oracle/db`.\n * Added new optional fields `dataGuardGroupId` and `dataGuardGroupName` to\n the object `OracleDbDetail` used by `GET /oracle/db/{id}` and\n `PATCH /oracle/db/{id}`.\n * Added a new optional field `immutabilityLockSummary` to the object\n `ArchivalLocationSummary` returned by GET `/archive/location` and\n GET `/organization/{id}/archive/location`\n * Added new optional fields `dbUniqueName` and `databaseRole` to the object\n `OracleHierarchyObjectSummary` used by `GET /oracle/hierarchy/{id}`,\n `GET /oracle/hierarchy/{id}/children`, and\n `GET /oracle/hierarchy/{id}/descendants`.\n * Added new required fields `dbUniqueName` and `databaseRole` to the object\n `OracleDbSummary` used by `GET /oracle/db`.\n * Added a new required field `databaseRole` to the object `OracleDbDetail`\n used by `GET /oracle/db/{id}` and `PATCH /oracle/db/{id}`.\n * Added a new optional field `subnet` to `ManagedVolumeUpdate`, used by \n `PATCH /managed_volume/{id}` for updating the subnet to which the node IPs\n will belong during an SLA MV backup.\n * Added new optional field `numChannels` to `RecoverOracleDbConfig`\n and `MountOracleDbConfig` objects specifying the number of channels used\n during Oracle recovery.\n * Added a new optional field `immutabilityLockSummary` to the object\n `ObjectStoreLocationSummary` and `ObjectStoreUpdateDefinition` used by\n `GET/POST /archive/object_store` and `GET/POST /archive/object_store/{id}`\n * Added a new optional field `errorMessage` to `SupportTunnelInfo` object \n used by `GET /node/{id}/support_tunnel` and\n `PATCH /node/{id}/support_tunnel`.\n * Added new optional field `cloudStorageLocation` to the `ClusterConfig`\n object used by `POST /cluster/{id}/bootstrap`.\n * Added new enum `Disabled` to `DataLocationOwnershipStatus`\n used by `ArchivalLocationSummary`\n * Added a new optional field `installTarball` to the `ClusterConfig`\n object used by `POST /cluster/{id}/bootstrap`.\n * Added a new optional field `clusterInstall` to the `ClusterConfigStatus`\n object used by `GET /cluster/{id}/bootstrap`.\n * Added the `GET /cluster/{id}/install` endpoint to return the current\n status of Rubrik CDM install on a cluster.\n * Added the `POST /cluster/{id}/install` endpoint to allow Rubrik CDM \n install on cluster nodes which are not bootstrapped.\n * Added the `GET /cluster/{id}/packages` endpoint to return the list of\n Rubrik CDM packages available for installation.\n * Updated `request_id` parameter in the `GET /cluster/{id}/bootstrap` \n endpoint, as not required.\n * Updated `request_id` parameter in the `GET /cluster/{id}/install` \n endpoint, as not required.\n * Updated `BootstrappableNodeInfo` returned by `GET /cluster/{id}/discover`\n endpoint to include the 
`version` field, to indicate the\n Rubrik CDM software version.\n * Added a new optional field `isSetupNetworkOnly` to the `ClusterConfig`\n object used by `POST /cluster/{id}/setupnetwork`.\n * Added the `POST /cluster/{id}/setupnetwork` endpoint to enable Rubrik CDM\n to perform network setup on nodes that are not bootstrapped.\n * Added the `GET /cluster/{id}/setupnetwork` endpoint to return the current\n status of setup network command on node or nodes.\n * Added a new optional field `hostname` to the `NodeStatus` object used by\n `GET /cluster/{id}/node`, `GET /node`, `GET /node/stats`, `GET /node/{id}`,\n and `GET /node/{id}/stats`.\n * Added new optional fields `usedFastVhdx` and `fileSizeInBytes` to the\n `HypervVirtualMachineSnapshotSummary` returned by the API\n `GET /hyperv/vm/{id}/snapshot`.\n * Added the `GET /archive/location/request/{id}` endpoint to query the status\n of asynchronous archival location requests.\n\n ## Deprecation:\n * Deprecated the following Oracle endpoints\n * `GET /oracle/db`\n * `GET /oracle/db/{id}`\n * `PATCH /oracle/db/{id}`\n * Deprecated the following vcd hierarchy endpoints. \n * `GET /vcd/hierarchy/{id}`\n * `GET /vcd/hierarchy/{id}/children`\n * `GET /vcd/hierarchy/{id}/descendants`\n * Deprecated the following vcd cluster endpoints.\n * `GET /vcd/cluster`\n * `POST /vcd/cluster`\n * `GET /vcd/cluster/{id}/vimserver`\n * `POST /vcd/cluster/{id}/refresh`\n * `GET /vcd/cluster/{id}`\n * `PATCH /vcd/cluster/{id}`\n * `DELETE /vcd/cluster/{id}`\n * `GET /vcd/cluster/request/{id}`\n * Deprecated the following vcd vapp endpoints.\n * `GET /vcd/vapp`\n * `GET /vcd/vapp/{id}`\n * `PATCH /vcd/vapp/{id}`\n * `GET /vcd/vapp/{id}/snapshot`\n * `POST /vcd/vapp/{id}/snapshot`\n * `DELETE /vcd/vapp/{id}/snapshot`\n * `GET/vcd/vapp/snapshot/{id}`\n * `DELETE /vcd/vapp/snapshot/{id}`\n * `GET /vcd/vapp`\n * `GET /vcd/vapp/{id}/missed_snapshot`\n * `GET /vcd/vapp/snapshot/{snapshot_id}/export/options`\n * `POST /vcd/vapp/snapshot/{snapshot_id}/export`\n * `POST /vcd/vapp/snapshot/{snapshot_id}/instant_recover`\n * `GET /vcd/vapp/snapshot/{snapshot_id}/instant_recover/options`\n * `GET /vcd/vapp/request/{id}`\n * `GET /vcd/vapp/{id}/search`\n * `POST /vcd/vapp/snapshot/{id}/download`\n\n ### Changes to Internal API in Rubrik version 5.3.2\n ## Deprecation:\n * Deprecated `compliance24HourStatus` in `DataSourceTableRequest` for\n `POST /report/data_source/table`.\n Use `complianceStatus`, `awaitingFirstFull`, and `snapshotRange`\n as replacements.\n\n ### Changes to Internal API in Rubrik version 5.3.1\n ## Breaking changes:\n * Added new required field `isPwdEncryptionSupported` to\n the API response `PlatformInfo` for password-based encryption at rest\n in the API `GET /cluster/{id}/platforminfo`.\n\n ## Feature additions/improvements:\n * Added new field `hostsInfo` to OracleHierarchyObjectSummary\n returned by `GET /oracle/hierarchy/{id}/children`.\n * Added new field `hostsInfo` to OracleHierarchyObjectSummary\n returned by `GET /oracle/hierarchy/{id}/descendants`.\n * Added new field `hostsInfo` to OracleHierarchyObjectSummary\n returned by `GET /oracle/hierarchy/{id}`.\n * Added `shouldKeepConvertedDisksOnFailure` as an optional field in\n CreateCloudInstanceRequest definition used in the on-demand API\n conversion API `/cloud_on/aws/instance` and `/cloud_on/azure/instance`.\n This will enable converted disks to be kept on failure for CloudOn\n conversion.\n * Added the `hostsInfo` field to the OracleDbDetail that the\n `GET /oracle/db/{id}` and `PATCH 
/oracle/db/{id}` endpoints return.\n * Added new optional field `isOnNetAppSnapMirrorDestVolume` to\n HostShareParameters to support backup of NetApp SnapMirror\n destination volume.\n * Added new optional fields `encryptionPassword` and\n `newEncryptionPassword` to the KeyRotationOptions to support\n key rotation for password-based encryption at rest in\n internal API `POST /cluster/{id}/security/key_rotation`.\n * Added `Index` to `ReportableTaskType`.\n * Added new optional field `totpStatus` in `UserDetail` for\n showing the TOTP status of the user with the endpoint\n `GET /internal/user/{id}`\n * Added new optional field `isTotpEnforced` in `UserDefinition` for\n configuring the TOTP enforcement for the user with the endpoint\n `POST /internal/user`\n * Added new optional field `isTotpEnforced` in `UserUpdateInfo` for\n configuring the TOTP enforcement for the user with the endpoint\n `PATCH /internal/user/{id}`\n * Added a new field `HypervVirtualDiskInfo` to HypervVirtualMachineDetail \n used by `GET /hyperv/vm/{id}`.\n * Added a new field `virtualDiskIdsExcludedFromSnapshot` to \n HypervVirtualMachineUpdate used by `PATCH /hyperv/vm/{id}`.\n\n ### Changes to Internal API in Rubrik version 5.3.0\n ## Deprecation:\n * Deprecated `GET /authorization/role/admin`,\n `GET /authorization/role/compliance_officer`,\n `GET /authorization/role/end_user`,\n `GET /authorization/role/infra_admin`,\n `GET /authorization/role/managed_volume_admin`,\n `GET /authorization/role/managed_volume_user`,\n `GET /authorization/role/org_admin`,\n `GET /authorization/role/organization`,\n `GET /authorization/role/read_only_admin` endpoints. Use the new\n v1 endpoints for role management.\n * Deprecated `SnapshotCloudStorageTier` enum value Cold. It will be left,\n but will be mapped internally to the new value, AzureArchive, which is\n recommended as a replacement.\n * Deprecated the `GET /snapshot/{id}/storage/stats` endpoint. Use the v1\n version when possible.\n * Deprecated `POST /hierarchy/bulk_sla_conflicts`. It is migrated to\n v1 and using that is recommended.\n * Deprecated `GET /mssql/availability_group`,\n `GET /mssql/availability_group/{id}`,\n `PATCH /mssql/availability_group/{id}`, `PATCH /mssql/db/bulk`,\n `POST /mssql/db/bulk/snapshot`, `GET /mssql/db/bulk/snapshot/{id}`,\n `GET /mssql/db/count`, `DELETE /mssql/db/{id}/recoverable_range/download`,\n `GET /mssql/db/{id}/compatible_instance`, `GET /mssql/instance/count`,\n `GET /mssql/db/{id}/restore_estimate`, `GET /mssql/db/{id}/restore_files`,\n `GET /mssql/db/{id}/snappable_id`, `GET /mssql/db/defaults`,\n `PATCH /mssql/db/defaults` and `GET /mssql/db/recoverable_range/download/{id}`\n endpoints. Use the v1 version when possible.\n ## Breaking changes:\n * Added new Boolean field `isLinkLocalIpv4Mode` to `AddNodesConfig` and\n `ReplaceNodeConfig`.\n * Changed the type for ReplicationSnapshotLag, which is used by /report/{id} GET\n and PATCH endpoints from integer to string.\n * Added new required field `objectStore` to DataSourceDownloadConfig used by\n `POST /report/data_source/download`.\n * Removed the `storageClass` field from the DataSourceDownloadConfig object used\n by the `POST /report/data_source/download` endpoint. 
The value was not used.\n * Removed endpoint `GET /mfa/rsa/server` and moved it to v1.\n * Removed endpoint `POST /mfa/rsa/server` and moved it to v1.\n * Removed endpoint `GET /mfa/rsa/server/{id}` and moved it to v1.\n * Removed endpoint `PATCH /mfa/rsa/server/{id}` and moved it to v1.\n * Removed endpoint `DELETE /mfa/rsa/server/{id}` and moved it to v1.\n * Removed endpoint `PUT /cluster/{id}/security/web_signed_cert`\n and moved it to v1.\n * Removed endpoint `DELETE /cluster/{id}/security/web_signed_cert`\n and moved it to v1\n * Removed endpoint `PUT /cluster/{id}/security/kmip/client` and added it\n to v1.\n * Removed endpoint `GET /cluster/{id}/security/kmip/client` and added it\n to v1.\n * Removed endpoint `GET /cluster/{id}/security/kmip/server` and added it\n to v1.\n * Removed endpoint `PUT /cluster/{id}/security/kmip/server` and added it\n to v1.\n * Removed endpoint `DELETE /cluster/{id}/security/kmip/server` and added\n it to v1.\n * Removed endpoint `POST /replication/global_pause`. To toggle replication\n pause between enabled and disabled, use\n `POST /v1/replication/location_pause/disable` and\n `POST /v1/replication/location_pause/enable` instead.\n * Removed `GET /replication/global_pause`. To retrieve replication pause\n status, use `GET /internal/replication/source` and\n `GET /internal/replication/source/{id}` instead.\n * Removed `GET /node_management/{id}/fetch_package` since it was never used.\n * Removed `GET /node_management/{id}/upgrade` since it was never used.\n * Removed `POST /node_management/{id}/fetch_package` since it was never used.\n * Removed `POST /node_management/{id}/upgrade` since it was never used.\n\n ## Feature additions/improvements:\n * Added new optional field `pubKey` to the GlobalManagerConnectionUpdate\n object and the GlobalManagerConnectionInfo object used by\n `GET /cluster/{id}/global_manager` and `PUT /cluster/{id}/global_manager`.\n * Added a new optional field `storageClass` to the `ArchivalLocationSummary`\n type.\n * Added optional field `StartMethod` to the following components: \n ChartSummary, TableSummary, ReportTableRequest, FilterSummary and\n RequestFilters.\n * Added new enum field `StackedReplicationComplianceCountByStatus` to the\n measure property in ChartSummary.\n * Added new enum fields `ReplicationInComplianceCount`,\n `ReplicationNonComplianceCount` to the following properties:\n measure property in ChartSummary, column property in TableSummary,\n and sortBy property in ReportTableRequest.\n * Added the endpoint `GET /vmware/config/datastore_freespace_threshold` to\n query the VMware datastore freespace threshold config.\n * Added the endpoint `PATCH /vmware/config/set_datastore_freespace_threshold`\n to update the VMware datastore freespace threshold config.\n * Added two new optional query parameters `offset` and `limit` to\n `GET /organization`.\n * Added two new optional query parameters `offset` and `limit` to\n `GET /user/{id}/organization`.\n * Modified `SnapshotCloudStorageTier`, enum adding values AzureArchive, Glacier,\n and GlacierDeepArchive.\n * Added the `lastValidationResult` field to the OracleDbDetail that the\n `GET /oracle/db/{id}` and `PATCH /oracle/db/{id}` endpoints return.\n * Added `isValid` field to the OracleDbSnapshotSummary of\n OracleRecoverableRange that the `GET /oracle/db/\n {id}/recoverable_range` endpoint returns.\n * Added the `isRemoteGlobalBlackoutActive` field to the\n ReplicationSourceSummary object that the\n `GET /organization/{id}/replication/source` endpoint returns.\n 
* Added the `isRemoteGlobalBlackoutActive` field to the\n ReplicationSourceSummary object that the\n `GET /replication/source/{id}` endpoint returns.\n * Added the `isRemoteGlobalBlackoutActive` field to the\n ReplicationSourceSummary object that the\n `GET /replication/source` endpoint returns.\n * Added the `isReplicationTargetPauseEnabled` field to the\n ReplicationSourceSummary object that the\n `GET /organization/{id}/replication/source` endpoint returns.\n * Added the `isReplicationTargetPauseEnabled` field to the\n ReplicationSourceSummary object that the\n `GET /replication/source/{id}` endpoint returns.\n * Added the `isReplicationTargetPauseEnabled` field to the\n ReplicationSourceSummary object that the\n `GET /replication/source` endpoint returns.\n * Added new optional field `cloudRehydrationSpeed` to the\n ObjectStoreLocationSummary, ObjectStoreUpdateDefinition,\n PolarisAwsArchivalLocationSpec, and PolarisAzureArchivalLocationSpec\n objects to specify the rehydration speed to use when performing cloud\n rehydration on objects tiered cold storage.\n * Added new optional field earliestTimestamp to the `POST\n /polaris/export_info` endpoint to enable incremental MDS synchronization.\n * Added new values `RetentionSlaDomainName` , `ObjectType`, `SnapshotCount`,\n `AutoSnapshotCount` and `ManualSnapshotCount` to\n `UnmanagedObjectSortAttribute` field of the `GET /unmanaged_object` endpont.\n * Added new optional field `endpoint` to the ObjectStorageDetail\n object used by several Polaris APIs.\n * Added new optional field `accessKey` to the ObjectStorageConfig\n object used by several Polaris APIs.\n * Added new optional field `endpoint` to DataSourceDownloadConfig used by\n `POST /report/data_source/download`.\n * Added new field `slaClientConfig` to the `ManagedVolumeUpdate`\n object used by the `PATCH /managed_volume/{id}` endpoint to enable\n edits to the configuration of SLA Managed Volumes.\n * Added new field `shouldSkipPrechecks` to DecommissionNodesConfig used by\n `POST /cluster/{id}/decommission_nodes`.\n * Added new query parameter `managed_volume_type` to allow filtering\n managed volumes based on their type using the `GET /managed_volume`\n endpoint.\n * Added new query parameter `managed_volume_type` to allow filtering\n managed volume exports based on their source managed volume type\n using the `GET /managed_volume/snapshot/export` endpoint.\n * Added the new fields `mvType` and `slaClientConfig` to the\n `ManagedVolumeConfig` object. 
These fields are used with the\n `POST /managed_volume` endpoint to manage SLA Managed Volumes.\n * Added the new fields `mvType` and `slaManagedVolumeDetails` to the\n `ManagedVolumeSummary` object returned by the `GET /managed_volume`,\n `POST /managed_volume`, `GET /managed_volume/{id}` and\n `POST /managed_volume/{id}` endpoints.\n * Added new field `mvType` to the `ManagedVolumeSnapshotExportSummary`\n object returned by the `GET /managed_volume/snapshot/export` and\n `GET /managed_volume/snapshot/export/{id}` endpoints.\n * Added optional field `hostMountPoint` in the `ManagedVolumeChannelConfig`.\n `ManagedVolumeChannelConfig` is returned as part of\n `ManagedVolumeSnapshotExportSummary`, which is returned\n by the `GET /managed_volume/snapshot/export` and\n `GET /managed_volume/snapshot/export/{id}` endpoints.\n * Added `POST /managed_volume/{id}/snapshot` method to take an on\n demand snapshot for SLA Managed Volumes.\n * Added new field `isPrimary` to OracleDbSummary returned by\n `GET /oracle/db`.\n * Added new field `isPrimary` to OracleDbDetail returned by\n `GET /oracle/db/{id}` and `PATCH /oracle/db/{id}`.\n * Added new field `isOracleHost` to HostDetail\n returned by `GET /host/{id}`.\n * Added optional isShareAutoDiscoveryAndAdditionEnabled in the\n NasBaseConfig and NasConfig.\n NasBaseConfig is returned as part of HostSummary, which is returned by the\n `Get /host/envoy` and `Get /host` endpoints. NasConfig is used by\n HostRegister and HostUpdate. The HostRegister field is used by the\n `Post /host/bulk` endpoint and the HostUpdate is field used by the\n `PATCH /host/bulk` endpoint.\n * Added new endpoint `POST /managed_volume/{id}/resize` to resize managed\n volume to a larger size.\n * Added ReplicationComplianceStatus as an optional field to the TableSummary\n which is used by /report/{id} GET and PATCH endpoints and to RequestFilters\n which is used by /report/data_source/table.\n * Added `PATCH /cluster/{id}/trial_edge` endpoint to extend the trial period.\n * Added new optional fields `extensionsLeft` and `daysLeft` to\n EdgeTrialStatus returned by `GET /cluster/{id}/trial_edge` and\n `PATCH /cluster/{id}/trial_edge`.\n * Added new endpoint `POST /managed_volume/snapshot/{id}/restore` to export a\n managed volume snapshot and mount it on a host.\n * Added new endpoints `PATCH /config/{component}/reset` to allow configs to\n be reset to DEFAULT state.\n * Added a new field `logRetentionTimeInHours` to the `MssqlDbDefaults`\n object returned by the `GET /mssql/db/defaults` and\n `PATCH /mssql/db/defaults` endpoints.\n * Added new optional field `logRetentionTimeInHours` to `MssqlDbDefaultsUpdate`\n object which is used by `PATCH /mssql/db/defaults`.\n * Added new optional field `unreadable` to `BrowseResponse` and\n `SnapshotSearchResponse`, which are used by `GET /browse` and\n `GET /search/snapshot_search` respectively.\n * Added MissedReplicationSnapshots as an optional field to the TableSummary\n which is used by /report/{id} GET and PATCH endpoints.\n * Added new optional field `pitRecoveryInfo` to `ChildSnappableFailoverInfo`\n object which is used by `PUT /polaris/failover/target/{id}/start`\n * Added ReplicationDataLag as an optional field to the TableSummary\n which is used by /report/{id} GET and PATCH endpoints.\n * Added UnreplicatedSnapshots as an optional field to the TableSummary\n which is used by /report/{id} GET and PATCH endpoints.\n * Added the field `networkAdapterType` to `VappVmNetworkConnection`.\n `VappVmNetworkConnection` is returned by 
the\n `GET /vcd/vapp/snapshot/{snapshot_id}/instant_recover/options` and\n `GET /vcd/vapp/snapshot/{snapshot_id}/export/options` endpoints and is\n used by the `POST /vcd/vapp/snapshot/{snapshot_id}/export` and\n `POST /vcd/vapp/snapshot/{snapshot_id}/instant_recover` endpoints.\n Also added `VcdVmSnapshotDetail`, which is returned by the\n `GET /vcd/vapp/snapshot/{id}` endpoint.\n * Added new endpoint `GET /report/template` to return details\n of a report template.\n * Added new endpoint `POST /report/{id}/send_email` to send an email of the report.\n ## Breaking changes:\n * Made field `restoreScriptSmbPath` optional in `VolumeGroupMountSummary`.\n Endpoints `/volume_group/snapshot/mount` and\n `/volume_group/snapshot/mount/{id}` are affected by this change.\n * Moved endpoints `GET /volume_group`, `GET /volume_group/{id}`,\n `PATCH /volume_group/{id}`, `GET /volume_group/{id}/snapshot`,\n `POST /volume_group/{id}/snapshot`, `GET /volume_group/snapshot/{id}`,\n `GET /volume_group/snapshot/mount`, and\n `GET /volume_group/snapshot/mount/{id}` from internal to v1.\n * Moved endpoint `GET /host/{id}/volume` from internal to v1.\n\n ### Changes to Internal API in Rubrik version 5.2.2\n ## Feature Additions/improvements:\n * Added new field `exposeAllLogs` to ExportOracleTablespaceConfig\n used by `POST /oracle/db/{id}/export/tablespace`.\n\n ### Changes to Internal API in Rubrik version 5.2.1\n ## Feature Additions/improvements:\n * Added new field `shouldBlockOnNegativeFailureTolerance` to\n DecommissionNodesConfig used by `POST /cluster/{id}/decommission_nodes`.\n\n ### Changes to Internal API in Rubrik version 5.2.0\n ## Deprecation:\n * Deprecating `GET /replication/global_pause`. Use\n `GET /internal/replication/source` and\n `GET /internal/replication/source/{id}` to retrieve replication\n pause status in CDM v5.3.\n * Deprecating `POST /replication/global_pause`. 
Use\n `POST /v1/replication/location_pause/disable` and\n `POST /v1/replication/location_pause/enable` to toggle replication\n pause in CDM v5.3.\n * Deprecating `slaId` field returned by `GET /vcd/vapp/{id}/snapshot`.\n See **snapshotRetentionInfo** to track retention for\n snapshots.\n * Deprecating `slaId` field returned by `GET /vcd/vapp/snapshot/{id}`.\n See **snapshotRetentionInfo** to track retention for\n snapshots.\n * Deprecating `slaId` field returned by `GET /oracle/db/{id}/snapshot`.\n See **snapshotRetentionInfo** to track retention for\n snapshots.\n * Deprecating `slaId` field returned by `GET /oracle/db/\n {id}/recoverable_range`.\n See **snapshotRetentionInfo** to track retention for\n snapshots.\n * Deprecating `slaId` field returned by `GET /oracle/db/snapshot/{id}`.\n See **snapshotRetentionInfo** to track retention for\n snapshots.\n * Deprecating `slaId` field returned by `GET /hyperv/vm/{id}/snapshot`.\n See **snapshotRetentionInfo** to track retention for\n snapshots.\n * Deprecating `slaId` field returned by `GET /hyperv/vm/snapshot/{id}`.\n See **snapshotRetentionInfo** to track retention for\n snapshots.\n * Deprecating `slaId` field returned by `GET /volume_group/{id}/snapshot`.\n See **snapshotRetentionInfo** to track retention for\n snapshots.\n * Deprecating `slaId` field returned by `GET /volume_group/snapshot/{id}`.\n See **snapshotRetentionInfo** to track retention for\n snapshots.\n * Deprecating `slaId` field returned by `GET /storage/array_volume_group\n/{id}/snapshot`.\n See **snapshotRetentionInfo** to track retention for\n snapshots.\n* Deprecating `slaId` field returned by `GET /vcd/vapp/{id}`.\n See **snapshotRetentionInfo** to track retention for\n snapshots.\n* Deprecating `slaId` field returned by `GET /host_fileset/{id}`.\n See **snapshotRetentionInfo** to track retention for\n snapshots.\n* Deprecating `slaId` field returned by `GET /host_fileset/share/{id}`.\n See **snapshotRetentionInfo** to track retention for\n snapshots.\n* Deprecating `slaId` field returned by `GET /app_blueprint/{id}/snapshot`.\n See **snapshotRetentionInfo** to track retention for\n snapshots.\n* Deprecating `slaId` field returned by `GET /app_blueprint/snapshot/{id}`.\n See **snapshotRetentionInfo** to track retention for\n snapshots.\n* Deprecating `slaId` field returned by `GET /managed_volume/{id}/snapshot`.\n See **snapshotRetentionInfo** to track retention for\n snapshots.\n* Deprecating `slaId` field returned by `POST /managed_volume/{id\n}/end_snapshot`.\n See **snapshotRetentionInfo** to track retention for\n snapshots.\n* Deprecating `slaId` field returned by `GET /managed_volume/snapshot/{id}`.\n See **snapshotRetentionInfo** to track retention for\n snapshots.\n* Deprecating `slaId` field returned by `GET /aws/ec2_instance/{id}/snapshot`.\n See **snapshotRetentionInfo** to track retention for\n snapshots.\n* Deprecating `slaId` field returned by `GET /aws/ec2_instance/snapshot/{id}`.\n See **snapshotRetentionInfo** to track retention for\n snapshots.\n* Deprecating `slaId` field returned by `GET /nutanix/vm/{id}/snapshot`.\n See **snapshotRetentionInfo** to track retention for\n snapshots.\n* Deprecating `slaId` field returned by `GET /nutanix/vm/snapshot/{id}`.\n See **snapshotRetentionInfo** to track retention for\n snapshots.\n* Deprecating `slaId` field returned by `GET /fileset/bulk`.\n See **snapshotRetentionInfo** to track retention for\n snapshots.\n* Added a new field `pendingSlaDomain` to `VirtualMachineDetail`\n object referred by `VappVmDetail` 
returned by\n `GET /vcd/vapp/{id}` and `PATCH /vcd/vapp/{id}`\n * Deprecated `POST /internal/vmware/vcenter/{id}/refresh_vm` endpoint. Use\n `POST /v1/vmware/vcenter/{id}/refresh_vm` instead to refresh a\n virtual machine by MOID.\n\n ## Breaking changes:\n* Rename the field configuredSlaDomainId in the OracleUpdate object to\n configuredSlaDomainIdDeprecated and modify the behavior so\n configuredSlaDomainIdDeprecated is only used to determine log backup\n frequency and not to set retention time.\n* Removed `GET /event/count_by_status` endpoint and it will be\n replaced by `GET /job_monitoring/summary_by_job_state`.\n* Removed `GET /event/count_by_job_type` endpoint and it will be\n replaced by `GET /job_monitoring/summary_by_job_type`.\n* Removed `GET /event_series` endpoint and it will be replaced by\n `GET /job_monitoring`.\n* Refactor `PUT /cluster/{id}/security/web_signed_cert` to accept\n certificate_id instead of X.509 certificate text. Also removed\n the `POST /cluster/{id}/security/web_csr` endpoint.\n * Refactor `GET /rsa-server`, `POST /rsa-server`, `GET /rsa-server/{id}`,\n and `PATCH /rsa-server/{id}` to take in a certificate ID instead of\n a certificate.\n * Changed definition of CloudInstanceUpdate by updating the enums ON/OFF\n to POWERSTATUS_ON/POWERSTATUS_OFF\n * Removed `GET /event_series/{status}/csv_link` endpoint to download CSV\n with job monitoring information. It has been replaced by the\n `GET /job_monitoring//csv_download_link` v1 endpoint.\n * Removed GET `/report/summary/physical_storage_time_series`. Use\n GET `/stats/total_physical_storage/time_series` instead.\n * Removed GET `/report/summary/average_local_growth_per_day`. Use\n GET `/stats/average_storage_growth_per_day` instead.\n * Removed POST `/job/instances/`. Use GET `/job/{job_id}/instances` instead.\n * Removed the POST `/cluster/{id}/reset` endpoint.\n * Removed GET `/user`. Use the internal POST `/principal_search`\n or the v1 GET `/principal` instead for querying any principals,\n including users.\n\n ## Feature additions/improvements:\n * Added the `GET /replication/global_pause` endpoint to return the current\n status of global replication pause. Added the `POST /replication/global_pause`.\n endpoint to toggle the replication target global pause jobs status. When\n global replication pause is enabled, all replication jobs on the local\n cluster are paused. When disabling global replication pause, optional\n parameter `shouldOnlyReplicateNewSnapshots` can be set to `true` to only\n replicate snapshots taken after disabling the pause. 
These endpoints must\n be used at the target cluster.\n * Added new field `parentSnapshotId` to AppBlueprintSnapshotSummary returned\n by `GET /app_blueprint/{id}/snapshot`.\n * Added new field `parentSnapshotId` to AppBlueprintSnapshotDetail returned\n by `GET /app_blueprint/snapshot/{id}`.\n * Added new field `parentSnapshotId` to AwsEc2InstanceSummary returned by\n `GET /aws/ec2_instance`.\n * Added new field `parentSnapshotId` to AwsEc2InstanceDetail returned by\n `GET /aws/ec2_instance/{id}`.\n * Added new field `parentSnapshotId` to AwsEc2InstanceDetail returned by\n `PATCH /aws/ec2_instance/{id}`.\n * Added new field `parentSnapshotId` to HypervVirtualMachineSnapshotSummary\n returned by `GET /hyperv/vm/{id}/snapshot`.\n * Added new field `parentSnapshotId` to HypervVirtualMachineSnapshotDetail\n returned by `GET /hyperv/vm/snapshot/{id}`.\n * Added new field `parentSnapshotId` to ManagedVolumeSnapshotSummary\n returned by `GET /managed_volume/{id}/snapshot`.\n * Added new field `parentSnapshotId` to ManagedVolumeSnapshotSummary\n returned by `POST /managed_volume/{id}/end_snapshot`.\n * Added new field `parentSnapshotId` to ManagedVolumeSnapshotDetail returned\n by `GET /managed_volume/snapshot/{id}`.\n * Added new field `parentSnapshotId` to NutanixVmSnapshotSummary returned by\n `GET /nutanix/vm/{id}/snapshot`.\n * Added new field `parentSnapshotId` to NutanixVmSnapshotDetail returned by\n `GET /nutanix/vm/snapshot/{id}`.\n * Added new field `parentSnapshotId` to OracleDbSnapshotSummary returned by\n `GET /oracle/db/{id}/snapshot`.\n * Added new field `parentSnapshotId` to OracleDbSnapshotDetail returned by\n `GET /oracle/db/snapshot/{id}`.\n * Added new field `parentSnapshotId` to StorageArrayVolumeGroupSnapshotSummary\n returned by `GET /storage/array_volume_group/{id}/snapshot`.\n * Added new field `parentSnapshotId` to StorageArrayVolumeGroupSnapshotDetail\n returned by `GET /storage/array_volume_group/snapshot/{id}`.\n * Added new field `parentSnapshotId` to VcdVappSnapshotSummary returned by\n `GET /vcd/vapp/{id}/snapshot`.\n * Added new field `parentSnapshotId` to VcdVappSnapshotDetail returned by\n `GET /vcd/vapp/snapshot/{id}`.\n * Added new field `parentSnapshotId` to VolumeGroupSnapshotSummary returned by\n `GET /volume_group/{id}/snapshot`.\n * Added new field `parentSnapshotId` to VolumeGroupSnapshotDetail returned by\n `GET /volume_group/snapshot/{id}`.\n * Added new field `retentionSlaDomanId` to MssqlAvailabilityGroupSummary\n returned by `GET /mssql/availability_group`.\n * Added new field `retentionSlaDomanId` to MssqlAvailabilityGroupDetail\n returned by `GET /mssql/availability_group/{id}`.\n * Added new field `retentionSlaDomanId` to MssqlAvailabilityGroupDetail\n returned by `PATCH /mssql/availability_group/{id}`.\n * Added new field `retentionSlaDomainId` to UnmanagedObjectSummary\n returned by `GET /unmanaged_object`.\n * Added new field `retentionSlaDomainId` to ManagedVolumeSummary\n returned by `GET /managed_volume`.\n * Added new field `retentionSlaDomainId` to AppBlueprintDetail\n returned by `GET /app_blueprint/{id}`.\n * Added new field `retentionSlaDomainId` to AppBlueprintDetail\n returned by `PATCH /polaris/app_blueprint/{id}`.\n * Added new field `retentionSlaDomainId` to AppBlueprintDetail\n returned by `POST /polaris/app_blueprint`.\n * Added new field `retentionSlaDomainId` to AppBlueprintExportSnapshotJobConfig\n returned by `POST /polaris/app_blueprint/snapshot/{id}/export`.\n * Added new field `retentionSlaDomainId` to 
AppBlueprintInstantRecoveryJobConfig\n returned by `POST /polaris/app_blueprint/snapshot/{id}/instant_recover`.\n * Added new field `retentionSlaDomainId` to AppBlueprintMountSnapshotJobConfig\n returned by `POST /polaris/app_blueprint/snapshot/{id}/mount`.\n * Added new field `retentionSlaDomainId` to AppBlueprintSummary\n returned by `GET /app_blueprint`.\n * Added new field `retentionSlaDomainId` to AwsEc2InstanceDetail\n returned by `GET /aws/ec2_instance/{id}`.\n * Added new field `retentionSlaDomainId` to AwsEc2InstanceDetail\n returned by `PATCH /aws/ec2_instance/{id}`.\n * Added new field `retentionSlaDomainId` to AwsEc2InstanceSummary\n returned by `GET /aws/ec2_instance`.\n * Added new field `retentionSlaDomainId` to AwsHierarchyObjectSummary\n returned by `GET /aws/hierarchy/{id}/children`.\n * Added new field `retentionSlaDomainId` to AwsHierarchyObjectSummary\n returned by `GET /aws/hierarchy/{id}/descendants`.\n * Added new field `retentionSlaDomainId` to AwsHierarchyObjectSummary\n returned by `GET /aws/hierarchy/{id}`.\n * Added new field `retentionSlaDomainId` to HypervHierarchyObjectSummary\n returned by `GET /hyperv/hierarchy/{id}/children`.\n * Added new field `retentionSlaDomainId` to HypervHierarchyObjectSummary\n returned by `GET /hyperv/hierarchy/{id}/descendants`.\n * Added new field `retentionSlaDomainId` to HypervHierarchyObjectSummary\n returned by `GET /hyperv/hierarchy/{id}`.\n * Added new field `retentionSlaDomainId` to HypervHierarchyObjectSummary\n returned by `GET /organization/{id}/hyperv`.\n * Added new field `retentionSlaDomainId` to HypervVirtualMachineDetail\n returned by `GET /hyperv/vm/{id}`.\n * Added new field `retentionSlaDomainId` to HypervVirtualMachineDetail\n returned by `PATCH /hyperv/vm/{id}`.\n * Added new field `retentionSlaDomainId` to HypervVirtualMachineSummary\n returned by `GET /hyperv/vm`.\n * Added new field `retentionSlaDomainId` to ManagedHierarchyObjectSummary\n returned by `GET /hierarchy/{id}`.\n * Added new field `retentionSlaDomainId` to ManagedHierarchyObjectSummary\n returned by `GET /hierarchy/{id}/sla_conflicts`.\n * Added new field `retentionSlaDomainId` to ManagedVolumeSummary\n returned by `GET /managed_volume/{id}`.\n * Added new field `retentionSlaDomainId` to ManagedVolumeSummary\n returned by `GET /organization/{id}/managed_volume`.\n * Added new field `retentionSlaDomainId` to ManagedVolumeSummary\n returned by `PATCH /managed_volume/{id}`.\n * Added new field `retentionSlaDomainId` to ManagedVolumeSummary\n returned by `POST /managed_volume`.\n * Added new field `retentionSlaDomainId` to MountDetail\n returned by `GET /vmware/vm/snapshot/mount/{id}`.\n * Added new field `retentionSlaDomainId` to NutanixHierarchyObjectSummary\n returned by `GET /nutanix/hierarchy/{id}/children`.\n * Added new field `retentionSlaDomainId` to NutanixHierarchyObjectSummary\n returned by `GET /nutanix/hierarchy/{id}/descendants`.\n * Added new field `retentionSlaDomainId` to NutanixHierarchyObjectSummary\n returned by `GET /nutanix/hierarchy/{id}`.\n * Added new field `retentionSlaDomainId` to NutanixHierarchyObjectSummary\n returned by `GET /organization/{id}/nutanix`.\n * Added new field `retentionSlaDomainId` to OracleDbDetail\n returned by `GET /oracle/db/{id}`.\n * Added new field `retentionSlaDomainId` to OracleDbDetail\n returned by `PATCH /oracle/db/{id}`.\n * Added new field `retentionSlaDomainId` to OracleDbSummary\n returned by `GET /oracle/db`.\n * Added new field `retentionSlaDomainId` to OracleHierarchyObjectSummary\n 
returned by `GET /oracle/hierarchy/{id}/children`.\n * Added new field `retentionSlaDomainId` to OracleHierarchyObjectSummary\n returned by `GET /oracle/hierarchy/{id}/descendants`.\n * Added new field `retentionSlaDomainId` to OracleHierarchyObjectSummary\n returned by `GET /oracle/hierarchy/{id}`.\n * Added new field `retentionSlaDomainId` to OracleHierarchyObjectSummary\n returned by `GET /organization/{id}/oracle`.\n * Added new field `retentionSlaDomainId` to SlaConflictsSummary\n returned by `POST /hierarchy/bulk_sla_conflicts`.\n * Added new field `retentionSlaDomainId` to SnappableRecoverySpecDetails\n returned by `POST /polaris/failover/recovery_spec/upsert`.\n * Added new field `retentionSlaDomainId` to SnappableRecoverySpec\n returned by `POST /polaris/failover/recovery_spec/upsert`.\n * Added new field `retentionSlaDomainId` to Snappable\n returned by `POST /polaris/failover/recovery_spec/upsert`.\n * Added new field `retentionSlaDomainId` to Snappable\n returned by `POST /stats/snappable_storage`.\n * Added new field `retentionSlaDomainId` to StorageArrayHierarchyObjectSummary\n returned by `GET /organization/{id}/storage/array`.\n * Added new field `retentionSlaDomainId` to StorageArrayHierarchyObjectSummary\n returned by `GET /storage/array/hierarchy/{id}/children`.\n * Added new field `retentionSlaDomainId` to StorageArrayHierarchyObjectSummary\n returned by `GET /storage/array/hierarchy/{id}/descendants`.\n * Added new field `retentionSlaDomainId` to StorageArrayHierarchyObjectSummary\n returned by `GET /storage/array/hierarchy/{id}`.\n * Added new field `retentionSlaDomainId` to StorageArrayVolumeGroupDetail\n returned by `GET /storage/array_volume_group/{id}`.\n * Added new field `retentionSlaDomainId` to StorageArrayVolumeGroupDetail\n returned by `PATCH /storage/array_volume_group/{id}`.\n * Added new field `retentionSlaDomainId` to StorageArrayVolumeGroupDetail\n returned by `POST /storage/array_volume_group`.\n * Added new field `retentionSlaDomainId` to StorageArrayVolumeGroupSummary\n returned by `GET /storage/array_volume_group`.\n * Added new field `retentionSlaDomainId` to TriggerFailoverOnTargetDefinition\n returned by `PUT /polaris/failover/target/{id}/resume`.\n * Added new field `retentionSlaDomainId` to TriggerFailoverOnTargetDefinition\n returned by `PUT /polaris/failover/target/{id}/start`.\n * Added new field `retentionSlaDomainId` to UpsertSnappableRecoverySpecResponse\n returned by `POST /polaris/failover/recovery_spec/upsert`.\n * Added new field `retentionSlaDomainId` to VcdHierarchyObjectSummary\n returned by `GET /organization/{id}/vcd`.\n * Added new field `retentionSlaDomainId` to VcdHierarchyObjectSummary\n returned by `GET /vcd/hierarchy/{id}/children`.\n * Added new field `retentionSlaDomainId` to VcdHierarchyObjectSummary\n returned by `GET /vcd/hierarchy/{id}/descendants`.\n * Added new field `retentionSlaDomainId` to VcdHierarchyObjectSummary\n returned by `GET /vcd/hierarchy/{id}`.\n * Added new field `retentionSlaDomainId` to VcdVappDetail\n returned by `GET /vcd/vapp/{id}`.\n * Added new field `retentionSlaDomainId` to VcdVappDetail\n returned by `PATCH /vcd/vapp/{id}`.\n * Added new field `retentionSlaDomainId` to VcdVappSnapshotDetail\n returned by `GET /vcd/vapp/snapshot/{id}`.\n * Added new field `retentionSlaDomainId` to VolumeGroupDetail\n returned by `GET /volume_group/{id}`.\n * Added new field `retentionSlaDomainId` to VolumeGroupDetail\n returned by `PATCH /volume_group/{id}`.\n * Added new field `retentionSlaDomainId` to 
VolumeGroupSummary\n returned by `GET /volume_group`.\n * Added new field `retentionSlaDomainId` to AwsHierarchyObjectSummary\n returned by `GET /organization/{id}/aws`.\n * Added new field `retentionSlaDomainId` to VmwareVmMountSummary\n returned by `GET /vmware/vm/snapshot/mount`.\n * Added new field `retentionSlaDomainId` to VcdVappSummary\n returned by `GET /vcd/vapp`.\n * Added `isReplicationTargetPauseEnabled` to ReplicationTargetSummary\n returned by `GET /replication/target`.\n * Added `isReplicationTargetPauseEnabled` to ReplicationTargetSummary\n returned by `POST /replication/target`.\n * Added `isReplicationTargetPauseEnabled` to ReplicationTargetSummary\n returned by `GET /replication/target/{id}`.\n * Added `isReplicationTargetPauseEnabled` to ReplicationTargetSummary\n returned by `GET /replication/target/{id}`.\n * Added `isReplicationTargetPauseEnabled` to ReplicationTargetSummary\n returned by `PATCH /replication/target/{id}`.\n * Added `isReplicationTargetPauseEnabled` to ReplicationTargetSummary\n returned by `GET /organization/{id}/replication/target`.\n * Added new field `hasSnapshotsWithPolicy` to UnmanagedObjectSummary returned\n by GET `/unmanaged_object`\n * Added new field `slaLastUpdateTime` to AppBlueprintDetail\n returned by POST `/polaris/app_blueprint`.\n * Added new field `slaLastUpdateTime` to AppBlueprintDetail\n returned by `GET /app_blueprint/{id}`.\n * Added new field `slaLastUpdateTime` to AppBlueprintDetail\n returned by `PATCH /polaris/app_blueprint/{id}`.\n * Added new field `slaLastUpdateTime` to AppBlueprintExportSnapshotJobConfig\n returned by POST `/polaris/app_blueprint/snapshot/{id}/export`.\n * Added new field `slaLastUpdateTime` to AppBlueprintInstantRecoveryJobConfig\n returned by POST `/polaris/app_blueprint/snapshot/{id}/instant_recover`.\n * Added new field `slaLastUpdateTime` to AppBlueprintMountSnapshotJobConfig\n returned by POST `/polaris/app_blueprint/snapshot/{id}/mount`.\n * Added new field `slaLastUpdateTime` to AppBlueprintSummary\n returned by `GET /app_blueprint`.\n * Added new field `slaLastUpdateTime` to AwsAccountDetail\n returned by `PATCH /aws/account/dca/{id}`.\n * Added new field `slaLastUpdateTime` to AwsAccountDetail\n returned by `GET /aws/account/{id}`.\n * Added new field `slaLastUpdateTime` to AwsAccountDetail\n returned by `PATCH /aws/account/{id}`.\n * Added new field `slaLastUpdateTime` to AwsEc2InstanceDetail\n returned by `GET /aws/ec2_instance/{id}`.\n * Added new field `slaLastUpdateTime` to AwsEc2InstanceDetail\n returned by `PATCH /aws/ec2_instance/{id}`.\n * Added new field `slaLastUpdateTime` to FilesetDetail\n returned by POST `/fileset/bulk`.\n * Added new field `slaLastUpdateTime` to AwsEc2InstanceSummary\n returned by `GET /aws/ec2_instance`.\n * Added new field `slaLastUpdateTime` to AwsHierarchyObjectSummary\n returned by `GET /aws/hierarchy/{id}`.\n * Added new field `slaLastUpdateTime` to AwsHierarchyObjectSummary\n returned by `GET /aws/hierarchy/{id}/children`.\n * Added new field `slaLastUpdateTime` to AwsHierarchyObjectSummary\n returned by `GET /aws/hierarchy/{id}/descendants`.\n * Added new field `slaLastUpdateTime` to AwsHierarchyObjectSummary\n returned by `GET /organization/{id}/aws`.\n * Added new field `slaLastUpdateTime` to DataCenterDetail\n returned by `GET /vmware/data_center/{id}`.\n * Added new field `slaLastUpdateTime` to DataCenterSummary\n returned by `GET /vmware/data_center`.\n * Added new field `slaLastUpdateTime` to DataStoreDetail\n returned by `GET 
/vmware/datastore/{id}`.\n * Added new field `slaLastUpdateTime` to FolderDetail\n returned by `GET /folder/host/{datacenter_id}`.\n * Added new field `slaLastUpdateTime` to FolderDetail\n returned by `GET /folder/vm/{datacenter_id}`.\n * Added new field `slaLastUpdateTime` to FolderDetail\n returned by `GET /folder/{id}`.\n * Added new field `slaLastUpdateTime` to HostFilesetDetail\n returned by `GET /host_fileset/{id}`.\n * Added new field `slaLastUpdateTime` to HostFilesetShareDetail\n returned by `GET /host_fileset/share/{id}`.\n * Added new field `slaLastUpdateTime` to HostFilesetShareSummary\n returned by `GET /host_fileset/share`.\n * Added new field `slaLastUpdateTime` to HostFilesetSummary\n returned by `GET /host_fileset`.\n * Added new field `slaLastUpdateTime` to HypervClusterDetail\n returned by `GET /hyperv/cluster/{id}`.\n * Added new field `slaLastUpdateTime` to HypervClusterDetail\n returned by `PATCH /hyperv/cluster/{id}`.\n * Added new field `slaLastUpdateTime` to HypervClusterSummary\n returned by `GET /hyperv/cluster`.\n * Added new field `slaLastUpdateTime` to HypervHierarchyObjectSummary\n returned by `GET /hyperv/hierarchy/{id}`.\n * Added new field `slaLastUpdateTime` to HypervHierarchyObjectSummary\n returned by `GET /hyperv/hierarchy/{id}/children`.\n * Added new field `slaLastUpdateTime` to HypervHierarchyObjectSummary\n returned by `GET /hyperv/hierarchy/{id}/descendants`.\n * Added new field `slaLastUpdateTime` to HypervHierarchyObjectSummary\n returned by `GET /organization/{id}/hyperv`.\n * Added new field `slaLastUpdateTime` to HypervHostDetail\n returned by `GET /hyperv/host/{id}`.\n * Added new field `slaLastUpdateTime` to HypervHostDetail\n returned by `PATCH /hyperv/host/{id}`.\n * Added new field `slaLastUpdateTime` to HypervHostSummary\n returned by `GET /hyperv/host`.\n * Added new field `slaLastUpdateTime` to HypervScvmmDetail\n returned by `GET /hyperv/scvmm/{id}`.\n * Added new field `slaLastUpdateTime` to HypervScvmmDetail\n returned by `PATCH /hyperv/scvmm/{id}`.\n * Added new field `slaLastUpdateTime` to HypervScvmmSummary\n returned by `GET /hyperv/scvmm`.\n * Added new field `slaLastUpdateTime` to HypervVirtualMachineDetail\n returned by `GET /hyperv/vm/{id}`.\n * Added new field `slaLastUpdateTime` to HypervVirtualMachineDetail\n returned by `PATCH /hyperv/vm/{id}`.\n * Added new field `slaLastUpdateTime` to HypervVirtualMachineSummary\n returned by `GET /hyperv/vm`.\n * Added new field `slaLastUpdateTime` to ManagedHierarchyObjectSummary\n returned by `GET /hierarchy/{id}`.\n * Added new field `slaLastUpdateTime` to ManagedHierarchyObjectSummary\n returned by `GET /hierarchy/{id}/sla_conflicts`.\n * Added new field `slaLastUpdateTime` to ManagedVolumeSummary\n returned by `GET /managed_volume`.\n * Added new field `slaLastUpdateTime` to ManagedVolumeSummary\n returned by POST `/managed_volume`.\n * Added new field `slaLastUpdateTime` to ManagedVolumeSummary\n returned by `GET /managed_volume/{id}`.\n * Added new field `slaLastUpdateTime` to ManagedVolumeSummary\n returned by `PATCH /managed_volume/{id}`.\n * Added new field `slaLastUpdateTime` to ManagedVolumeSummary\n returned by `GET /organization/{id}/managed_volume`.\n * Added new field `slaLastUpdateTime` to MountDetail\n returned by `GET /vmware/vm/snapshot/mount/{id}`.\n * Added new field `slaLastUpdateTime` to NutanixHierarchyObjectSummary\n returned by `GET /nutanix/hierarchy/{id}`.\n * Added new field `slaLastUpdateTime` to NutanixHierarchyObjectSummary\n returned by `GET 
/nutanix/hierarchy/{id}/children`.\n * Added new field `slaLastUpdateTime` to NutanixHierarchyObjectSummary\n returned by `GET /nutanix/hierarchy/{id}/descendants`.\n * Added new field `slaLastUpdateTime` to NutanixHierarchyObjectSummary\n returned by `GET /organization/{id}/nutanix`.\n * Added new field `slaLastUpdateTime` to OracleDbDetail\n returned by `GET /oracle/db/{id}`.\n * Added new field `slaLastUpdateTime` to OracleDbDetail\n returned by `PATCH /oracle/db/{id}`.\n * Added new field `slaLastUpdateTime` to OracleDbSummary\n returned by `GET /oracle/db`.\n * Added new field `slaLastUpdateTime` to OracleHierarchyObjectSummary\n returned by `GET /oracle/hierarchy/{id}`.\n * Added new field `slaLastUpdateTime` to OracleHierarchyObjectSummary\n returned by `GET /oracle/hierarchy/{id}/children`.\n * Added new field `slaLastUpdateTime` to OracleHierarchyObjectSummary\n returned by `GET /oracle/hierarchy/{id}/descendants`.\n * Added new field `slaLastUpdateTime` to OracleHierarchyObjectSummary\n returned by `GET /organization/{id}/oracle`.\n * Added new field `slaLastUpdateTime` to OracleHostDetail\n returned by `GET /oracle/host/{id}`.\n * Added new field `slaLastUpdateTime` to OracleHostDetail\n returned by `PATCH /oracle/host/{id}`.\n * Added new field `slaLastUpdateTime` to OracleHostSummary\n returned by `GET /oracle/host`.\n * Added new field `slaLastUpdateTime` to OracleRacDetail\n returned by `GET /oracle/rac/{id}`.\n * Added new field `slaLastUpdateTime` to OracleRacDetail\n returned by `PATCH /oracle/rac/{id}`.\n * Added new field `slaLastUpdateTime` to OracleRacSummary\n returned by `GET /oracle/rac`.\n * Added new field `slaLastUpdateTime` to Snappable\n returned by POST `/polaris/failover/recovery_spec/upsert`.\n * Added new field `slaLastUpdateTime` to SnappableRecoverySpec\n returned by POST `/polaris/failover/recovery_spec/upsert`.\n * Added new field `slaLastUpdateTime` to SnappableRecoverySpecDetails\n returned by POST `/polaris/failover/recovery_spec/upsert`.\n * Added new field `slaLastUpdateTime` to StorageArrayHierarchyObjectSummary\n returned by `GET /organization/{id}/storage/array`.\n * Added new field `slaLastUpdateTime` to StorageArrayHierarchyObjectSummary\n returned by `GET /storage/array/hierarchy/{id}`.\n * Added new field `slaLastUpdateTime` to StorageArrayHierarchyObjectSummary\n returned by `GET /storage/array/hierarchy/{id}/children`.\n * Added new field `slaLastUpdateTime` to StorageArrayHierarchyObjectSummary\n returned by `GET /storage/array/hierarchy/{id}/descendants`.\n * Added new field `slaLastUpdateTime` to StorageArrayVolumeGroupDetail\n returned by POST `/storage/array_volume_group`.\n * Added new field `slaLastUpdateTime` to StorageArrayVolumeGroupDetail\n returned by `GET /storage/array_volume_group/{id}`.\n * Added new field `slaLastUpdateTime` to StorageArrayVolumeGroupDetail\n returned by `PATCH /storage/array_volume_group/{id}`.\n * Added new field `slaLastUpdateTime` to StorageArrayVolumeGroupSummary\n returned by `GET /storage/array_volume_group`.\n * Added new field `slaLastUpdateTime` to VcdClusterDetail\n returned by `GET /vcd/cluster/{id}`.\n * Added new field `slaLastUpdateTime` to VcdClusterDetail\n returned by `PATCH /vcd/cluster/{id}`.\n * Added new field `slaLastUpdateTime` to VcdClusterSummary\n returned by `GET /vcd/cluster`.\n * Added new field `slaLastUpdateTime` to VcdHierarchyObjectSummary\n returned by `GET /organization/{id}/vcd`.\n * Added new field `slaLastUpdateTime` to VcdHierarchyObjectSummary\n returned by `GET 
/vcd/hierarchy/{id}`.\n * Added new field `slaLastUpdateTime` to VcdHierarchyObjectSummary\n returned by `GET /vcd/hierarchy/{id}/children`.\n * Added new field `slaLastUpdateTime` to VcdHierarchyObjectSummary\n returned by `GET /vcd/hierarchy/{id}/descendants`.\n * Added new field `slaLastUpdateTime` to VcdVappDetail\n returned by `GET /vcd/vapp/{id}`.\n * Added new field `slaLastUpdateTime` to VcdVappDetail\n returned by `PATCH /vcd/vapp/{id}`.\n * Added new field `slaLastUpdateTime` to VcdVappSnapshotDetail\n returned by `GET /vcd/vapp/snapshot/{id}`.\n * Added new field `slaLastUpdateTime` to VcdVappSummary\n returned by `GET /vcd/vapp`.\n * Added new field `slaLastUpdateTime` to VmwareVmMountSummary\n returned by `GET /vmware/vm/snapshot/mount`.\n * Added new field `slaLastUpdateTime` to VolumeGroupDetail\n returned by `GET /volume_group/{id}`.\n * Added new field `slaLastUpdateTime` to VolumeGroupDetail\n returned by `PATCH /volume_group/{id}`.\n * Added new field `slaLastUpdateTime` to VolumeGroupSummary\n returned by `GET /volume_group`.\n * Added new field `slaLastUpdateTime` to VsphereCategory\n returned by `GET /vmware/vcenter/{id}/tag_category`.\n * Added new field `slaLastUpdateTime` to VsphereCategory\n returned by `GET /vmware/vcenter/tag_category/{tag_category_id}`.\n * Added new field `slaLastUpdateTime` to VsphereTag\n returned by `GET /vmware/vcenter/{id}/tag`.\n * Added new field `slaLastUpdateTime` to VsphereTag\n returned by `GET /vmware/vcenter/tag/{tag_id}`.\n * Added new Field `configuredSlaDomainType` to AppBlueprintDetail returned by\n `POST /polaris/app_blueprint`.\n * Added new Field `configuredSlaDomainType` to AppBlueprintDetail returned by\n `GET /app_blueprint/{id}`.\n * Added new Field `configuredSlaDomainType` to AppBlueprintDetail returned by\n `PATCH /polaris/app_blueprint/{id}`.\n * Added new Field `configuredSlaDomainType` to\n AppBlueprintExportSnapshotJobConfig returned by\n `POST /polaris/app_blueprint/snapshot/{id}/export`.\n * Added new Field `configuredSlaDomainType` to\n AppBlueprintInstantRecoveryJobConfig returned by\n `POST /polaris/app_blueprint/snapshot/{id}/instant_recover`.\n * Added new Field `configuredSlaDomainType` to\n AppBlueprintMountSnapshotJobConfig returned by\n `POST /polaris/app_blueprint/snapshot/{id}/mount`.\n * Added new Field `configuredSlaDomainType` to AppBlueprintSummary returned by\n `GET /app_blueprint`.\n * Added new Field `configuredSlaDomainType` to AwsAccountDetail returned by\n `PATCH /aws/account/dca/{id}`.\n * Added new Field `configuredSlaDomainType` to AwsAccountDetail returned by\n `GET /aws/account/{id}`.\n * Added new Field `configuredSlaDomainType` to AwsAccountDetail returned by\n `PATCH /aws/account/{id}`.\n * Added new Field `configuredSlaDomainType` to AwsEc2InstanceDetail returned\n by `GET /aws/ec2_instance/{id}`.\n * Added new Field `configuredSlaDomainType` to AwsEc2InstanceDetail returned\n by `PATCH /aws/ec2_instance/{id}`.\n * Added new Field `configuredSlaDomainType` to AwsEc2InstanceSummary returned\n by `GET /aws/ec2_instance`.\n * Added new Field `configuredSlaDomainType` to AwsHierarchyObjectSummary\n returned by `GET /aws/hierarchy/{id}`.\n * Added new Field `configuredSlaDomainType` to AwsHierarchyObjectSummary\n returned by `GET /aws/hierarchy/{id}/children`.\n * Added new Field `configuredSlaDomainType` to AwsHierarchyObjectSummary\n returned by `GET /aws/hierarchy/{id}/descendants`.\n * Added new Field `configuredSlaDomainType` to AwsHierarchyObjectSummary\n returned by `GET 
/organization/{id}/aws`.\n * Added new Field `configuredSlaDomainType` to DataCenterDetail returned by\n `GET /vmware/data_center/{id}`.\n * Added new Field `configuredSlaDomainType` to DataCenterSummary returned by\n `GET /vmware/data_center`.\n * Added new Field `configuredSlaDomainType` to DataStoreDetail returned by\n `GET /vmware/datastore/{id}`.\n * Added new Field `configuredSlaDomainType` to FilesetDetail returned by\n `POST /fileset/bulk`.\n * Added new Field `configuredSlaDomainType` to FolderDetail returned by\n `GET /folder/host/{datacenter_id}`.\n * Added new Field `configuredSlaDomainType` to FolderDetail returned by\n `GET /folder/vm/{datacenter_id}`.\n * Added new Field `configuredSlaDomainType` to FolderDetail returned by\n `GET /folder/{id}`.\n * Added new Field `configuredSlaDomainType` to HostFilesetDetail returned by\n `GET /host_fileset/{id}`.\n * Added new Field `configuredSlaDomainType` to HostFilesetShareDetail returned\n by `GET /host_fileset/share/{id}`.\n * Added new Field `configuredSlaDomainType` to HostFilesetShareSummary\n returned by `GET /host_fileset/share`.\n * Added new Field `configuredSlaDomainType` to HostFilesetSummary returned by\n `GET /host_fileset`.\n * Added new Field `configuredSlaDomainType` to HypervClusterDetail returned by\n `GET /hyperv/cluster/{id}`.\n * Added new Field `configuredSlaDomainType` to HypervClusterDetail returned by\n `PATCH /hyperv/cluster/{id}`.\n * Added new Field `configuredSlaDomainType` to HypervClusterSummary returned\n by `GET /hyperv/cluster`.\n * Added new Field `configuredSlaDomainType` to HypervHierarchyObjectSummary\n returned by `GET /hyperv/hierarchy/{id}`.\n * Added new Field `configuredSlaDomainType` to HypervHierarchyObjectSummary\n returned by `GET /hyperv/hierarchy/{id}/children`.\n * Added new Field `configuredSlaDomainType` to HypervHierarchyObjectSummary\n returned by `GET /hyperv/hierarchy/{id}/descendants`.\n * Added new Field `configuredSlaDomainType` to HypervHierarchyObjectSummary\n returned by `GET /organization/{id}/hyperv`.\n * Added new Field `configuredSlaDomainType` to HypervHostDetail returned by\n `GET /hyperv/host/{id}`.\n * Added new Field `configuredSlaDomainType` to HypervHostDetail returned by\n `PATCH /hyperv/host/{id}`.\n * Added new Field `configuredSlaDomainType` to HypervHostSummary returned by\n `GET /hyperv/host`.\n * Added new Field `configuredSlaDomainType` to HypervScvmmDetail returned by\n `GET /hyperv/scvmm/{id}`.\n * Added new Field `configuredSlaDomainType` to HypervScvmmDetail returned by\n `PATCH /hyperv/scvmm/{id}`.\n * Added new Field `configuredSlaDomainType` to HypervScvmmSummary returned by\n `GET /hyperv/scvmm`.\n * Added new Field `configuredSlaDomainType` to HypervVirtualMachineDetail\n returned by `GET /hyperv/vm/{id}`.\n * Added new Field `configuredSlaDomainType` to HypervVirtualMachineDetail\n returned by `PATCH /hyperv/vm/{id}`.\n * Added new Field `configuredSlaDomainType` to HypervVirtualMachineSummary\n returned by `GET /hyperv/vm`.\n * Added new Field `configuredSlaDomainType` to ManagedHierarchyObjectSummary\n returned by `GET /hierarchy/{id}`.\n * Added new Field `configuredSlaDomainType` to ManagedHierarchyObjectSummary\n returned by `GET /hierarchy/{id}/sla_conflicts`.\n * Added new Field `configuredSlaDomainType` to ManagedVolumeSummary returned\n by `GET /managed_volume`.\n * Added new Field `configuredSlaDomainType` to ManagedVolumeSummary returned\n by `POST /managed_volume`.\n * Added new Field `configuredSlaDomainType` to 
ManagedVolumeSummary returned\n by `GET /managed_volume/{id}`.\n * Added new Field `configuredSlaDomainType` to ManagedVolumeSummary returned\n by `PATCH /managed_volume/{id}`.\n * Added new Field `configuredSlaDomainType` to ManagedVolumeSummary returned\n by `GET /organization/{id}/managed_volume`.\n * Added new Field `configuredSlaDomainType` to MountDetail returned by\n `GET /vmware/vm/snapshot/mount/{id}`.\n * Added new Field `configuredSlaDomainType` to NutanixHierarchyObjectSummary\n returned by `GET /nutanix/hierarchy/{id}`.\n * Added new Field `configuredSlaDomainType` to NutanixHierarchyObjectSummary\n returned by `GET /nutanix/hierarchy/{id}/children`.\n * Added new Field `configuredSlaDomainType` to NutanixHierarchyObjectSummary\n returned by `GET /nutanix/hierarchy/{id}/descendants`.\n * Added new Field `configuredSlaDomainType` to NutanixHierarchyObjectSummary\n returned by `GET /organization/{id}/nutanix`.\n * Added new Field `configuredSlaDomainType` to OracleDbDetail returned by\n `GET /oracle/db/{id}`.\n * Added new Field `configuredSlaDomainType` to OracleDbDetail returned by\n `PATCH /oracle/db/{id}`.\n * Added new Field `configuredSlaDomainType` to OracleDbSummary returned by\n `GET /oracle/db`.\n * Added new Field `configuredSlaDomainType` to OracleHierarchyObjectSummary\n returned by `GET /oracle/hierarchy/{id}`.\n * Added new Field `configuredSlaDomainType` to OracleHierarchyObjectSummary\n returned by `GET /oracle/hierarchy/{id}/children`.\n * Added new Field `configuredSlaDomainType` to OracleHierarchyObjectSummary\n returned by `GET /oracle/hierarchy/{id}/descendants`.\n * Added new Field `configuredSlaDomainType` to OracleHierarchyObjectSummary\n returned by `GET /organization/{id}/oracle`.\n * Added new Field `configuredSlaDomainType` to OracleHostDetail returned by\n `GET /oracle/host/{id}`.\n * Added new Field `configuredSlaDomainType` to OracleHostDetail returned by\n `PATCH /oracle/host/{id}`.\n * Added new Field `configuredSlaDomainType` to OracleHostSummary returned by\n `GET /oracle/host`.\n * Added new Field `configuredSlaDomainType` to OracleRacDetail returned by\n `GET /oracle/rac/{id}`.\n * Added new Field `configuredSlaDomainType` to OracleRacDetail returned by\n `PATCH /oracle/rac/{id}`.\n * Added new Field `configuredSlaDomainType` to OracleRacSummary returned by\n `GET /oracle/rac`.\n * Added new Field `configuredSlaDomainType` to SlaConflictsSummary returned by\n `POST /hierarchy/bulk_sla_conflicts`.\n * Added new Field `configuredSlaDomainType` to Snappable returned by\n `POST /polaris/failover/recovery_spec/upsert`.\n * Added new Field `configuredSlaDomainType` to SnappableRecoverySpec returned\n by `POST /polaris/failover/recovery_spec/upsert`.\n * Added new Field `configuredSlaDomainType` to SnappableRecoverySpecDetails\n returned by `POST /polaris/failover/recovery_spec/upsert`.\n * Added new Field `configuredSlaDomainType` to\n StorageArrayHierarchyObjectSummary returned by\n `GET /organization/{id}/storage/array`.\n * Added new Field `configuredSlaDomainType` to\n StorageArrayHierarchyObjectSummary returned by\n `GET /storage/array/hierarchy/{id}`.\n * Added new Field `configuredSlaDomainType` to\n StorageArrayHierarchyObjectSummary returned by\n `GET /storage/array/hierarchy/{id}/children`.\n * Added new Field `configuredSlaDomainType` to\n StorageArrayHierarchyObjectSummary returned by\n `GET /storage/array/hierarchy/{id}/descendants`.\n * Added new Field `configuredSlaDomainType` to StorageArrayVolumeGroupDetail\n returned by `POST 
/storage/array_volume_group`.\n * Added new Field `configuredSlaDomainType` to StorageArrayVolumeGroupDetail\n returned by `GET /storage/array_volume_group/{id}`.\n * Added new Field `configuredSlaDomainType` to StorageArrayVolumeGroupDetail\n returned by `PATCH /storage/array_volume_group/{id}`.\n * Added new Field `configuredSlaDomainType` to StorageArrayVolumeGroupSummary\n returned by `GET /storage/array_volume_group`.\n * Added new Field `configuredSlaDomainType` to\n TriggerFailoverOnTargetDefinition returned by\n `PUT /polaris/failover/target/{id}/start`.\n * Added new Field `configuredSlaDomainType` to\n TriggerFailoverOnTargetDefinition returned by\n `PUT /polaris/failover/target/{id}/resume`.\n * Added new Field `configuredSlaDomainType` to\n UnmanagedObjectSummary returned by `GET /unmanaged_object`.\n * Added new Field `configuredSlaDomainType` to\n UpsertSnappableRecoverySpecResponse returned by\n `POST /polaris/failover/recovery_spec/upsert`.\n * Added new Field `configuredSlaDomainType` to VcdClusterDetail returned by\n `GET /vcd/cluster/{id}`.\n * Added new Field `configuredSlaDomainType` to VcdClusterDetail returned by\n `PATCH /vcd/cluster/{id}`.\n * Added new Field `configuredSlaDomainType` to VcdClusterSummary returned by\n `GET /vcd/cluster`.\n * Added new Field `configuredSlaDomainType` to VcdHierarchyObjectSummary\n returned by `GET /organization/{id}/vcd`.\n * Added new Field `configuredSlaDomainType` to VcdHierarchyObjectSummary\n returned by `GET /vcd/hierarchy/{id}`.\n * Added new Field `configuredSlaDomainType` to VcdHierarchyObjectSummary\n returned by `GET /vcd/hierarchy/{id}/children`.\n * Added new Field `configuredSlaDomainType` to VcdHierarchyObjectSummary\n returned by `GET /vcd/hierarchy/{id}/descendants`.\n * Added new Field `configuredSlaDomainType` to VcdVappDetail returned by\n `GET /vcd/vapp/{id}`.\n * Added new Field `configuredSlaDomainType` to VcdVappDetail returned by\n `PATCH /vcd/vapp/{id}`.\n * Added new Field `configuredSlaDomainType` to VcdVappSnapshotDetail returned\n by `GET /vcd/vapp/snapshot/{id}`.\n * Added new Field `configuredSlaDomainType` to VcdVappSummary returned by\n `GET /vcd/vapp`.\n * Added new Field `configuredSlaDomainType` to VmwareVmMountSummary returned\n by `GET /vmware/vm/snapshot/mount`.\n * Added new Field `configuredSlaDomainType` to VolumeGroupDetail returned by\n `GET /volume_group/{id}`.\n * Added new Field `configuredSlaDomainType` to VolumeGroupDetail returned by\n `PATCH /volume_group/{id}`.\n * Added new Field `configuredSlaDomainType` to VolumeGroupSummary returned by\n `GET /volume_group`.\n * Added new Field `configuredSlaDomainType` to VsphereCategory returned by\n `GET /vmware/vcenter/{id}/tag_category`.\n * Added new Field `configuredSlaDomainType` to VsphereCategory returned by\n `GET /vmware/vcenter/tag_category/{tag_category_id}`.\n * Added new Field `configuredSlaDomainType` to VsphereTag returned by\n `GET /vmware/vcenter/{id}/tag`.\n * Added new Field `configuredSlaDomainType` to VsphereTag returned by\n `GET /vmware/vcenter/tag/{tag_id}`.\n * Added a new optional query parameter `name` to\n `GET /user/{id}/organization`.\n * Added new field `hostLogRetentionHours` to OracleDbSummary returned by\n `GET /oracle/db`.\n * Added new field `isCustomRetentionApplied` to AppBlueprintSnapshotSummary\n returned by `GET /app_blueprint/{id}/snapshot`.\n * Added new field `isCustomRetentionApplied` to AppBlueprintSnapshotDetail\n returned by `GET /app_blueprint/snapshot/{id}` .\n * Added new field 
`isCustomRetentionApplied` to AwsEc2InstanceSummary returned\n by `GET /aws/ec2_instance`.\n * Added new field `isCustomRetentionApplied` to AwsEc2InstanceDetail returned\n by `GET /aws/ec2_instance/{id}`.\n * Added new field `isCustomRetentionApplied` to AwsEc2InstanceDetail returned\n by `PATCH /aws/ec2_instance/{id}`.\n * Added new field `isCustomRetentionApplied` to\n HypervVirtualMachineSnapshotSummary returned by\n `GET /hyperv/vm/{id}/snapshot`.\n * Added new field `isCustomRetentionApplied` to\n HypervVirtualMachineSnapshotDetail returned by\n `GET /hyperv/vm/snapshot/{id}`.\n * Added new field `isCustomRetentionApplied` to ManagedVolumeSnapshotSummary\n returned by `GET /managed_volume/{id}/snapshot`.\n * Added new field `isCustomRetentionApplied` to ManagedVolumeSnapshotSummary\n returned by `POST /managed_volume/{id}/end_snapshot`.\n * Added new field `isCustomRetentionApplied` to ManagedVolumeSnapshotDetail\n returned by `GET /managed_volume/snapshot/{id}`.\n * Added new field `isCustomRetentionApplied` to NutanixVmSnapshotSummary\n returned by `GET /nutanix/vm/{id}/snapshot`.\n * Added new field `isCustomRetentionApplied` to NutanixVmSnapshotDetail\n returned by `GET /nutanix/vm/snapshot/{id}`.\n * Added new field `isCustomRetentionApplied` to OracleDbSnapshotSummary\n returned by `GET /oracle/db/{id}/snapshot`.\n * Added new field `isCustomRetentionApplied` to OracleDbSnapshotDetail returned\n by `GET /oracle/db/snapshot/{id}`.\n * Added new field `isCustomRetentionApplied` to\n StorageArrayVolumeGroupSnapshotSummary returned by\n `GET /storage/array_volume_group/{id}/snapshot`.\n * Added new field `isCustomRetentionApplied` to\n StorageArrayVolumeGroupSnapshotDetail returned by\n `GET /storage/array_volume_group/snapshot/{id}`.\n * Added new field `isCustomRetentionApplied` to VcdVappSnapshotSummary returned\n by `GET /vcd/vapp/{id}/snapshot`.\n * Added new field `isCustomRetentionApplied` to VcdVappSnapshotDetail returned\n by `GET /vcd/vapp/snapshot/{id}`.\n * Added new field `isCustomRetentionApplied` to VolumeGroupSnapshotSummary\n returned by `GET /volume_group/{id}/snapshot`.\n * Added new field `isCustomRetentionApplied` to VolumeGroupSnapshotDetail\n returned by `GET /volume_group/snapshot/{id}`.\n * Added optional field `isQueuedSnapshot` to the response of\n GET `/managed_volume/{id}/snapshot`, GET `/managed_volume/snapshot/{id}`.\n and POST `/managed_volume/{id}/end_snapshot`.\n The field specifies if ManagedVolume snapshots are in queue to be stored\n as patch file.\n * Added new field `securityLevel` to `SnmpTrapReceiverConfig` object as\n optional input parameter for SNMPv3, which is used in\n `PATCH /cluster/{id}/snmp_configuration` and\n `GET /cluster/{id}/snmp_configuration`.\n * Added new field `advancedRecoveryConfigBase64` to `ExportOracleDbConfig`.\n and `MountOracleDbConfig` objects as optional input parameter\n during Oracle recovery.\n * Added new optional field `isRemote` to UnmanagedObjectSummary object, which\n is returned from a `GET /unmanaged_object` call.\n * Added new field `hostLogRetentionHours` to OracleRacDetail returned by\n `GET /oracle/rac/{id}` and `PATCH /oracle/rac/{id}`.\n * Added new field `hostLogRetentionHours` to OracleHostDetail returned by\n `GET /oracle/host/{id}` and `PATCH /oracle/host/{id}`.\n * Added new field `hostLogRetentionHours` to OracleDbDetail returned by\n `GET /oracle/db/{id}` and `PATCH /oracle/db/{id}`.\n * Added new field `snapshotFrequency` to `snapshotLocationRetentionInfo` field\n of 
`SnapshotRetentionInfo` field of AppBlueprintSnapshotSummary returned\n by `GET /app_blueprint/{id}/snapshot`.\n * Added new field `snapshotFrequency` to `snapshotLocationRetentionInfo` field\n of `SnapshotRetentionInfo` field of AppBlueprintSnapshotDetail returned by\n `GET /app_blueprint/snapshot/{id}` .\n * Added new field `snapshotFrequency` to `snapshotLocationRetentionInfo` field\n of `SnapshotRetentionInfo` field of AwsEc2InstanceSummary returned by\n `GET /aws/ec2_instance`.\n * Added new field `snapshotFrequency` to `snapshotLocationRetentionInfo` field\n of `SnapshotRetentionInfo` field of AwsEc2InstanceDetail returned by\n `GET /aws/ec2_instance/{id}`.\n * Added new field `snapshotFrequency` to `snapshotLocationRetentionInfo` field\n of `SnapshotRetentionInfo` field of AwsEc2InstanceDetail returned by\n `PATCH /aws/ec2_instance/{id}`.\n * Added new field `snapshotFrequency` to `snapshotLocationRetentionInfo` field\n of `SnapshotRetentionInfo` field of HypervVirtualMachineSnapshotSummary\n returned by `GET /hyperv/vm/{id}/snapshot`.\n * Added new field `snapshotFrequency` to `snapshotLocationRetentionInfo` field\n of `SnapshotRetentionInfo` field of HypervVirtualMachineSnapshotDetail\n returned by `GET /hyperv/vm/snapshot/{id}`.\n * Added new field `snapshotFrequency` to `snapshotLocationRetentionInfo` field\n of `SnapshotRetentionInfo` field of ManagedVolumeSnapshotSummary returned\n by `GET /managed_volume/{id}/snapshot`.\n * Added new field `snapshotFrequency` to `snapshotLocationRetentionInfo` field\n of `SnapshotRetentionInfo` field of ManagedVolumeSnapshotSummary returned by\n `POST /managed_volume/{id}/end_snapshot`.\n * Added new field `snapshotFrequency` to `snapshotLocationRetentionInfo` field\n of `SnapshotRetentionInfo` field of ManagedVolumeSnapshotDetail returned\n by `GET /managed_volume/snapshot/{id}`.\n * Added new field `snapshotFrequency` to `snapshotLocationRetentionInfo` field\n of `SnapshotRetentionInfo` field of NutanixVmSnapshotSummary returned by\n `GET /nutanix/vm/{id}/snapshot`.\n * Added new field `snapshotFrequency` to `snapshotLocationRetentionInfo` field\n of `SnapshotRetentionInfo` field of NutanixVmSnapshotDetail returned by\n `GET /nutanix/vm/snapshot/{id}`.\n * Added new field `snapshotFrequency` to `snapshotLocationRetentionInfo` field\n of `SnapshotRetentionInfo` field of OracleDbSnapshotSummary returned by\n `GET /oracle/db/{id}/snapshot`.\n * Added new field `snapshotFrequency` to `snapshotLocationRetentionInfo` field\n of `SnapshotRetentionInfo` field of OracleDbSnapshotDetail returned by\n `GET /oracle/db/snapshot/{id}`.\n * Added new field `snapshotFrequency` to `snapshotLocationRetentionInfo` field\n of `SnapshotRetentionInfo` field of StorageArrayVolumeGroupSnapshotSummary\n returned by `GET /storage/array_volume_group/{id}/snapshot`.\n * Added new field `snapshotFrequency` to `snapshotLocationRetentionInfo` field\n of `SnapshotRetentionInfo` field of StorageArrayVolumeGroupSnapshotDetail\n returned by `GET /storage/array_volume_group/snapshot/{id}`.\n * Added new field `snapshotFrequency` to `snapshotLocationRetentionInfo` field\n of `SnapshotRetentionInfo` field of VcdVappSnapshotSummary returned by\n `GET /vcd/vapp/{id}/snapshot`.\n * Added new field `snapshotFrequency` to `snapshotLocationRetentionInfo` field\n of `SnapshotRetentionInfo` field of VcdVappSnapshotDetail returned by\n `GET /vcd/vapp/snapshot/{id}`.\n * Added new field `snapshotFrequency` to `snapshotLocationRetentionInfo` field\n of `SnapshotRetentionInfo` field of 
VolumeGroupSnapshotSummary returned by\n `GET /volume_group/{id}/snapshot`.\n * Added new field `snapshotFrequency` to `snapshotLocationRetentionInfo` field\n of `SnapshotRetentionInfo` field of VolumeGroupSnapshotDetail returned by\n `GET /volume_group/snapshot/{id}`.\n * Added new field `SnapshotRetentionInfo` to AppBlueprintSnapshotSummary\n returned by `GET /app_blueprint/{id}/snapshot`.\n * Added new field `SnapshotRetentionInfo` to AppBlueprintSnapshotDetail\n returned by `GET /app_blueprint/snapshot/{id}` .\n * Added new field `SnapshotRetentionInfo` to AwsEc2InstanceSummary returned\n by `GET /aws/ec2_instance`.\n * Added new field `SnapshotRetentionInfo` to AwsEc2InstanceDetail returned\n by `GET /aws/ec2_instance/{id}`.\n * Added new field `SnapshotRetentionInfo` to AwsEc2InstanceDetail returned\n by `PATCH /aws/ec2_instance/{id}`.\n * Added new field `SnapshotRetentionInfo` to\n HypervVirtualMachineSnapshotSummary returned by\n `GET /hyperv/vm/{id}/snapshot`.\n * Added new field `SnapshotRetentionInfo` to\n HypervVirtualMachineSnapshotDetail returned by\n `GET /hyperv/vm/snapshot/{id}`.\n * Added new field `SnapshotRetentionInfo` to ManagedVolumeSnapshotSummary\n returned by `GET /managed_volume/{id}/snapshot`.\n * Added new field `SnapshotRetentionInfo` to ManagedVolumeSnapshotSummary\n returned by `POST /managed_volume/{id}/end_snapshot`.\n * Added new field `SnapshotRetentionInfo` to ManagedVolumeSnapshotDetail\n returned by `GET /managed_volume/snapshot/{id}`.\n * Added new field `SnapshotRetentionInfo` to NutanixVmSnapshotSummary\n returned by `GET /nutanix/vm/{id}/snapshot`.\n * Added new field `SnapshotRetentionInfo` to NutanixVmSnapshotDetail\n returned by `GET /nutanix/vm/snapshot/{id}`.\n * Added new field `SnapshotRetentionInfo` to OracleDbSnapshotSummary\n returned by `GET /oracle/db/{id}/snapshot`.\n * Added new field `SnapshotRetentionInfo` to OracleDbSnapshotDetail returned\n by `GET /oracle/db/snapshot/{id}`.\n * Added new field `SnapshotRetentionInfo` to\n StorageArrayVolumeGroupSnapshotSummary returned by\n `GET /storage/array_volume_group/{id}/snapshot`.\n * Added new field `SnapshotRetentionInfo` to\n StorageArrayVolumeGroupSnapshotDetail returned by\n `GET /storage/array_volume_group/snapshot/{id}`.\n * Added new field `SnapshotRetentionInfo` to VcdVappSnapshotSummary returned\n by `GET /vcd/vapp/{id}/snapshot`.\n * Added new field `SnapshotRetentionInfo` to VcdVappSnapshotDetail returned\n by `GET /vcd/vapp/snapshot/{id}`.\n * Added new field `SnapshotRetentionInfo` to VolumeGroupSnapshotSummary\n returned by `GET /volume_group/{id}/snapshot`.\n * Added new field `SnapshotRetentionInfo` to VolumeGroupSnapshotDetail\n returned by `GET /volume_group/snapshot/{id}`.\n * Added optional field `networkInterface` to `NetworkThrottleUpdate`. The\n field allows users to specify non standard network interfaces. This applies\n to the `PATCH /network_throttle/{id}` endpoint.\n * Added mandatory field `networkInterface` to `NetworkThrottleSummary`.\n This applies to the endpoints `GET /network_throttle` and\n `GET /network_throttle/{id}`.\n * Added endpoint `POST /cluster/{id}/manual_discover`, which allows\n the customer to manually input data that would be learned using\n mDNS discovery. Returns same output as discover.\n * `PATCH /cluster/{id}/snmp_configuration` will now use\n `SnmpConfigurationPatch` as a parameter.\n * Added optional field `user` to `SnmpTrapReceiverConfig`. 
The field\n specifies which user to use for SNMPv3 traps.\n * Added optional field `users` to `SnmpConfiguration`. The field contains\n usernames of users configured for SNMPv3.\n * Added two new models `SnmpUserConfig` to store user credentials and\n `SnmpConfigurationPatch`.\n * Added new endpoint `POST /role/authorization_query` to get authorizations\n granted to roles.\n * Added new endpoint `GET /role/{id}/authorization` to get authorizations\n granted to a role.\n * Added new endpoint `POST /role/{id}/authorization` to grant authorizations\n to a role.\n * Added new endpoint `POST /role/{id}/authorization/bulk_revoke` to revoke\n authorizations from a role.\n * Added optional field `recoveryInfo` to UnmanagedObjectSummary.\n * Added optional field `isRetentionLocked` to SlaInfo.\n The parameter indicates that the SLA Domain associated with the job is a\n Retention Lock SLA Domain.\n * Added optional field `legalHoldDownloadConfig` to\n `FilesetDownloadFilesJobConfig`,`HypervDownloadFileJobConfig`,\n `DownloadFilesJobConfig`,`ManagedVolumeDownloadFileJobConfig`,\n `NutanixDownloadFilesJobConfig`,`StorageArrayDownloadFilesJobConfig`,\n `VolumeGroupDownloadFilesJobConfig`.This is an optional argument\n containing a Boolean parameter to depict if the download is being\n triggered for Legal Hold use case. This change applies to\n /fileset/snapshot/{id}/download_files,\n /hyperv/vm/snapshot/{id}/download_file,\n /vmware/vm/snapshot/{id}/download_files,\n /managed_volume/snapshot/{id}/download_file,\n /nutanix/vm/snapshot/{id}/download_files,\n /storage/array_volume_group/snapshot/{id}/download_files and\n /volume_group/snapshot/{id}/download_files endpoints.\n * Added optional field isPlacedOnLegalHold to BaseSnapshotSummary.\n The Boolean parameter specifies whether the snapshot is placed under a\n Legal Hold.\n * Added new endpoint `GET /ods_configuration`.\n Returns the current configuration of on-demand snapshot handling.\n * Added new endpoint `PUT /ods_configuration`.\n Update the configuration of on-demand snapshot handling.\n * Added two new models `OdsConfigurationSummary`, `OdsPolicyOnPause` and a new\n enum `SchedulingType`.\n * Added `odsPolicyOnPause` field in `OdsConfigurationSummary` to include the\n policy followed by the on-demand snapshots, during an effective pause.\n * Added new enum field `schedulingType` in `OdsPolicyOnPause` to support\n deferring the on-demand snapshots during an effective pause.\n * Added optional query parameter `show_snapshots_legal_hold_status` to\n `GET /archive/location` endpoint, indicating if `isLegalHoldSnapshotPresent`.\n field should be populated in response.\n * Added storage array volume group asynchronous request status endpoint\n `GET /storage/array_volume_group/request/{id}`. Request statuses for\n storage array volume groups which previously used\n `/storage/array/request/{id}` must now use this new endpoint.\n * Added forceFull parameter to the properties of patch volume group object\n to permit forcing a full snapshot for a specified volume group.\n * Added `isDcaAccountInstance` field to `AwsEc2InstanceSummary` to indicate\n whether the EC2 instance belongs to a DCA account. 
This impacts the endpoints\n `GET /aws/ec2_instance` and `GET /aws/ec2_instance/{id}`.\n * Added `encryptionKeyId` as an optional field in CreateCloudInstanceRequest\n definition used in the on-demand API conversion API `/cloud_on/aws/instance`.\n to support KMS encryption for CloudOn conversion in AWS.\n * Added new endpoint `GET /job/{id}/child_job_instance`.\n Returns the child job instances (if any) spawned by the given parent job\n instance. This endpoint requires a support token to access.\n * Updated `ArchivalLocationSummary` returned by `GET /archive/location`.\n endpoint to include the `isConsolidationEnabled` field, to indicate\n if consolidation is enabled for the given archival location.\n * Changed `encryptionPassword` parameter to optional in\n `NfsLocationCreationDefinition` to support creating NFS archival location\n without encryption via `POST /archive/nfs`.\n * Added an optional parameter `disabledEncryption` to\n `NfsLocationCreationDefinition` with a default value of false, to enable or\n disable encryption via `POST /archive/nfs`.\n * Added a new model `ValidationResponse` and REST API endpoints\n `/cloud_on/validate/instantiate_on_cloud` and\n `/cloud_on/validate/cloud_image_conversion` for validation of cloud\n conversion.\n * Added `sortBy` and `sortOrder` parameters to `GET /hyperv/vm/snapshot/mount`.\n to allow sorting of Hyper-V mounts.\n Added the enum `HypervVirtualMachineMountListSortAttribute`, defining which\n properties of Hyper-V mounts are sortable.\n * Added an optional field `shouldApplyToExistingSnapshots` in\n `SlaDomainAssignmentInfo` to apply the new SLA configuration to existing\n snapshots of protected objects.\n * Added a new optional field `isOracleHost` to `HostRegister` in\n `POST /host/bulk` and `HostUpdate` in `PATCH /host/bulk` to indicate if we\n should discover Oracle information during registration and host refresh.\n * Added a new model `NutanixVirtualDiskSummary` that is returned by\n `GET /nutanix/vm/{id}` to include the disks information for a Nutanix\n virtual machine.\n * Added mandatory field `pendingSnapshot` to `SystemStorageStats`, which is\n returned by `GET /stats/system_storage`.\n * Added optional isIsilonChangelistEnabled in the NasBaseConfig and NasConfig.\n NasBaseConfig is returned as part of HostSummary, which is returned by the\n `Get /host/envoy` and `Get /host` endpoints. NasConfig is used by\n HostRegister and HostUpdate. The HostRegister is used by the\n `Post /host/bulk` endpoint and the HostUpdate is used by the\n `PATCH /host/bulk` endpoint.\n * Added a new model `HostShareParameters`. This model has two fields,\n isNetAppSnapDiffEnabled and isIsilonChangelistEnabled. The\n isNetAppSnapDiffEnabled is a Boolean value that specifies whether the\n SnapDiff feature of NetApp NAS is used to back up the NAS share. The\n isIsilonChangelistEnabled is a Boolean value that specifies whether\n the Changelist feature of Isilon NAS is used to back up the NAS share.\n * Added optional field `HostShareParameters` in `HostFilesetShareSummary`,\n `HostFilesetShareDetail` and `HostShareDetail`. The HostShareDetail impacts\n the endpoints `Get /host/share` and `Post /host/share`. The\n `HostFilesetShareDetail` impacts the endpoint `Get /host_fileset/share/{id}`.\n . 
The HostFilesetShareSummary impacts the endpoint\n `Get /host_fileset/share`.\n * Added `isInVmc` in `GET /vcd/vapp/{id}`, and `PATCH /vcd/vapp/{id}`.\n to return whether the virtual machine is in a VMC setup.\n * Added new endpoint `GET /vmware/hierarchy/{id}/export`. Returns the\n VmwareHierarchyInfo object with the given ID.\n * Added optional field `platformDetails` to `PlatformInfo`, which is returned\n by `GET /cluster/{id}/platforminfo`.\n * Added optional field `cpuCount` to `PlatformInfo`, which is returned by\n `GET /cluster/{id}/platforminfo`.\n * Added optional field `ramSize` to `PlatformInfo`, which is returned by\n `GET /cluster/{id}/platforminfo`.\n * Added new value `RangeInTime` to `RecoveryPointType` enum, which is used in\n the `ReportTableRequest` object for the POST `/report/{id}/table` and POST\n `/report/data_source/table` endpoints.\n * Added the optional field `shouldForceFull` to `MssqlDbUpdate` object,\n which is referred by `MssqlDbUpdateId`, which is referred as the\n body parameter of `PATCH /mssql/db/bulk`.\n\n ### Changes to Internal API in Rubrik version 5.1.1\n ## Breaking changes:\n * Changed response code of a successful\n `POST /managed_volume/{id}/begin_snapshot` API from 201 to 200.\n\n ### Changes to Internal API in Rubrik version 5.1.0\n ## Breaking changes:\n * Changed response type of percentInCompliance and percentOutOfCompliance\n in ComplianceSummary to double.\n * Renamed new enum field `MissedSnapshots` to `MissedLocalSnapshots`.\n and `LastSnapshot` to `LatestLocalSnapshot`, in the\n following properties:\n measure property in ChartSummary, column property in TableSummary,\n and sortBy property in ReportTableRequest.\n * Renamed effectiveThroughput to throughput in EventSeriesMonitoredJobSummary.\n * Renamed realThroughput to throughput in EventSeriesSummary.\n * Updated response of GET /event_series/{id} to remove effectiveThroughput.\n * Renamed paths `/storage/array/volume/group` to `/storage/array_volume_group`.\n * Renamed the field cassandraSetup in ReplaceNodeStatus to metadataSetup\n * Renamed the field cassandraSetup in RecommisionNodeStatus to metadataSetup\n * Renamed the field cassandraSetup in AddNodesStatus to metadataSetup\n * Renamed the field cassandraSetup in ClusterConfigStatus to metadataSetup\n * Renamed the field removeCassandra in RemoveNodeStatus to removeMetadatastore\n for the GET /cluster/{id}/remove_node endpoint.\n * Moved the `GET /blackout_window` endpoint from internal to V1.\n * Moved the `PATCH /blackout_window` endpoint from internal to V1.\n * Removed endpoint POST /report/global_object endpoint.\n /report/data_source/table can be used to get the same information.\n * Made accessKey optional in ObjectStoreLocationDetail as accessKey is not\n defined in Cross Account Role Based locations. 
Also made accessKey required\n again in ObjectStoreLocationDefinition.\n * Removed `progressPercentage` from `EventSeriesMonitoredJobSummary` object.\n * Removed endpoint `POST cluster-id-security-password-strength` since it is\n no longer used at bootstrap.\n * Moved the GET `/mssql/hierarchy/{id}/descendants` and\n GET `/mssql/hierarchy/{id}/children` endpoints from internal to v1.\n\n ## Feature Additions/improvements:\n * GET POST /cluster/{id}/node now accepts an optional encryption\n password in the encryptionPassword field.\n * GET /node_management/replace_node now accepts an optional encryption\n password in the encryptionPassword field.\n * Added optional field `shouldSkipScheduleRecoverArchivedMetadataJob` to\n the body parameter of `POST /archive/object_store/reader/connect`, to\n determine whether to schedule the archival recovery job.\n When the value is 'false,' the recovery job is scheduled normally.\n When the value is 'true,' the recovery job is not scheduled.\n The default behavior is to schedule the recovery job.\n * Added mandatory field `cdp` to SystemStorageStats.\n * Added optional field `agentStatus` to NutanixHierarchyObjectSummary.\n The field indicates whether a Rubrik backup agent is registered to the\n Nutanix object.\n * Added optional field `shouldUseAgent` to `RestoreFilesJobConfig`.\n in `POST /vmware/vm/snapshot/{id}/restore_files` to specify\n whether to use Rubrik Backup Service to restore files. Default value is true.\n * GET /managed_object/bulk/summary and GET\n /managed_object/{managed_id}/summary no longer include archived objects\n with no unexpired snapshots in their results.\n * Added new required Boolean field `isDbLocalToTheCluster` to\n `OracleDbSummary` and `OracleDbDetail`.\n * Added optional field `awsAccountId` to ObjectStoreLocationSummary.\n * Added optional field `shouldRecoverSnappableMetadataOnly` to all the\n reader location connect definitions.\n * Added new enum value `ArchivalComplianceStatus` to the following properties:\n attribute property in ChartSummary and column property in TableSummary\n * Added new enum fields `ArchivalInComplianceCount`,\n `ArchivalNonComplianceCount` and `MissedArchivalSnapshots` to the\n following properties:\n measure property in ChartSummary, column property in TableSummary,\n and sortBy property in ReportTableRequest.\n * GET /managed_object/bulk/summary and GET\n /managed_object/{managed_id}/summary will always include the correct relic\n status for hosts and their descendants.\n * Added field `isLocked` to PrincipalSummary.\n * Added optional query parameter `snappableStatus` to /vmware/data_center and\n /vmware/host. This parameter enables a user to fetch the set of protectable\n objects from the list of objects visible to that user.\n * Added optional field `archivalComplianceStatus` to RequestFilters\n * Added optional field `archivalComplianceStatus` to FilterSummary\n * Added optional field `alias` to HostSummary, HostRegister, and HostUpdate\n schemas. 
This field will allow the user to specify an alias for each host\n which can be used for search.\n * Added optional field `subnet` to ManagedVolumeExportConfig\n * Added optional field `status` to oracle/hierarchy/{id}/children\n * Added optional field `status` to oracle/hierarchy/{id}/descendants\n * Added optional field `status` to hyperv/hierarchy/{id}/children\n * Added optional field `status` to hyperv/hierarchy/{id}/descendants\n * Added optional field `numNoSla` to ProtectedObjectsCount\n * Added optional field `numDoNotProtect` to ProtectedObjectsCount\n * Added optional field `limit`, `offset`, `sort_by`, `sort_order` to\n /node/stats\n * Added optional field encryptionAtRestPassword to configure password-based\n encryption for an edge instance.\n * Added new endpoint GET /report/data_source/{data_source_name}/csv.\n * Added new endpoint POST /report-24_hour_complianace_summary.\n * Added new endpoint POST /report/data-source/{data_source_name} to get\n columns directly from report data source.\n * Added optional field compliance24HourStatus to RequestFilters object.\n * Added the `port` optional field to QstarLocationDefinition. The `port` field\n enables a user to specify the server port when adding a new location or\n editing an existing location.\n * Added optional field archivalTieringSpec to ArchivalSpec and ArchivalSpecV2\n to support archival tiering. This enables the user to configure either\n Instant Tiering or Smart Tiering (with a corresponding minimum accessible\n duration) on an SLA domain with archival configured to an Azure archival\n location.\n * Updated endpoints /vcd/vapp, /oracle/db and /aws/ec2_instance\n to have a new optional query parameter, indicating if backup task information\n should be included.\n * Added optional field logConfig to SlaDomainSummaryV2, SlaDomainDefinitionV2\n and SlaDomainPatchDefintionV2 to support CDP (ctrlc). The parameters\n distinguish SLAs with CDP enabled from SLAs with CDP disabled, and enable\n users to specify log retention time. The field also provides an optional\n frequency parameter which can be used by Oracle and SQL Server log backups.\n * Added optional field logRetentionLimit to ReplicationSpec to support\n CDP replication. 
The field gives the retention limit for logs at the\n specified location.\n * Moved the `GET /vmware/compute_cluster` endpoint from internal to V1.\n * Moved the `GET /vmware/compute_cluster/{id}` endpoint from internal to V1.\n * Changed the existing `PATCH mssql/db/bulk` endpoint to return an\n unprotectable reason as a string in the `unprotectableReason` field instead\n of a JSON struct.\n * Added optional field `kmsMasterKeyId` and changed the existing field\n `pemFileContent` to optional field in `DcaLocationDefinition`.\n * Added new optional field `enableHardlinkSupport` to FilesetSummary and\n FilesetCreate in `POST /fileset`, \"GET /fileset\" and \"PATCH /fileset/{id}\"\n endpoints to enable recognition and deduplication of hardlinks in\n fileset backup.\n * Added optional query parameter to `GET /archive/location` endpoint,\n indicating if `isRetentionLockedSnapshotProtectedPresent` field should\n be populated in response.\n * Added continuous data protection state for each VMware virtual machine\n * Added new endpoint `PUT /polaris/archive/proxy_setting`.\n * Added new endpoint `GET /polaris/archive/proxy_setting/{id}`.\n * Added new endpoint `DELETE /polaris/archive/proxy_setting/{id}`.\n * Added new endpoint `PUT /polaris/archive/aws_compute_setting`.\n * Added new endpoint `GET /polaris/archive/aws_compute_setting/{id}`.\n * Added new endpoint `DELETE /polaris/archive/aws_compute_setting/{id}`.\n * Added new endpoint `PUT /polaris/archive/azure_compute_setting`.\n * Added new endpoint `GET /polaris/archive/azure_compute_setting/{id}`.\n * Added new endpoint `DELETE /polaris/archive/azure_compute_setting/{id}`.\n * Added new endpoint `PUT /polaris/archive/aws_iam_location`.\n * Added new endpoint `GET /polaris/archive/aws_iam_location/{id}`.\n * Added new endpoint `PUT /polaris/archive/azure_oauth_location`.\n * Added new endpoint `GET /polaris/archive/azure_oauth_location/{id}`.\n * Added new endpoint `PUT /polaris/archive/aws_iam_customer_account`.\n * Added new endpoint `GET /polaris/archive/aws_iam_customer/{id}`.\n * Added new endpoint `DELETE /polaris/archive/aws_iam_customer/{id}`.\n * Added new endpoint `PUT /polaris/archive/azure_oauth_customer`.\n * Added new endpoint `GET /polaris/archive/azure_oauth_customer/{id}`.\n * Added new endpoint `DELETE /polaris/archive/azure_oauth_customer/{id}`.\n * Updated `ArchivalLocationSummary` returned by `GET /archive/location`.\n endpoint to include `currentState` field, to indicate whether the archival\n location is connected or temporarily disconnected.\n * Updated `ArchivalLocationSummary` returned by `GET /archive/location`.\n endpoint to include `isComputeEnabled` field, to indicate whether the\n archival location has cloud compute enabled.\n * Added optional field `cloudStorageTier` to `BaseSnapshotSummary`, to indicate\n the current storage tier of the archived copy of a snapshot.\n * Added endpoint `PUT /polaris/archive/aws_iam_location/reader_connect`.\n to connect as a reader to an IAM based AWS archival location.\n * Added endpoint `PUT /polaris/archive/azure_oauth_location/reader_connect`.\n to connect as a reader to an OAuth based Azure archival location.\n * Added endpoint `POST polaris/archive/location/{id}/reader/promote`.\n to promote the current cluster to be the owner of a specified IAM based AWS\n archival location that is currently connected as a reader location.\n * Added endpoint `POST polaris/archive/location/{id}/reader/refresh`.\n to sync the current reader cluster with the contents on the IAM based 
AWS\n archival location.\n * Added effectiveSlaDomainName and effectiveSlaDomainSourceId fields\n to `GET /vmware/vcenter/{id}/tag_category` response object.\n * Added effectiveSlaDomainName and effectiveSlaDomainSourceId fields\n to `GET /vmware/vcenter/{id}/tag` response object.\n * Added continuous data protection status for reporting.\n * Added optional field `localCdpStatus` to the following components:\n ChartSummary, TableSummary, ReportTableRequest, RequestFilters and\n FilterSummary.\n * Added `ReportSnapshotIndexState` and `ReportObjectIndexType` to\n `/internal_report_models/internal/definitions/enums/internal_report.yml`.\n * Added optional field `latestSnapshotIndexState` and `objectIndexType` to\n the following components:\n TableSummary, ReportTableRequest, RequestFilters and FilterSummary.\n * Added 24 hour continuous data protection healthy percentage for reporting.\n * Added optional field `PercentLocal24HourCdpHealthy` to the following\n components: TableSummary, ReportTableRequest.\n * Added optional field `replicas` to MssqlHierarchyObjectSummary.\n * Added optional field `hosts` to MssqlHierarchyObjectSummary.\n * Added continuous data protection local log storage size and local throughput\n consumption for reporting.\n * Added optional fields `localCdpThroughput` and `localCdpLogStorage` to the\n following components: ChartSummary, TableSummary and ReportTableRequest.\n * Added optional field requestExclusionFilters to ReportTableRequest.\n * Added an optional field to ManagedVolumeSummary to retrieve the associated\n subnet.\n * Added optional field isEffectiveSlaDomainRetentionLocked to Snappable.\n The parameter depicts if the effective SLA domain for the snappable is\n a Retention Lock SLA Domain.\n * Updated the set of possible continuous data protection statuses for each\n VmwareVirtualMachine.\n * Added the optional field isEffectiveSlaDomainRetentionLocked to\n FilesetSummary. The field is a Boolean that specifies whether the effective\n SLA Domain of a fileset is retention locked.\n * Added optional field iConfiguredSlaDomainRetentionLocked to SlaAssignable.\n The parameter depicts if the configured SLA domain for the object is a\n Retention Lock SLA Domain.\n * Updated `ArchivalLocationSummary` returned by `GET /archive/location`.\n endpoint to include the `isTieringSupported` field, to indicate\n whether a given archival location supports tiering.\n * Added continuous data protection replication status for reporting.\n * Added CdpReplicationStatus as an optional field to the TableSummary and\n ReportTableRequest components.\n * Added optional CdpReplicationStatus field to RequestFilters and\n FiltersSummary.\n * Added optional field isEffectiveSlaDomainRetentionLocked to\n SearchItemSummary. The Boolean parameter specifies whether the effective\n SLA Domain for the search item is a Retention Lock SLA Domain.\n * Updated `OracleMountSummary` returned by GET /oracle/db/mount\n endpoint to include the isInstantRecovered field, to indicate\n whether the mount was created during an Instant Recovery or Live Mount.\n * Added optional field isEffectiveSlaDomainRetentionLocked to\n ManagedObjectSummary. The Boolean parameter specifies whether the effective\n SLA Domain for the search item is a Retention Lock SLA Domain.\n * Added optional field `isRetentionSlaDomainRetentionLocked` to\n UnmanagedSnapshotSummary. 
The parameter indicates that the retention SLA\n Domain associated with the snapshot is a Retention Lock SLA Domain.\n * Added optional field `isSlaRetentionLocked` to EventSeriesSummary.\n The parameter indicates that the SLA Domain associated with the event\n series is a Retention Lock SLA Domain.\n * Updated `ArchivalLocationSummary` returned by `GET /archive/location`.\n endpoint to include the `isConsolidationEnabled` field, to indicate\n if consolidation is enabled for the given archival location.\n * Added the `hasUnavailableDisks` field to `NodeStatus` to indicate whether a\n node has unavailable (failed or missing) disks. This change affects the\n endpoints `GET /cluster/{id}/node`, `GET /node`, `GET /node/{id}`, `GET\n /node/stats`, and `GET /node/{id}/stats`.\n * Added optional NAS vendor type to the HostShareDetail.\n This change affects the endpoints `Get /host/share`, `Post /host/share` and\n `Get /host/share/{id}`.\n * Added optional isSnapdiffEnabled in the NasBaseConfig and NasConfig.\n NasBaseConfig is returned as part of HostSummary, which is returned by the\n `Get /host/envoy` and `Get /host` endpoints. NasConfig is used by\n HostRegister and HostUpdate. The HostRegister field is used by the\n `Post /host/bulk` endpoint and the HostUpdate field is used by the\n `PATCH /host/bulk` endpoint.\n * Added optional snapdiffUsed in the FilesetSnapshotSummary.\n The FilesetSnapshotSummary is used by FilesetDetail and\n FilesetSnapshotDetail. This change affects the endpoints `Post\n /fileset/bulk`, `Get /host_fileset/share/{id}` and\n `Get /fileset/snapshot/{id}`.\n\n ### Changes to Internal API in Rubrik version 5.0.4\n ## Feature Additions/improvements:\n * Added objectState to FilterSummary which is part of body parameter of\n PATCH /report/{id}\n * Added objectState to RequestFilters which is part of body parameter of\n POST /report/data_source/table\n\n ### Changes to Internal API in Rubrik version 5.0.3\n ## Breaking changes:\n * Removed fields 'virtualMedia' and 'ssh' from IpmiAccess and\n IpmiAccessUpdate.\n\n ## Feature Additions/improvements:\n * Added a new optional field 'oracleQueryUser' to HostRegister, HostUpdate\n and HostDetail objects, for setting the Oracle username for account with\n query privileges on the host. This applies to the following endpoints:\n `POST /host/bulk`, `PATCH /host/{id}`, and `GET /host/{id}`.\n * Added a field `affectedNodeIds` to the `SystemStatus` object. This object is\n returned by `GET /cluster/{id}/system_status`.\n * Made `nodeId` a required field of the `DiskStatus` object. This object, or\n an object containing this object, is returned by the following endpoints:\n `GET /cluster/{id}/disk`, `PATCH /cluster/{id}/disk/{disk_id}`, and\n `GET /node/{id}`.\n\n ### Changes to Internal API in Rubrik version 5.0.2\n ## Feature Additions/improvements:\n * Added an optional field `subnet` to `ManagedVolumeSummary` to retrieve the associated\n subnet.\n * Added `tablespaces` field in `OracleDbSnapshotSummary` to include the list\n of tablespaces in the Oracle database snapshot.\n * Added new endpoint `POST /hierarchy/bulk_sla_conflicts`.\n * Added optional field `limit`, `offset`, `sort_by`, `sort_order` to\n `GET /node/stats`.\n * Added optional field `numNoSla` to `ProtectedObjectsCount`.\n * Added optional field `numDoNotProtect` to `ProtectedObjectsCount`.\n * Introduced optional field `logicalSize` to `VirtualMachineDetail`. 
This\n field gives the sum of logical sizes of all the disks in the virtual\n machine.\n * Added optional fields `nodeIds`, `slaId`, `numberOfRetries`, and\n `isFirstFullSnapshot` to the response of `GET /event_series/{id}`.\n * Added `SapHanaLog` tag in `applicationTag` field of `ManagedVolumeConfig`.\n for SAP HANA log managed volumes.\n * Added required field `dbSnapshotSummaries` in `OracleRecoverableRange` to include\n the list of database snapshots in each Oracle recoverable range.\n * Added field `isOnline` to MssqlDbSummary and changed `hasPermissions` to\n required field.\n * Added `DbTransactionLog` tag, in applicationTag field of\n `ManagedVolumeConfig`, for generic log managed volumes. ApplicationTag has\n to be specified in the request field of POST /managed_volume.\n\n ### Changes to Internal API in Rubrik version 5.0.1\n ## Breaking changes:\n * Removed `GET/POST /smb/enable_security` endpoints.\n * Changed the `objectId` type in `EventSeriesMonitoredJobSummary` and\n `EventSeriesSummary` to a user-visible ID instead of a simple ID.\n * Updated endpoint `POST /smb/domain` to accept a list of domain controllers.\n * Removed endpoint `POST /report/global_object`.\n * Added optional field `kmsMasterKeyId` and changed the existing field\n `pemFileContent` to optional field in `DcaLocationDefinition`.\n * Removed `progressPercentage` from `EventSeriesMonitoredJobSummary` object.\n\n ## Feature Additions/improvements:\n * Updated `ArchivalLocationSummary` returned by `GET /archive/location`.\n endpoint to include `currentState` field, to indicate whether the archival\n location is connected or temporarily disconnected.\n * Added optional field `subnet` to ManagedVolumeExportConfig.\n * Added the`PUT /smb/config` endpoint to manage SMB configuration.\n * Added the following two endpoints.\n - `GET /stats/per_vm_storage`.\n - `GET /stats/per_vm_storage/{vm_id}`.\n * Added optional field `isStickySmbService` to the response of\n `GET /smb/domain` and `POST /smb/domain`.\n * Added new endpoint `GET /report/data_source/{data_source_name}/csv`.\n * Added new endpoint `POST /report_24_hour_complianace_summary`.\n * Added new endpoint `POST /report/data-source/{data_source_name}` to get\n columns directly from report data source.\n * Added new report API endpoints:\n - `GET /report/summary/physical_storage_time_series`.\n - `GET /report/summary/average_local_growth_per_day`.\n * Added `GET /node/stats` which returns stats for all nodes.\n * Added `GET /cluster/{id}/security/password/zxcvbn` to return\n the enabled or disabled status of ZXCVBN validation for new passwords.\n * Added `POST /cluster/{id}/security/password/zxcvbn` to toggle\n ZXCVBN validation for new passwords.\n\n ### Changes to Internal API in Rubrik version 5.0.0\n ## Breaking changes:\n * Removed `/user_notification` endpoints.\n * Added `rawName` field in `ArchivalLocationSummary`, which contains the\n raw name of the archival location.\n * Removed `shareType` from config field in PATCH /managedvolume request.\n * Changed `/cluster/me/ntp_server` endpoint to accept symmetric keys\n and the corresponding hashing algorithm.\n * Removed `/job/type/prune_job_instances` endpoint.\n * Removed `/kmip/configuration` endpoint.\n * Removed `/session/api_token` endpoint.\n * Added `subnet` field in `ManagedVolumeConfig`, which specifies an outgoing\n VLAN interface for a Rubrik node. 
This is a required value when creating a\n managed volume on a Rubrik node that has multiple VLAN interfaces.\n * Removed the `VolumeGroupVolumeSummary`, and replaced it with\n `HostVolumeSummary`.\n * Removed `volumeIdsIncludedInSnapshots` from `VolumeGroupDetail`.\n * Added new optional fields `mssqlCbtEnabled`, `mssqlCbtEffectiveStatus`,\n `mssqlCbtDriverInstalled`, `hostVfdEnabled` and `hostVfdDriverState` to\n GET /host/{id} response.\n * Responses for `/cluster/{id}/dns_nameserver` and\n `/cluster/{id}/dns_search_domain` changed to be array of strings.\n * Added new required field `language` in `UserPreferencesInfo` for\n GET /user/{id}/preferences and PATCH /user/{id}/preferences\n * Added new field `missedSnapshotTimeUnits` in `MissedSnapshot`.\n * Removed `localStorage` and `archiveStorage` from `UnmanagedSnapshotSummary`.\n * Moved the `Turn on or off a given AWS cloud instance` endpoint from PATCH of\n `/cloud_on/aws/instance` to PATCH of `/cloud_on/aws/instance/{id}/cloud_vm`.\n Also removed the `id` field from the definition of `CloudInstanceUpdate`.\n * Moved the `Turn on or off a given Azure cloud instance` endpoint from PATCH\n of `/cloud_on/azure/instance` to PATCH of\n `/cloud_on/azure/instance/{id}/cloud_vm`. Also removed the `id` field from\n the definition of `CloudInstanceUpdate`.\n * Moved the `Delete a given AWS cloud instance` endpoint from DELETE of\n `/cloud_on/aws/instance/{id}` to DELETE of\n `/cloud_on/aws/instance/{id}/cloud_vm`.\n * Moved the `Delete a given Azure cloud instance` endpoint from DELETE of\n `/cloud_on/azure/instance/{id}` to DELETE of\n `/cloud_on/azure/instance/{id}/cloud_vm`.\n * Modified the existing endpoint DELETE `/cloud_on/aws/instance/{id}` to\n remove entry of a given AWS cloud instance instead of terminating the\n instance.\n * Modified the existing endpoint DELETE `/cloud_on/azure/instance/{id}` to\n remove entry of a given Azure cloud instance instead of terminating the\n instance.\n * Removed `/job/type/job-schedule_gc_job_start_time_now` endpoint. Use\n endpoint POST `/job/type/garbageCollection` to schedule a GC job to\n run now.\n * Removed `config` parameter from `/job/type/garbageCollection`.\n * Added optional parameter `jobInstanceId` to `EventSummary`.\n * Added `jobInstanceId` as a new optional query parameter for\n GET /event_series/{id}/status endpoint.\n * Modified the endpoint GET /event_series/status to a POST and changed the\n input parameter to a request body of type `EventSeriesDetail`.\n * Modified the endpoint PATCH /replication/target/{id} to take a request body\n of type ReplicationTargetUpdate instead of ReplicationTargetDefinition.\n * Added Discovery EventType.\n * Added `name` and deprecated `hostname` in `HostSummary` and `HostDetail`.\n response.\n * Added `isDeleted` and deprecated `isArchived` in MssqlDbReplica response.\n * Removed `GET /stats/cloud_storage` endpoint.\n * Removed DELETE /oracle/db/{id} endpoint to delete an Oracle database.\n * By default, a volume group is not associated with any volumes at creation\n time. This default is a change from the 4.2 implementation, where newly\n created volume groups contain all of the host volumes. 
On 5.0 clusters,\n use the `GET /host/{id}/volume` endpoint to query all host volumes.\n\n ## Feature Additions/improvements:\n * Added new endpoint POST/report/data-source/{data_source_name} to get columns\n directly from report data source.\n * Added optional field compliance24HourStatus to RequestFilters object.\n * Added GET /event/event_count-by-status to get job counts based on job status.\n * Added GET /event/event_count-by-job-type to get job counts based on job type.\n * Added GET /event_series endpoint to get all event series information in the\n past 24 hours.\n * Added `oracleDatabase` to ManagedObjectDescendantCounts.\n * Introduced `POST /session/realm/{name}` endpoint to generate session\n tokens in the LDAP display name of {name}.\n * Added optional `storageClass` field to `ObjectStoreReaderConnectDefinition`.\n to store `storageClass` field for the newly connected reader location.\n * Added optional `encryptionType` field to `ObjectStoreLocationSummary` to\n return encryption type used for an object store archival location.\n * Added a new endpoint POST /oracle/db/download/{snapshot_id} to download\n a particular snapshot (and corresponding logs) for Oracle.\n * Added optional `ownerId` and `reference` fields to\n `/managed_volume/{id}/begin_snapshot`.\n * Added new endpoints regarding references to Managed Volumes, which track\n the processes writing to the Managed Volume.\n - GET `/managed_volume/{id}/snapshot/{snapshot_id}/reference/{reference_id}`.\n PUT `/managed_volume/{id}/snapshot/{snapshot_id}/reference/{reference_id}`.\n PATCH\n `/managed_volume/{id}/snapshot/{snapshot_id}/reference/{reference_id}`.\n DELETE\n `/managed_volume/{id}/snapshot/{snapshot_id}/reference/{reference_id}`.\n are the endpoints for viewing, adding, editing and deleting a Managed\n Volume snapshot reference respectively.\n * Added optional `apiToken` and `apiEndpoint` fields to NasConfig to support\n Pure FlashBlade devices.\n * Added optional `smbValidIps`, `smbDomainName` and `smbValidUsers` fields\n to `VolumeGroupMountSnapshotJobConfig` to support secure SMB.\n * Added optional `smbDomainName`, `smbValidIps`, `smbValidUsers` fields to\n ManagedVolumeExportConfig to support secure SMB.\n * Added a new optional field `oracleSysDbaUser` to /host/{id} POST endpoint\n during register host for setting the Oracle username for account with sysdba\n privileges on this host.\n * Added a new endpoint DELETE /smb/domain/{domain_name} to delete the\n SMB Domain.\n * Added a new endpoint POST /smb/domain/{domain_name}/join to configure\n SMB Domain.\n * Added a new optional filed `oracleSysDbaUser` to /host/{id} endpoint for\n changing the Oracle username for account with sysdba privileges on this\n host.\n * Added a new endpoint POST /smb/enable_security to enable Live Mount\n security\n * Made the `numChannels` field in ManagedVolumeConfig optional.\n * Added `applicationTag` field to ManagedVolumeConfig to specify workload\n type for a managed volume.\n * Added Maintenance EventType\n * Added POST `/report/global_object` endpoint to directly query table data\n from GlobalObject based on ReportTableRequest\n * Added new API endpoint GET `/diagnostic/snappable/{id}` returns\n diagnostic information of all backup tasks of a data source.\n * Added new API endpoint GET `/diagnostic/snappable/{id}/latest` returns\n diagnostic information of the most recent backup task of a data source.\n * Added `shareType` field to ManagedVolumeSummary and ManagedVolumeDetail.\n * Added oracle instant recovery API to 
trigger instant recovery of a\n database.\n * Added RAC, Oracle host and Oracle database fields to the Oracle\n hierarchy API.\n * Added a new endpoint GET /smb/domain to get a list of discovered\n SMB domains in the environment.\n * Added a new endpoint GET /notification_setting to get all Notification\n Settings.\n * Added a new endpoint POST /notification_setting to create a new\n Notification Setting.\n * Added a new endpoint GET /notification_setting/{id} to get a Notification\n Setting specified by the input id.\n * Added a new endpoint PATCH /notification_setting/{id} to update the values\n for a specified Notification Setting.\n * Added a new endpoint DELETE /notification_setting/{id} to delete a\n specified Notification Setting.\n * Introduced `POST /oracle/db/snapshot/{id}/export/tablespace` endpoint to\n trigger the export of a single tablespace in an Oracle database.\n * Added a new optional field `shouldRestoreFilesOnly` to POST\n /oracle/db/snapshot/{id}/export endpoint, used when exporting an Oracle\n database, to specify whether the user requires a full recovery of the\n database or a restoration of the database files.\n * Added /oracle/hierarchy/{id}/children endpoint to get children of an\n object in the Oracle hierarchy.\n * Added /oracle/hierarchy/{id}/descendants endpoint to get descendants of an\n object in the Oracle hierarchy.\n * Added a new endpoint POST /fileset/{id}/unprotect, which can be used to\n unprotect a fileset and specify a retention policy to apply to existing\n snapshots.\n * Added a new optional field `existingSnapshotRetention` to POST\n /sla_domain/{id}/assign, used when unprotecting an object, to specify whether\n to retain existing snapshots according to the current SLA domain, keep\n existing snapshots forever, or expire all snapshots immediately. If not\n specified, this field will default to the existing behavior of keeping\n snapshots forever.\n * Introduced `GET /kmip/client` endpoint to get the stored KMIP client\n configuration.\n * Introduced `PUT /kmip/client` endpoint to set the KMIP client configuration.\n * Introduced `GET /kmip/server` endpoint to get stored KMIP server\n information.\n * Introduced `PUT /kmip/server` endpoint to add a KMIP server.\n * Introduced `DELETE /kmip/server` endpoint to remove a KMIP server.\n * Introduced `POST /session` endpoint to generate session tokens.\n * Added a new optional field `mfaServerId` to /user endpoint for\n associating a configured MFA server.\n * Added REST support for Oracle RAC, Oracle Host.\n Updated the detail and summary for Oracle Database.\n * Added support to run on-demand backup jobs, export snapshots, live\n mount for Oracle Database.\n * Introduced `POST /mfa/rsa/server` endpoint to\n create a new RSA server configuration for MFA integration.\n * Introduced `GET /mfa/rsa/server` endpoint to\n get a list of RSA servers configured for MFA integration.\n * Introduced `PATCH /mfa/rsa/server/{id}` endpoint to\n modify RSA server configuration.\n * Introduced `GET /mfa/rsa/server/{id}` endpoint to\n get RSA server configuration.\n * Introduced `POST /mfa/initialize` to initialize an attempt\n to perform Multifactor authentication for a user.\n * Introduced `POST /mfa/session` to perform Multifactor\n authentication for a user.\n * Introduced `POST /session/api_token` to create an API Token.\n * Added a new optional field `isArrayEnabled` to `FilesetTemplateCreate`.\n for creation of storage array-enabled fileset templates. 
We also include\n this new field in `FilesetTemplateDetail`.\n * Added a new optional field `arraySpec` to `FilesetCreate` for\n creation of storage array-enabled filesets. We also include\n this new field in `FilesetSummary` and `FilesetDetail`.\n * Introduced `GET /cluster/{id}/is_azure_cloud_only` to query if the cluster\n supports only Azure public cloud.\n * Introduced `POST /unmanaged_object/assign_retention_sla` to set Retention\n SLA of unmanaged objects.\n * Introduced `POST /unmanaged_object/snapshot/assign_sla` to set Retention\n SLA of unmanaged snapshots.\n * Introduced `POST /mssql/db/bulk/snapshot/{id}` to take an on-demand snapshot\n of multiple SQL Server databases. The result of this asynchronous request\n can be obtained from `GET /mssql/db/bulk/snapshot/{id}`.\n * Added a new field unprotectable_reasons to GET /mssql/db/{id} and\n GET /mssql/instance/{id}. This field keeps track of the reasons that a\n SQL Server database or instance cannot be protected by Rubrik.\n * Introduced a new `GET /cluster/me/login_banner` and\n `PUT /cluster/me/login_banner` endpoints to get and set the banner\n that displays after each successful login.\n * Introduced a new `GET /cluster/me/security_classification` and\n `PUT /cluster/me/security_classification` endpoints to get and set\n the security classification banner for the cluster. The cluster UI\n displays the banner in the specified color.\n * Introduced `GET /cluster/{id}/security/rksupport_cred` to provide\n the status of the rksupport credentials.\n * Introduced `POST /cluster/{id}/security/rksupport_cred` to update\n the cluster-wide credentials for the specified cluster.\n * Introduced `POST /vmware/vm/snapshot/{id}/mount_disks` to attach VMDKs\n from a mount snapshot to an existing virtual machine\n * Introduced new `GET /host/{id}/volume` endpoint to query the HostVolume\n from the host.\n * Added the `HostVolumeSummary`, which is the response of the endpoint\n `GET /host/{id}/volume` and a part of `VolumeGroupDetail`.\n * Introduced a new `GET /volume_group/host_layout/{snapshot_id}` and\n `GET /volume_group/{host_id}/host_layout` to get the Windows host layout\n of all disks and volumes.\n * Added `WindowsHostLayout` which is the response of\n `GET /volume_group/host_layout/{snapshot_id}` and\n `GET /volume_group/{host_id}/host_layout`.\n * Added support for Blueprint.\n * Added new fields `retentionSlaDomainId` and `retentionSlaDomainName` to\n UnmanagedObjectSummary object, which is returned from a\n `GET /unmanaged_object` call.\n * Removed `unmanagedSnapshotCount` and added new fields `autoSnapshotCount`.\n and `manualSnapshotCount` to UnmanagedObjectSummary object, which is\n returned from a `GET /unmanaged_object` call.\n * Added new fields `retentionSlaDomainId` and `retentionSlaDomainName` to\n UnmanagedSnapshotSummary object, which is returned from a\n `GET /unmanaged_object/{id}/snapshot` call.\n * Added a new field `hasAttachingDisk` to `GET /vmware/vm/snapshot/mount` and\n `GET /vmware/vm/snapshot/mount/{id}` that indicates to the user whether\n this is an attaching disk mount job.\n * Added a new field `attachingDiskCount` to `GET /vmware/vm/snapshot/mount`.\n and `GET /vmware/vm/snapshot/mount/{id}` that indicate to the user how many\n disks are attached.\n * Added field `RetentionSlaDomainName` to sort_by of a\n `GET * /unmanaged_object/{id}/snapshot` call.\n * Added field `excludedDiskIds` to NutanixVmDetail which is returned from a\n `GET /nutanix/vm/{id}` to exclude certain disks from backup. 
Also added\n field to NutanixVmPatch via `PATCH /nutanix/vm/{id}` to allow the field\n to be updated.\n * Introduced the `PATCH /aws/ec2_instance/indexing_state` endpoint for\n enabling/disabling indexing per EC2 instance.\n * Added new optional fields `organizationId` and `organizationName` to\n `/host/{id}` and `/host` endpoints to get the organization a host is\n assigned to due to Envoy.\n * Introduced a new `GET /host/envoy` endpoint. Acts similar to queryHost but\n also includes Envoy organization info if Envoy is enabled.\n * Added a new endpoint `GET /vmware/vcenter/{id}/tag_category` to get a list of\n Tag Categories associated with a vCenter.\n * Added a new endpoint `Get /vmware/vcenter/tag_category/{tag_category_id}` to\n get a specific Tag Category associated with a vCenter.\n * Added a new endpoint `GET /vmware/vcenter/{id}/tag` to get a list of Tags\n associated with a vCenter. The optional category_id parameter allow the\n response to be filtered by Tag Category.\n * Added a new endpoint `GET /vmware/vcenter/tag/{tag_id}` to get a\n specific Tag associated with a vCenter.\n * Introduced `GET /cluster/{id}/global_manager_connectivity` to\n retrieve a set of URLs that are pingable from the CDM cluster.\n * Added optional field `instanceName` in `ManagedObjectProperties`.\n * Added new endpoint GET `/cloud_on/aws/app_image/{id}` to retrieve a specified\n AWS AppBlueprint image.\n * Added new endpoint DELETE `/cloud_on/aws/app_image/{id}` to delete the\n given AWS AppBlueprint image.\n * Added new endpoint GET `/cloud_on/azure/app_image/{id}` to retrieve a\n specified Azure AppBlueprint image.\n * Added new endpoint DELETE `/cloud_on/azure/app_image/{id}` to delete the\n given Azure AppBlueprint image.\n * Added organization endpoint for Oracle.\n * Added new endpoint GET `/cloud_on/aws/app_image` to retrieve all\n AWS AppBlueprint images.\n * Added new endpoints `GET /stats/cloud_storage/physical`, `GET\n /stats/cloud_storage/ingested` and `GET /stats/cloud_storage/logical` which\n return respective stats aggregated across all archival locations\n * Added a new endpoint `POST /vmware/standalone_host/datastore` to get a list\n of datastore names for a given ESXi host.\n * Added a new optional field `apiEndpoint` to `NasBaseConfig`.\n\n ### Changes to Internal API in Rubrik version 4.2\n ## Breaking changes:\n * Introduced a new `GET /cluster/{id}/ipv6` endpoint for getting all IPv6\n addresses configured on a specific or all network interfaces.\n * Introduced a new `PATCH /cluster/{id}/ipv6` endpoint for configuring IPv6\n addresses on a specific network interface for each nodes in cluster.\n * Introduced a new `GET /cluster/{id}/trial_edge` for getting whether the\n cluster is a trial edge.\n * Moved the /auth_domain/ endpoint from internal APIs to the v1 APIs.\n * Deprecated `POST /archive/nfs/reconnect` endpoint. Use\n `POST /archive/nfs/reader/connect` instead to connect as a reader to an\n existing NFS archival location.\n * Deprecated `POST /archive/object_store/reconnect` endpoint. Use\n `POST /archive/object_store/reader/connect` instead to connect as a reader to\n an existing object store location.\n * Deprecated `POST /archive/qstar/reconnect` endpoint. Use\n `POST /archive/qstar/reader/connect` instead to connect as a reader to an\n existing QStar archival location.\n * Deprecated `POST /archive/dca/reconnect` endpoint. 
Use\n `POST /archive/dca/reader/connect` instead to connect as a reader to an\n existing DCA archival location.\n * Removed `POST /hyperv/vm/snapshot/{id}/restore_file` endpoint. Use\n `POST /hyperv/vm/snapshot/{id}/restore_files` instead to support\n multi-files restore for Hyper-V vm.\n * Removed `POST /nutanix/vm/snapshot/{id}/restore_file` endpoint. Use\n `POST /nutainx/vm/snapshot/{id}/restore_files` instead to support\n multi-files restore for Nutanix vm.\n * Removed `search_timezone_offset` parameter from\n `GET /unmanaged_object/{id}/snapshot` endpoint. The endpoint will now\n use configured timezone on the cluster.\n * Renamed the field `id` in `UserDefinition` to `username` for `POST /user`.\n endpoint.\n * Removed the `/mssql/db/sla/{id}/availability_group_conflicts` endpoint.\n * Removed the `/mssql/db/sla/{id}/assign` endpoint.\n * Added support for Envoy VMs for Organization.\n * Modified the `DELETE /storage/array/{id}` endpoint so that it now triggers\n an asynchronous deletion job, responds with an async request object, and\n archives the storage array's hierarchy.\n * Added `numStorageArrayVolumeGroupsArchived` to `DataLocationUsage` which\n is the response of the `GET /stats/data_location/usage` endpoint.\n * Modified `POST /storage/array` endpoint so that it now triggers an\n asynchronous refresh job, and responds with an async request object.\n * Modified the `GET /storage/array/{id}` and `DELETE /storage/array/{id}`.\n endpoints so that the `id` field now corresponds to the managed ID\n instead of the simple ID. The `managed ID` is the ID assigned to the\n storage array object by the Rubrik REST API server.\n * Moved /throttle endpoint to /backup_throttle.\n * Introduced a new `EmailSubscriptionUpdate` object for the request of the\n `PATCH /report/email_subscription/{subscription_id}` endpoint.\n * Introduced a new `ReportSubscriptionOwner` object for the response of\n `GET /report/email_subscription/{subscription_id}` and\n `GET /report/{id}/email_subscription` endpoints.\n * Added the envoyStatus field to the response of the GET /organization\n endpoint.\n * Added new `attachments` field to the `POST /report/{id}/email_subscription`.\n and `PATCH /report/email_subscription/{subscription_id}` endpoints.\n * Removed fields `length` and `isLog` in response of\n `/mssql/db/{id}/restore_files`.\n * Moved the `/cluster/decommissionNode` endpoint to\n `/cluster/decommissionNodes`. The `DecommissionNodeConfig` object is renamed\n as `DecommissionNodesConfig` and now takes in a list of strings which\n correspond to the IDs of the nodes that are to be decommissioned.\n * Moved the `POST /vmware/vm/{id}/register_agent` endpoint from internal\n APIs to the v1 APIs.\n * Added a required field for environment in AzureComputeSummary to support\n Azure Gov Cloud.\n * Remove `POST internal/vmware/vm/snapshot/{id}/mount` endpoint. 
Use public\n API of `POST v1/vmware/vm/snapshot/{id}/mount`.\n * The input field OperatingSystemType value `Linux` is replaced by `UnixLike`.\n in FilesetTemplateCreateDefinition, used by POST /fileset-template, and\n in FilesetTemplatePatchDefinition, used by PATCH /fileset_template/{id}.\n * The input field operating_system_type value `Linux` is replaced by `UnixLike`.\n in GET /host-fileset and GET /host-count.\n * Added `snmpAgentPort` field to SnmpConfig object.\n\n ## Feature Additions/improvements:\n * Introduced the `GET /node_management/default_gateway` and `POST\n /node_management/default_gateway` endpoint to get and set default gateway.\n * Introduced the `GET cloud_on/aws/instance_type_list` and `GET\n cloud_on/azure/instance_type_list` endpoint to fetch list of instance types\n for aws and azures.\n * Introduced the `GET /aws/account/{id}/subnet` endpoint to fetch an\n information summary for each of the subnets available in an AWS account.\n * Introduced the `GET /aws/account/{id}/security_group` endpoint to fetch an\n information summary for each of the security groups belonging to a particular\n virtual network in an AWS account.\n * Moved definitions `Subnet` and `SecurityGroup` of `definitions/cloud_on.yml`.\n to `definitions/cloud_common.yml` so that both the CloudOn and CloudNative\n features can use them.\n * Introduced the `GET /host/{id}/diagnose` endpoint to support target host\n diagnosis features. Network connectivity (machine/agent ping) implemented\n in the current version.\n * Added vCD endpoints to support vCloud Director. The following endpoints\n have been added to the vcdCluster object:\n - `POST /vcd/cluster` to add a new vCD cluster object.\n * Added support for CRUD operations on vCloud Director cluster objects.\n - POST /vcd/cluster, PATCH /vcd/cluster/{id}, DELETE /vcd/cluster/{id},\n POST /vcd/cluster/{id}/refresh are the endpoints for adding, editing,\n deleting and refreshing a vCD cluster object respectively.\n * Introduced endpoint `GET /search/snapshot_search` to search files in a\n given snapshot. 
The search supports prefix search only.\n * Introduced the new `POST /storage/array/{id}/refresh` endpoint to\n create a new refresh job to update the Storage Array metadata.\n * Introduced the new `GET /storage/array/request/{id}` endpoint to\n get status of a storage array-related asynchronous request.\n * Introduced the new `POST /storage/array/volume/group` endpoint\n to add a new storage array volume group.\n * Introduced the new `GET /storage/array/volume/group/{id}` endpoint\n to get details of a storage array volume group.\n * Introduced the new `DELETE /storage/array/volume/group/{id}` endpoint\n to remove a storage array volume group.\n * Introduced the new `GET /storage/array/hierarchy/{id}` endpoint\n to get a summary of an object in the storage array hierarchy.\n * Introduced the new `GET /storage/array/hierarchy/{id}/children` endpoint\n to get the children of an object in the storage array hierarchy.\n * Introduced the new `GET /storage/array/hierarchy/{id}/descendants` endpoint\n to get the descendants of an object in the storage array hierarchy.\n * Introduced the new `GET /storage/array/volume` endpoint to get\n summary information of all storage array volumes.\n * Introduced the new `GET /storage/array/volume/{id}` endpoint to get\n details of a storage array volume.\n * Introduced the new `POST /storage/array/volume/group/{id}/snapshot`.\n endpoint to create a new on-demand backup job for a storage array\n volume group.\n * Introduced the new `PATCH /storage/array/volume/group/{id}` endpoint to\n update the properties of a storage array volume group object.\n * Introduced the new `GET /storage/array/volume/group` endpoint to\n get all storage array volume groups subject to specified filters.\n * Introduced endpoint `POST /archive/location/{id}/owner/pause` to pause\n archiving to a given archival location that is owned by the current cluster.\n * Introduced endpoint `POST /archive/location/{id}/owner/resume` to resume\n archiving to a given archival location that is owned by the current cluster.\n * Introduced endpoint `POST /archive/location/{id}/reader/promote` to promote\n the current cluster to be the owner of a specified archival location that is\n currently connected as a reader location.\n * Introduced endpoint `POST /archive/location/{id}/reader/refresh` to sync the\n current reader cluster with the contents on the archival location. This pulls\n in any changes made by the owner cluster to the archival location since the\n last time the current cluster was synced.\n * Introduced endpoint `POST /archive/dca/reader/connect` to connect as a reader\n to a DCA archival location.\n * Introduced endpoint `POST /archive/nfs/reader/connect` to connect as a reader\n to an NFS archival location.\n * Introduced endpoint `POST /archive/object_store/reader/connect` to connect as\n a reader to an object store location.\n * Introduced endpoint `POST /archive/dca/qstar/connect` to connect as a reader\n to a QStar archival location.\n * Updated `ArchivalLocationSummary` returned by `GET /archive/location`.\n endpoint to include `ownershipStatus` field, to indicate whether the current\n cluster is connected to the archival location as an owner (active or paused),\n as a reader, or if the archival location is deleted.\n * Added the `ca_certs` field to `StorageArrayDefinition` to allow admins\n to specify certificates used for validation when making network\n requests to the storage array API service. 
This effects endpoints\n `POST /storage/array`, `GET /storage/array/{id}`, and\n `PUT /storage/array/{id}`.\n * Introduced the `POST /vmware/vm/snapshot/{id}/download_files` endpoint to\n download multiple files/folders from a given vm snapshot. The URL to\n download the zip file including the files will be presented to the users.\n * Introduced the `POST /fileset/snapshot/{id}/download_files` endpoint to\n download multiple files/folders from a given fileset snapshot. The URL to\n download the zip file including the specific files/folders will be presented\n to the users.\n * Introduced the `POST /nutanix/vm/snapshot/{id}/download_files` endpoint to\n download multiple files/folders from a given nutanix snapshot. The URL to\n download the zip file including the specific files/folders will be presented\n to the users.\n * Removed the `POST /nutanix/vm/snapshot/{id}/download_file` endpoint as\n downloading a single file/folder from the nutanix backup is just a special\n case of downloading multiple files/folders.\n * Introduced the `POST /hyperv/snapshot/{id}/download_files` endpoint to\n download multiple files/folders from a given Hyper-V snapshot. The URL to\n download the zip file including the specific files/folders will be presented\n to the users.\n * Introduced the POST /managed_volume/snapshot/{id}/download_files endpoint\n to download multiple files and/or folders from a given managed volume\n snapshot. This endpoint returns the URL to download the ZIP file that\n contains the specified files and/or folders.\n * Introduced the new `GET /storage/array/volume/group/{id}/search` endpoint to\n search storage array volume group for a file.\n * Introduced the new `GET /storage/array/volume/group/snapshot/{id}`.\n endpoint to retrieve details of a storage array volume group snapshot.\n * Introduced the new `DELETE /storage/array/volume/group/snapshot/{id}`.\n endpoint to remove a storage array volume group snapshot.\n * Introduced the new `DELETE /storage/array/volume/group/{id}` endpoint\n to delete all snapshots of a storage array volume group.\n * Introduced the new `POST /storage/array/volume/group/{id}/download`.\n endpoint to download a storage array volume group snapshot from archival.\n * Introduced new `GET/storage/array/volume/group/snapshot/{id}/restore_files`.\n endpoint to restore files from snapshot of a storage array volume group.\n * Added storage volume endpoints for AWS cloud native workload protection.\n Endpoints added:\n - GET /aws/ec2_instance/{id}/storage_volume/ to retrieve\n all storage volumes details attached to an ec2 instance object.\n - GET /aws/ec2_instance/{ec2_instance_id}/storage_volume/{id} to retrieve\n details of a storage volume attached to an ec2 instance object.\n - POST /aws/ec2_intance/snapshot/{id}/export to export the snapshot of\n an ec2 instance object to a new ec2 instance object.\n * Introduced the new `POST /storage/array/volume/group/{id}/download_file`.\n endpoint to download a file from an archived storage array volume group\n snapshot.\n * Introduced the new `GET /storage/array/volume/group/{id}/missed_snapshot`.\n endpoint to get details about all missed snapshots of a storage array volume\n group.\n * Introduced the `GET /network_throttle` endpoint for retrieving the list of\n network throttles.\n * Introduced the `PATCH /network_throttle/{id}` endpoint for updating\n network throttles.\n * Introduced the new `GET /storage/array/host/{id}` endpoint to get details\n about all storage array volumes connected to a host.\n * Introduced 
the `GET /organization/{id}/storage/array` endpoint for getting\n information for authorized storage array resources in an organization.\n * Introduced the `GET /organization/{id}/storage/array/volume_group/metric`.\n endpoint for getting storage array volume groups metrics in an\n organization.\n * Introduced the new POST /vmware/vm/snapshot/mount/{id}/rollback endpoint to\n rollback the datastore used by a virtual machine, after an Instant Recovery\n that used the preserve MOID setting. This endpoint `rolls back` the\n recovered virtual machine's datastore from the Rubrik cluster to the\n original datastore.\n * Added `owner` and `status` fields to the `EmailSubscriptionSummary`.\n object used in responses for many `/report/{id}/email_subscription`.\n and `/report/email_subscription/{subscription_id}` endpoints.\n * Added `availableSpace` and `readerLocationSummary` fields to the\n `NfsLocationDetail` object used in responses for `/archive/nfs` and\n `/archive/nfs/{id}` endpoints.\n * Added `availableSpace` and `readerLocationSummary` fields to the\n `QstarLocationSummary` object used in responses for the `/archive/qstar`.\n endpoint.\n * Added `availableSpace` and `readerLocationSummary` fields to the\n `QstarLocationDetail` object used in responses for the `/archive/qstar/{id}`.\n endpoint.\n * Added `readerLocationSummary` field to the `ObjectStoreLocationDetail`.\n object used in responses for the `/archive/object_store` and\n `/archive/object_store/{id}` endpoints.\n * Added `readerLocationSummary` field to the `DcaLocationDetail` object\n used in responses for the `/archive/dca` and `/archive/dca/{id}` endpoints.\n * Added a new field `guestOsType` to `HypervVirtualMachineDetail`.\n object used in response of `GET /hyperv/vm/{id}`.\n * Added a new field `guestOsType` to `VirtualMachineDetail`.\n object referred by `VappVmDetail`.\n * Added new field `fileType` in response of `/mssql/db/{id}/restore_files`.\n * Added an optional field `agentStatus` to `VirtualMachineSummary` object used\n in response of `GET /vmware/vm` endpoint. 
This allows user to check the\n Rubrik Backup Service connection status of the corresponding VMware VM.\n * Introduced the new `POST /fileset/snapshot/{id}/export_files` endpoint to\n export multiple files or directories to destination host.\n * Introduced the new `GET /vmware/config/esx_subnets` endpoint to get the\n the preferred subnets to reach ESX hosts.\n * Introduced the new `PATCH /vmware/config/reset_esx_subnets` endpoint to\n reset the preferred subnets to reach ESX hosts.\n * Changed the `PATCH /vmware/config/reset_esx_subnets` endpoint to\n `PATCH /vmware/config/set_esx_subnets`.\n * Removed the `needsInspection` field from the NodeStatus object returned in\n the `/cluster/{id}/node` and `/node` endpoints.\n * Introduced the new `PATCH /auth_domain/{id}` endpoint to update the Active\n Directory configuration parameters.\n * Introduced the new `GET /cluster/{id}/auto_removed_node` endpoint to\n query for unacknowledged automatic node removals by the Rubrik cluster.\n * Introduced the new\n `DELETE /cluster/{id}/auto_removed_node/{node_id}/acknowledge` endpoint to\n acknowledge an automatic node removal.\n * Introduced the new `GET /cluster/{id}/system_status` endpoint to retrieve\n information about the status of the Rubrik cluster.\n * Changed the `POST /cloud_on/azure/subscription` endpoint to to take\n the parameter `AzureSubscriptionRequest` instead of\n `AzureSubscriptionCredential` in body.\n * Changed the `POST /cloud_on/azure/storage_account` endpoint to to take\n the parameter `AzureStorageAccountRequest` instead of\n `AzureStorageAccountCredential` in body.\n * Changed the `POST /cloud_on/azure/resource_group` endpoint to take\n the parameter `AzureResourceGroupRequest` instead of\n `AzureResourceGroupCredential` in body.\n * Added a `reportTemplate` field to the response of both the\n `GET /report/{id}/table` and `GET /report/{id}/chart` endpoints.\n\n ### Changes to Internal API in Rubrik version 4.1\n ## Changes to support instance from image\n * POST /aws/instance and /azure/instance was supported only from a Rubrik\n snapshot. Now it is changed to support instantiation from Rubrik snapshot as\n well as pre-existing image. Rest end point is same, we just changed the\n CreateCloudInstanceRequest object type.\n * Add a new field `ignoreErrors` to POST /vmware/vm/snapshot/{id}/restore_files\n that will let job restore ignore file errors during restore job.\n ## Breaking changes:\n * None is removed as a Nutanix snapshot consistency mandate so it is no\n longer valid in GET /nutanix/vm, GET /nutanix/vm/{id}, and\n PATCH /nutanix/vm/{id}.\n * computeSecurityGroupId is replaced by the object defaultComputeNetworkConfig\n in ObjectStoreLocationSummary ,ObjectStoreUpdateDefinition and\n ObjectStoreReconnectDefinition which are used by\n GET /archive/object_store/{id}, PATCH /archive/object_store/{id} and\n POST /archive/object_store/reconnect respectively.\n * The PUT /throttle endpoint was changed to provide configuration for\n Hyper-V adaptive throttling. Three parameters were added:\n hypervHostIoLatencyThreshold, hypervHostCpuUtilizationThreshold, and\n hypervVmCpuUtilizationThreshold. To differentiate between the multiple\n hypervisors, the existing configuration parameters for VMware were renamed\n VmwareVmIoLatencyThreshold, VmwareDatastoreIoLatencyThreshold and\n VmwareCpuUtilizationThreshold. 
These changes also required modifications\n and additions to the GET /throttle endpoint.\n * For `POST /cluster/{id}/node` endpoint, it gets now `AddNodesConfig` in body\n instead of `Map_NodeConfig` directly.\n * For `POST /node_management/replace_node` endpoint, added the `ipmiPassword`.\n field to the `ReplaceNodeConfig` object.\n * For `POST /stats/system_storage` endpoint, added the miscellaneous, liveMount\n and snapshot field to `SystemStorageStats` object.\n * For `POST /principal_search`, removed `managedId` field from the\n `PrincipalSummary` object and changed the `id` field of the\n `PrincipalSummary` object to correspond to the managed id instead of the\n simple id.\n * For `GET /cluster/{id}/timezone` and `PATCH /cluster/{id}/timezone`, the\n functionality has merged into `GET /cluster/{id}` and `PATCH /cluster/{id}`.\n in v1.\n * Removed the `GET /cluster/{id}/decommissionNodeStatus` endpoint.\n Decommission status is now available through queries of the `jobId` that is\n returned by a decommission request. Queries can be performed at the\n `GET /job/{id}` endpoint.\n * For `GET /api/internal/managed_volume/?name=`, the name match is now\n exact instead of infix\n * Updated the list of available attribute and measure values for the `chart0`.\n and `chart1` parameters for the `PATCH /report/{id}` endpoint.\n * Updated the list of available column values for the `table` parameter for the\n `PATCH /report/{id}` endpoint.\n * Updated the `FolderHierarchy` response object to include\n `effectiveSlaDomainId`, `effectiveSlaDomainName`,\n `effectiveSlaSourceObjectId`, and `effectiveSlaSourceObjectName`.\n\n ## Feature Additions/improvements:\n * Added the field `pendingSnapshotCount` to ManagedVolumeSummary and\n ManagedVolumeDetail objects used in responses for endpoints\n `GET /managed_volume`, `POST /managed_volume`, `GET /managed_volume/{id}`,\n `PATCH /managed_volume/{id}`, `GET /organization/{id}/managed_volume`.\n * Introduced the `GET /managed_volume/snapshot/export/{id}` endpoint\n to retrieve details of a specific managed volume snapshot export.\n * Added the `name` filter for GET requests on the /replication/target endpoint.\n This filter allows users to filter results based on the name of a\n replication target.\n * Added the `name` filter for GET requests on the /archive/location endpoint.\n This filter allows users to filter results based on the name of an\n archival location.\n * Added new fields `replicas` and `availabilityGroupId` on GET /mssql\n and GET /mssql/{id}. If a database is an availability database,\n it will have some number of replicas, which are copies of the database\n running on different instances. Otherwise, there will only be one\n replica, which represents the single copy of the database. The field\n `availabilityGroupId` will be set only for availability databases\n and points to the availability group of the database. Also deprecated\n several fields on these endpoints, as they should now be accessed via\n the `replicas` field.\n * Added `Cluster` notification type.\n * Added optional `organizationId` parameter to to the grant/revoke and get\n authorization endpoints. 
This parameter can be used to\n grant/revoke/get authorizations with respect to a specific Organization.\n * Added endpoint to get/set whether the Rubrik Backup Service is automatically\n deployed to a guest OS.\n * Added cloudInstantiationSpec field to Hyper-V VM endpoint for configuring\n automatic cloud conversion\n * Introduced a new end point /cluster/{id}/platforminfo to GET information\n about the platform the current software is running on\n * Introduced the `GET /organization` and `GET /organization/{id}` endpoints\n for retrieving the list of organizations and a single organization.\n * Introduced the `POST /organization` endpoint for creating organizations,\n the `PATCH /organization/{id}` endpoint for updating organizations and the\n `DELETE /organization/{id}` endpoint for deleting organizations.\n * Introduced the `GET /organization/{id}/stats/storage_growth_timeseries`.\n endpoint and the `GET /organization/{id}/stats/total_storage_usage` for\n getting Physical Storage Growth over Time and Total Physical Storage Usage\n on a per Organization basis.\n * Introduced a number of endpoints of the format\n `GET /organization/{id}/` for retrieving all the resources of\n the corresponding type in a given organization.\n * Introduced a number of endpoints of the format\n `GET /organization/{id}//metric` for retrieving the protection\n counts of the resources of the corresponding type in a given organization.\n * Added the `reportTemplate` filter for GET requests on the /report endpoint.\n This allows queried reports to be filtered and sorted by report template.\n * Introduced the `POST /cluster/{id}/security/password/strength` endpoint\n for assessing the strength of passwords during bootstrap through rkcli.\n * Added a new `ipv6` field in the response of the `GET /cluster/{id}/discover`.\n endpoint.\n * Added relatedIds field for EventSummary object to give more context about\n the event.\n * Added operatingSystemType field for NutanixSummary object. This field\n represents the type of operating system on the Nutanix virtual machine.\n\n ### Changes to Internal API in Rubrik version 4.0\n ## Breaking changes:\n * For `GET /unmanaged_object` endpoint, replaced the `Fileset` of object_type\n filter with more specific object types: `WindowsFileset`, `LinuxFileset` and\n `ShareFileset`. Also added filter value for additional unmanaged objects\n we now support.\n * For /mssql/db/{id}/compatible_instance added recoveryType as mandatory\n query parameter\n\n ## Feature Additions/improvements:\n * Added QStar end points to support it as an archival location. The location\n is always encrypted and an encryption password must be set while adding the\n location. 
End points added:\n - `DELETE /archive/qstar` to clean up the data in the bucket in the QStar\n archival location.\n - `GET /archive/qstar` to retrieve a summary of all QStar archival locations.\n - `POST /archive/qstar` to add a QStar archival location.\n - `POST /archive/qstar/reconnect` to reconnect to a specific QStar archival\n location.\n - `POST /archive/qstar/remove_bucket` to remove buckets matching a prefix\n from QStar archival location.\n - `GET /archive/qstar/{id}` to retrieve a summary information from a specific\n QStar archival location.\n - `PATCH /archive/qstar/{id}` to update a specific QStar archival location.\n * Added the `name` filter for GET requests on the /archive/location endpoint.\n This filter allows users to filter results based on the name of an\n archival location.\n * Introduced an optional parameter `encryptionPassword` for the\n `/data_location/nfs` `POST` endpoint. This password is used for\n deriving the master key for encrypting the NFS archival location.\n * Introduced /managed\\_volume, /managed\\_volume/snapshot/export/{id},\n and other child endpoints for creating, deleting, and updating\n Managed Volumes and its exports and snapshots.\n * Added support for Hyper-V.\n * Add new /hierarchy endpoint to support universal hierarchy view.\n * Added support for Nutanix.\n * Moved and merged vCenter refresh status and delete status from independent\n internal endpoints to a single status field in v1 vCenter detail.\n * Added endpoint to get/set whether the Rubrik Backup Service is automatically\n deployed to a guest OS.\n * Introduced an optional parameter `minTolerableNodeFailures` for the\n `/cluster/decommissionNode` `POST` endpoint. This parameter specifies the\n minimum fault tolerance to node failures that must exist when a node is\n decommissioned.\n * Added `nodeId` to `AsyncRequestStatus` to improve debugging job failures.\n\n ### Changes to Internal API in Rubrik version 3.2.0\n ## Breaking changes:\n * Introduced endpoint /host/share/id/search to search for\n files on the network share.\n * Introduced endpoints /host/share and /host/share/id to\n support native network shares under /host endpoint.\n * For /unmanaged_object endpoints, change sort_attr to sort_by\n sort_attr used to accept a comma separated list of column names to sort.\n Now sort_by only accepts a single column name.\n * For /unmanaged_object endpoints, removed the need for object type when\n deleting unmanaged objects and its snapshots.\n\n ## Feature Additions/improvements:\n * Added internal local_ end points. These are used for\n handling operations on per-node auto-scaling config values.\n Please see src/spec/local-config/comments for details.\n * For the response of /mssql/db/{id}/restore_files, added two more fields\n for each file object. They are the original file name and file length\n of the file to be restore.\n * Introduced a new end point /cluster/{id}/is_registered to GET registration\n status. With this change, we can query if the cluster is registered in the\n Rubrik customer database.\n * Introduced a new end point /cluster/{id}/registration_details to POST\n registration details. Customers are expected to get the registration details\n from the support portal. 
On successful submission of registration details\n with a valid registration id, the cluster will mark itself as registered.\n * For the /mssql/instance/{id} end point, added fields configuredSlaDomainId,\n configuredSlaDomainName, logBackupFrequencyInSeconds, logRetentionHours,\n and copyOnly.\n * Introduced optional parameter keepMacAddresses to\n POST /vmware/vm/snapshot/{id}/mount, /vmware/vm/snapshot/{id}/export, and\n /vmware/vm/snapshot/{id}/instant_recovery endpints.\n This allows new VMs to have the same MAC address as their source VMs.\n\n ## Bug fixes:\n * Made path parameter required in GET /browse. Previously, an error was\n thrown when path was not passed in. This solves that bug.\n", + "x-logo": { + "url": "https://www.rubrik.com/wp-content/uploads/2016/11/Rubrik-Snowflake-small.png" + } + }, + "basePath": "/api/internal", + "schemes": [ + "https" + ], + "consumes": [ + "application/json" + ], + "paths": { + "/cluster/{id}/setupnetwork": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get the network setup status", + "description": "Retrieves the status of a network setup request for a cluster or node.", + "operationId": "getSetupNetworkStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "name": "request_id", + "in": "query", + "description": "ID of the network setup request.", + "required": true, + "type": "integer", + "format": "int64" + } + ], + "responses": { + "200": { + "description": "Status of the network setup request.", + "schema": { + "$ref": "#/definitions/ClusterConfigStatus" + } + } + }, + "x-group": "cluster", + "x-unauthenticated": true + }, + "post": { + "tags": [ + "/cluster" + ], + "summary": "Rubrik CDM network setup for a cluster or node", + "description": "Issues a network setup request to a specified Rubrik cluster or node.", + "operationId": "setupNetwork", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "cluster_config", + "description": "Network configuration for the Rubrik cluster.", + "required": true, + "schema": { + "$ref": "#/definitions/ClusterConfig" + } + } + ], + "responses": { + "202": { + "description": "Request handle to check the network setup status.", + "schema": { + "$ref": "#/definitions/NodeOperation" + } + } + }, + "x-group": "cluster", + "x-unauthenticated": true + } + }, + "/organization/{id}/mssql": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get information for authorized SQL Server resources in an organization", + "description": "Retrieve summary information for the explicitly authorized SQL Server resources of an organization. Information for a SQL Server resource is only included when the organization has an explicit authorization for the resource. 
This endpoint returns an empty list for the default global organization.", + "operationId": "getExplicitlyAuthorizedMssqlResources", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Get SQL Server resources.", + "schema": { + "$ref": "#/definitions/ManagedObjectSummaryListResponse" + } + } + }, + "x-group": "organization_mssql" + } + }, + "/organization/{id}/nutanix": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get information for authorized Nutanix resources in an organization", + "description": "Retrieve summary information for the explicitly authorized Nutanix resources of an organization. Information for a Nutanix resource is only included when the organization has an explicit authorization for the resource. This endpoint returns an empty list for the default global organization.", + "operationId": "getExplicitlyAuthorizedNutanixResources", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Get Nutanix resources.", + "schema": { + "$ref": "#/definitions/NutanixHierarchyObjectSummaryListResponse" + } + } + }, + "x-group": "organization_nutanix" + } + }, + "/polaris/replication/source/replicate_app/{snappable_id}": { + "post": { + "tags": [ + "/polaris/replication/source" + ], + "summary": "Replicate snapshots for the snappable of the specified ID", + "description": "Replicate snapshots for the snappable of the specified ID. The ID is the snappable ID and the snappable can be an AppBlueprint with its children EC2 instances.", + "operationId": "schedulePolarisSourcePullReplicateApp", + "parameters": [ + { + "name": "snappable_id", + "in": "path", + "description": "Snappable ID of which we are replicating snapshots.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "definition", + "description": "Polaris source pull replicate definition.", + "required": true, + "schema": { + "$ref": "#/definitions/PolarisPullReplicateDefinition" + } + } + ], + "responses": { + "202": { + "description": "Polaris replication pull replicate succefully scheduled.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "polaris_replication_source" + } + }, + "/unmanaged_object/{id}/snapshot": { + "get": { + "tags": [ + "/unmanaged_object" + ], + "summary": "(DEPRECATED) Get summary of all the unmanaged snapshots for a given object", + "description": "Get summary of all the unmanaged snapshots for the object specified by ID. 
This endpoint will be removed in CDM v5.3 in favor of `GET /v1/unmanaged_object/{id}/snapshot`.", + "operationId": "queryUnmanagedObjectSnapshots", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of a object.", + "required": true, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Limit the number of matches returned.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Ignore these many matches in the beginning.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "search_value", + "in": "query", + "description": "Search snapshot by Date and Time.", + "required": false, + "type": "string" + }, + { + "name": "unmanaged_snapshot_type", + "in": "query", + "description": "Filter by Unmanaged Snapshot Type. Valid attributes are OnDemand, Retrieved, Relic, and Unprotected.", + "required": false, + "type": "string", + "enum": [ + "OnDemand", + "Retrieved", + "Relic", + "Unprotected" + ] + }, + { + "name": "before_date", + "in": "query", + "description": "Filter all the snapshots before a date.", + "required": false, + "type": "string", + "format": "date-time" + }, + { + "name": "after_date", + "in": "query", + "description": "Filter all the snapshots after a date.", + "required": false, + "type": "string", + "format": "date-time" + }, + { + "name": "sort_by", + "in": "query", + "description": "Sort by given attribute.", + "required": false, + "type": "string", + "enum": [ + "SnapshotDateAndTime", + "UnmanagedSnapshotType", + "LocalStorage", + "ArchiveStorage", + "RetentionSlaDomainName" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "The sort order. Defaults to asc if not specified.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "Get page summary about umanaged snapshots for a given object.", + "schema": { + "$ref": "#/definitions/UnmanagedSnapshotSummaryListResponse" + } + } + }, + "deprecated": true, + "x-group": "unmanaged_object" + } + }, + "/fileset_template/bulk": { + "post": { + "tags": [ + "/fileset" + ], + "summary": "Create fileset templates", + "description": "Create fileset templates. The template is applied to the host. 
Each template is a set of paths on the host.\n\nA template uses full paths and wildcards to define the objects to include, exclude, and exempt from exclusion.\n\nThe **_exceptions_** value specifies paths that should not be excluded from the fileset by the **_exclude_** value.\n\nSpecify an array of full path descriptions for each property **_include_**, **_exclude_**, and **_exceptions_**.\n\nAcceptable wildcard characters are.\n+ **_\\*_** Single asterisk matches zero or more characters up to a path deliminator.\n+ **_\\*\\*_** Double asterisk matches zero or more characters.\n\nThe following rules apply to path descriptions.\n+ Accepts UTF-8 characters.\n+ Case sensitive.\n+ Forward slash character **_/_** is the path deliminator.\n+ Symbolic links must point to a subset of a non symbolic link path.\n+ Paths that do not start with **_/_** are modified to start with **_\\*\\*/_**.\n+ Paths that do not end with **_\\*_** are modified to end with **_/\\*\\*_**.", + "operationId": "bulkCreateFilesetTemplate", + "parameters": [ + { + "in": "body", + "name": "definitions", + "description": "Provide an array containing a separate object for each fileset template definition.", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/FilesetTemplateCreate" + } + } + } + ], + "responses": { + "201": { + "description": "Summary information for the created fileset templates.", + "schema": { + "$ref": "#/definitions/FilesetTemplateDetailListResponse" + } + } + }, + "x-group": "fileset" + }, + "delete": { + "tags": [ + "/fileset" + ], + "summary": "Delete fileset templates", + "description": "Deletes specfied fileset templates. Detaches and retains all associated filesets as independent filesets with the existing values.", + "operationId": "bulkDeleteFilesetTemplate", + "parameters": [ + { + "name": "ids", + "in": "query", + "description": "Provide an array with the ID of each fileset template to remove.", + "required": true, + "type": "array", + "items": { + "type": "string" + } + }, + { + "name": "preserve_snapshots", + "in": "query", + "description": "Flag to indicate whether to convert snapshots of filesets of the deleted templates to relics or to delete them. Applies to all templates being deleted. Default is true.", + "required": false, + "type": "boolean" + } + ], + "responses": { + "204": { + "description": "Successfully removed all specified fileset templates." + }, + "404": { + "description": "Fileset template deletion failed for at least one fileset template." 
+ } + }, + "x-group": "fileset" + }, + "patch": { + "tags": [ + "/fileset" + ], + "summary": "Modify fileset templates", + "description": "Modify the values of specified fileset templates.", + "operationId": "bulkUpdateFilesetTemplate", + "parameters": [ + { + "in": "body", + "name": "definitions", + "description": "Provide an array containing a separate object for each fileset template being modified.", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/FilesetTemplatePatch" + } + } + } + ], + "responses": { + "200": { + "description": "Detailed information for modified fileset templates.", + "schema": { + "$ref": "#/definitions/FilesetTemplateDetailListResponse" + } + } + }, + "x-group": "fileset" + } + }, + "/oracle/host/{id}": { + "get": { + "tags": [ + "/oracle" + ], + "summary": "Get Oracle Host information", + "description": "Retrieve detailed information for a specified Oracle Host object.", + "operationId": "getOracleHost", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an Oracle Host object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Successful query results.", + "schema": { + "$ref": "#/definitions/OracleHostDetail" + } + } + }, + "x-group": "oracle_host" + }, + "patch": { + "tags": [ + "/oracle" + ], + "summary": "Update an Oracle Host", + "description": "Update properties of an Oracle Host object.", + "operationId": "updateOracleHost", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an Oracle Host object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "update_properties", + "description": "Properties to use for the update of an Oracle Host object.", + "required": true, + "schema": { + "$ref": "#/definitions/OracleUpdate" + } + } + ], + "responses": { + "200": { + "description": "Successfully updated an Oracle Host object.", + "schema": { + "$ref": "#/definitions/OracleHostDetail" + } + } + }, + "x-group": "oracle_host" + } + }, + "/cloud_on/azure/storage_account": { + "post": { + "tags": [ + "/cloud_on" + ], + "summary": "Get all the storage accounts for an azure resource group", + "description": "Get all the storage accounts for an azure resource group.\n", + "operationId": "getAzureStorageAccounts", + "parameters": [ + { + "in": "body", + "name": "azure_storage_account_request", + "description": "An Azure storage account request that contains the credentials of the Azure storage account and compute proxy configuration.\n", + "required": true, + "schema": { + "$ref": "#/definitions/AzureStorageAccountRequest" + } + } + ], + "responses": { + "200": { + "description": "Returns a list of Azure storage account ID/name pairs. 
The ID is the unique identifier and the name is the display name.\n", + "schema": { + "$ref": "#/definitions/IdNamePairListResponse" + } + } + }, + "x-group": "cloud_instance" + } + }, + "/stats/sla_domain_storage/{id}": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get storage used on cloud for the given SLA domain", + "description": "Get storage used on cloud for the given SLA domain.", + "operationId": "slaDomainStorage", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "SLA Domain ID.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns an object with attribute: name(String), key(String), value(String), frequencyInMin(Integer), lastUpdateTime(Date).", + "schema": { + "$ref": "#/definitions/OfflineStatSummary" + } + } + }, + "x-group": "stats" + } + }, + "/config/usersettable_crystal": { + "get": { + "tags": [ + "/config" + ], + "summary": "Fetch the global Crystal configuration", + "description": "Fetch the global Crystal configuration.", + "operationId": "getUserSettableCrystalConfig", + "parameters": [], + "responses": { + "200": { + "description": "global configuration.", + "schema": { + "$ref": "#/definitions/UserSettableGlobalCrystalConfig" + } + } + }, + "x-group": "internal_config" + }, + "patch": { + "tags": [ + "/config" + ], + "summary": "Update the global Crystal configuration", + "description": "Update the global Crystal configuration.", + "operationId": "updateUserSettableCrystalConfig", + "parameters": [ + { + "in": "body", + "name": "new_values", + "description": "New configuration values.", + "required": true, + "schema": { + "$ref": "#/definitions/UserSettableGlobalCrystalConfig" + } + } + ], + "responses": { + "200": { + "description": "global configuration.", + "schema": { + "$ref": "#/definitions/UserSettableGlobalCrystalConfig" + } + } + }, + "x-group": "internal_config" + } + }, + "/organization/envoy/user_info": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get Rubrik Envoy userInfo for the envoy vm", + "description": "Retrieve a Rubrik Envoy vm userInfo that is used to log in the envoy vm.", + "operationId": "getEnvoyUserInfo", + "parameters": [], + "responses": { + "200": { + "description": "Rubrik Envoy userInfo that is used to log in the envoy vm.", + "schema": { + "$ref": "#/definitions/EnvoyUserInfo" + } + } + }, + "x-group": "organization_resource" + } + }, + "/data_location/nfs/remove_bucket": { + "post": { + "tags": [ + "/data_location" + ], + "summary": "REQUIRES SUPPORT TOKEN - Remove all buckets matching given prefix", + "description": "REQUIRES SUPPORT TOKEN - To be used by internal tests to remove all nfs buckets matching given prefix. Returns a list of buckets successfully removed. 
A support token is required for this operation.", + "operationId": "removeNfsBucket", + "parameters": [ + { + "in": "body", + "name": "request", + "description": "Remove bucket request configurations.", + "required": true, + "schema": { + "$ref": "#/definitions/RemoveNfsBucketRequest" + } + } + ], + "responses": { + "200": { + "description": "List of buckets removed.", + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "x-group": "archival" + } + }, + "/unmanaged_object/snapshot/assign_sla": { + "post": { + "tags": [ + "/unmanaged_object" + ], + "summary": "Assign on-demand/unmanaged snapshots to a SLA Domain for managing retention synchronously", + "description": "Assign on-demand/unmanaged snapshots to the specified SLA Domain for managing retention.", + "operationId": "assignSlaToSnapshot", + "parameters": [ + { + "in": "body", + "name": "assignment_info", + "description": "Object specifying the SLA Domain ID and a comma-separated list of the snapshot IDs to assign to the SLA Domain.", + "required": true, + "schema": { + "$ref": "#/definitions/UnmanagedObjectSnapshotSlaAssignmentInfo" + } + } + ], + "responses": { + "204": { + "description": "Assigned on-demand/unmanaged snapshots to the specified SLA Domain for retention." + } + }, + "x-group": "unmanaged_object" + } + }, + "/cloud_on/validate/cloud_image_conversion": { + "post": { + "tags": [ + "/cloud_on" + ], + "summary": "Validate a snapshot for incremental conversion", + "description": "Runs validation on a snapshot for conversion, while reading the VM details populated by indexing in database. Can return not-ready response if the snapshot has not been indexed yet. Can be run on both snapshots and snappables, and in case of snappables, the last indexed snapshot is validated.\n", + "operationId": "queryValidateCloudImageConversion", + "parameters": [ + { + "name": "snapshot_id", + "in": "query", + "description": "ID of the snapshot to validate.", + "required": false, + "type": "string" + }, + { + "name": "snappable_id", + "in": "query", + "description": "ID of snappable to validate.", + "required": false, + "type": "string" + }, + { + "name": "cloud_provider", + "in": "query", + "description": "Cloud provider.", + "required": true, + "type": "string", + "enum": [ + "S3", + "Azure" + ] + } + ], + "responses": { + "200": { + "description": "Validation result.", + "schema": { + "$ref": "#/definitions/ValidationResponse" + } + } + }, + "x-group": "cloud_instance" + } + }, + "/archive/qstar": { + "delete": { + "tags": [ + "/archive" + ], + "summary": "REQUIRES SUPPORT TOKEN - Tries to clean up the data in the bucket in the QStar archival location", + "description": "REQUIRES SUPPORT TOKEN - To be used by internal tests to clean the buckets. A support token is required for this operation.", + "operationId": "cleanQstarBucket", + "parameters": [ + { + "in": "body", + "name": "definition", + "description": "Archival definition.", + "required": true, + "schema": { + "$ref": "#/definitions/QstarLocationDefinitionWithCredential" + } + } + ], + "responses": { + "204": { + "description": "Returned if bucket was successfully emptied." + } + }, + "x-group": "archival" + }, + "post": { + "tags": [ + "/archive" + ], + "summary": "Add a QStar archival location", + "description": "Add a new QStar archival location object. 
Initiates an asynchronous job to connect to the archival location.", + "operationId": "createQstarLocation", + "parameters": [ + { + "in": "body", + "name": "definition", + "description": "Object that contains information about the specified QStar archival location.", + "required": true, + "schema": { + "$ref": "#/definitions/QstarLocationDefinitionWithCredential" + } + } + ], + "responses": { + "202": { + "description": "The request ID for an asynchronous request to add a QStar archival location as an owner cluster.\n", + "schema": { + "$ref": "#/definitions/JobScheduledResponse" + } + } + }, + "x-group": "archival" + }, + "get": { + "tags": [ + "/archive" + ], + "summary": "Retrieve an array of QStar archival location objects", + "description": "Retrieve an array containing summary information for all QStar archival location objects.", + "operationId": "queryQstarLocations", + "parameters": [], + "responses": { + "200": { + "description": "Summary information for QStar archival locations.", + "schema": { + "$ref": "#/definitions/QstarLocationSummaryListResponse" + } + } + }, + "x-group": "archival" + } + }, + "/vmware/agent": { + "get": { + "tags": [ + "/vmware/agent" + ], + "summary": "Get Rubrik Backup Service deployment setting", + "description": "Retrieve the global setting for automatic deployment of the Rubrik Backup Service to virtual machines.", + "operationId": "getVmAgentDeploymentSetting", + "parameters": [], + "responses": { + "200": { + "description": "Returned on successful retrieval of deployment setting.", + "schema": { + "$ref": "#/definitions/AgentDeploymentSettings" + } + } + }, + "x-group": "vm" + }, + "put": { + "tags": [ + "/vmware/agent" + ], + "summary": "Change the Rubrik Backup Service deployment setting", + "description": "Modify the global setting for automatic deployment of the Rubrik Backup Service to virtual machines.", + "operationId": "updateVmAgentDeploymentSetting", + "parameters": [ + { + "in": "body", + "name": "settings", + "description": "Modify the Rubrik Backup Service deployment setting for a specified virtual machine.", + "required": true, + "schema": { + "$ref": "#/definitions/AgentDeploymentSettings" + } + } + ], + "responses": { + "200": { + "description": "Returned on successful retrieval of deployment setting.", + "schema": { + "$ref": "#/definitions/AgentDeploymentSettings" + } + } + }, + "x-group": "vm" + } + }, + "/mssql/instance/count": { + "get": { + "tags": [ + "/mssql" + ], + "summary": "(DEPRECATED) Returns a count of Microsoft SQL instances", + "description": "Returns a count of all Microsoft SQL instances. This endpoint will be removed in CDM v6.0 in favor of `GET v1/mssql/instance/count`.", + "operationId": "countMssqlInstance", + "parameters": [], + "responses": { + "200": { + "description": "Returned if the query was successful.", + "schema": { + "$ref": "#/definitions/CountResponse" + } + } + }, + "deprecated": true, + "x-group": "mssql" + } + }, + "/stats/replication/outgoing/time_series": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get the total outgoing bandwidth to the replication clusters", + "description": "Get the total outgoing bandwidth to the replication clusters.", + "operationId": "replicationBandwidthOutgoing", + "parameters": [ + { + "name": "range", + "in": "query", + "description": "Range for timeseries. eg: -1h, -1min, etc. 
Default value is -1h.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "TimeSeries depicting bytes per second.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/TimeStat" + } + } + } + }, + "x-group": "stats" + } + }, + "/host/envoy": { + "get": { + "tags": [ + "/host" + ], + "summary": "Get summary information for hosts with Envoy organization information", + "description": "Retrieve summary information for all hosts that are registered with a Rubrik cluster. Also returns organization information for each host. This information is available if Envoy is enabled on those hosts.", + "operationId": "queryHostEnvoy", + "parameters": [ + { + "name": "operating_system_type", + "in": "query", + "description": "Filter the summary information based on the operating system type. Accepted values are 'Windows', 'UnixLike', 'ANY', 'NONE'.\nUse **_NONE_** to only return information for hosts templates that do not have operating system type set.\nUse **_ANY_** to only return information for hosts that have operating system type set.", + "required": false, + "type": "string", + "enum": [ + "ANY", + "NONE", + "UnixLike", + "Windows" + ] + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filters the summary information based on the Rubrik cluster specified by the value of primary_cluster_id. Use 'local' for the Rubrik cluster that is hosting the current REST API session.", + "required": false, + "type": "string" + }, + { + "name": "hostname", + "in": "query", + "description": "Returns hosts with hostnames that match the provided string in an infix search.", + "required": false, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Specifies the host attribute to use in sorting the host summary information. Performs an ASCII sort of the summary information using the specified attribute, in the order specified.\nValid attributes are 'hostname'.", + "required": false, + "type": "string", + "enum": [ + "hostname" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending or descending.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "Summary and organization information for registered hosts.", + "schema": { + "$ref": "#/definitions/HostSummaryListResponse" + } + } + }, + "x-group": "hosts" + } + }, + "/storage/array_volume_group/{id}/search": { + "get": { + "tags": [ + "/storage/array" + ], + "summary": "Search volume group for a file", + "description": "Search for a file within a specified storage array volume group. 
The search string can be a prefix portion of the full path for the file or a prefix portion of the name of the file.", + "operationId": "searchStorageArrayVolumeGroup", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a storage array volume group object.", + "required": true, + "type": "string" + }, + { + "name": "path", + "in": "query", + "description": "Search string that specifies either a prefix portion of the full path for the file or a prefix portion of the name of the file.", + "required": true, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Maximum number of entries in the response.", + "required": false, + "type": "integer", + "format": "int32" + }, + { + "name": "cursor", + "in": "query", + "description": "Pagination cursor returned by the previous request.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "File search results.", + "schema": { + "$ref": "#/definitions/SearchResponseListResponse" + } + } + }, + "x-group": "storage_array_volume_group" + } + }, + "/vcd/vapp": { + "get": { + "tags": [ + "/vcd/vapp" + ], + "summary": "(DEPRECATED) Get summary for vApps", + "description": "Retrieve summary information for all vCD vApp objects. This endpoint will be removed in CDM v6.1 in favor of `GET v1/vcd/vapp`.", + "operationId": "queryVcdVapps", + "parameters": [ + { + "name": "sort_by", + "in": "query", + "description": "Attribute to sort the vCD vApp list on.", + "required": false, + "type": "string", + "enum": [ + "Name", + "EffectiveSlaDomainName", + "SlaAssignment" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Order for sorting the results, either ascending or descending.", + "required": false, + "type": "string", + "default": "asc", + "enum": [ + "asc", + "desc" + ] + }, + { + "name": "limit", + "in": "query", + "description": "Limit the number of matches returned.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Number of matches to ignore from the beginning of the results.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "name", + "in": "query", + "description": "Search for a vCD vApp object by name.", + "required": false, + "type": "string" + }, + { + "name": "is_relic", + "in": "query", + "description": "Filter by isRelic field of vCD vApp object. 
Return both relic and non-relic vApps when this value is not specified.", + "required": false, + "type": "boolean" + }, + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter by ID of effective SLA domain.", + "required": false, + "type": "string" + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter by primary cluster ID, or **local**.", + "required": false, + "type": "string" + }, + { + "name": "sla_assignment", + "in": "query", + "description": "Filter by SLA assignment type.", + "required": false, + "type": "string", + "enum": [ + "Derived", + "Direct", + "Unassigned" + ] + }, + { + "name": "include_backup_task_info", + "in": "query", + "description": "Include backup task information in response.", + "required": false, + "type": "boolean", + "default": false + } + ], + "responses": { + "200": { + "description": "Summary information for vCD vApps.", + "schema": { + "$ref": "#/definitions/VcdVappSummaryListResponse" + } + } + }, + "deprecated": true, + "x-group": "vcd_vapp" + } + }, + "/cluster/{id}/disk": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get disks", + "description": "Retrieves information about the disks in the Rubrik cluster.", + "operationId": "getDisks", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "List of disks in this Rubrik cluster.", + "schema": { + "$ref": "#/definitions/DiskInfoListResponse" + } + } + }, + "x-group": "cluster" + } + }, + "/hyperv/scvmm": { + "get": { + "tags": [ + "/hyperv/scvmm" + ], + "summary": "Get summary of all the Hyper-V SCVMMs", + "description": "Get summary of all the Hyper-V SCVMMs.", + "operationId": "queryHypervScvmm", + "parameters": [ + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter by ID of effective SLA domain.", + "required": false, + "type": "string" + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter by primary cluster ID, or **local**.", + "required": false, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Limit the number of matches returned.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Ignore these many matches in the beginning.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "name", + "in": "query", + "description": "Search SCVMM by SCVMM name.", + "required": false, + "type": "string" + }, + { + "name": "sla_assignment", + "in": "query", + "description": "Filter by SLA assignment type.", + "required": false, + "type": "string", + "enum": [ + "Direct", + "Unassigned" + ] + }, + { + "name": "sort_by", + "in": "query", + "description": "Sort the result by the given attribute.", + "required": false, + "type": "string", + "enum": [ + "effectiveSlaDomainName", + "name" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending or descending.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "List of Hyper-V SCVMM summaries.", + "schema": { + "$ref": "#/definitions/HypervScvmmSummaryListResponse" + } + } + }, + "x-group": "hyperv_scvmm" + }, + "post": { + "tags": [ + "/hyperv/scvmm" + ], + "summary": "Register a 
Hyper-V SCVMM", + "description": "Register a Hyper-V SCVMM.", + "operationId": "registerScvmm", + "parameters": [ + { + "in": "body", + "name": "scvmm", + "description": "Registration definition for a Hyper-V SCVMM.", + "required": true, + "schema": { + "$ref": "#/definitions/HypervScvmmRegister" + } + } + ], + "responses": { + "202": { + "description": "Status for the add Hyper-V SCVMM request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "hyperv_scvmm" + } + }, + "/hyperv/vm/snapshot/{id}/restore_files": { + "post": { + "tags": [ + "/hyperv/vm" + ], + "summary": "Restore files from snapshot", + "description": "Restore files from a snapshot to the original source location.", + "operationId": "restoreHypervVirtualMachineSnapshotFiles", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a snapshot.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for a job to restore files from a snapshot.", + "required": true, + "schema": { + "$ref": "#/definitions/HypervRestoreFilesConfig" + } + } + ], + "responses": { + "202": { + "description": "Status of a request for an async restore job.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "hyperv_vm" + } + }, + "/stats/total_physical_storage/time_series": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Returns a time series of the total physical storage used", + "description": "Returns a time series of the total physical storage used.", + "operationId": "localStorageTimeseries", + "parameters": [ + { + "name": "range", + "in": "query", + "description": "Specifies the range of the time series as an interval into the past from the present. The format for the interval is -Xu, where X is an integer and u is the unit. Valid units are s, min, h, d, w, mon and y.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns a time series of the bytes used per day.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/TimeStat" + } + } + } + }, + "x-group": "internal_report" + } + }, + "/aws/ec2_instance/{id}/search": { + "get": { + "tags": [ + "/aws/ec2_instance" + ], + "summary": "Search for file in an EC2 instance", + "description": "Search across all the snapshots for a file within the EC2 instance. Search by using a full path prefix or a filename prefix.", + "operationId": "searchAwsEc2Instance", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the EC2 instance object.", + "required": true, + "type": "string" + }, + { + "name": "path", + "in": "query", + "description": "String value to use when searching an EC2 instance.The value can be a 'path prefix' that narrows the search range to all files that are hierarchically beneath the specified path. 
Or, the value can be a 'filename prefix' that narrows the search range to all files with a filename that starts with the specified string value.", + "required": true, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Maximum number of entries in the response.", + "required": false, + "type": "integer", + "format": "int32" + }, + { + "name": "cursor", + "in": "query", + "description": "Pagination cursor returned by the previous request.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Search results for specified EC2 instance.", + "schema": { + "$ref": "#/definitions/SearchResponseListResponse" + } + } + }, + "x-group": "aws_ec2_instance" + } + }, + "/event_series/{id}/status": { + "get": { + "tags": [ + "/event_series" + ], + "summary": "(CAUTION! WE ARE DEPRECATING THIS ENDPOINT) Get information for a specific event", + "description": "Use the event series ID and jobInstanceId for an event to retrieve information about the event, including ID, progress, cancelable, cancel pending, and the number of times the event has been attempted. JobInstanceId can be left unspecified in case of a non job related event, but is expected for a job related event.", + "operationId": "eventStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Event series ID (event_series_id) of the event.", + "required": true, + "type": "string" + }, + { + "name": "job_instance_id", + "in": "query", + "description": "The job instance ID (job_instance_id) of the corresponding job instance.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Information for the event specified by event_series_id.", + "schema": { + "$ref": "#/definitions/EventStatusSummary" + } + } + }, + "x-group": "events" + } + }, + "/polaris/replication/source/{id}/metadata": { + "get": { + "tags": [ + "/polaris/replication/source" + ], + "summary": "Get metadata summary replicated from this Polaris source", + "description": "Get a summary of remote metadata that exists on this cluster which has the Polaris source identified by this Polaris Id as the primary cluster, so that the Polaris side can prepare the metadata package for the later source refresh job.", + "operationId": "getReplicatedPolarisSnappableSummaries", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Managed ID of the Polaris replication source.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Get summary of all replicated snappables from this Polaris source.", + "schema": { + "type": "array", + "items": { + "description": "List of summaries of replicated snappables.", + "$ref": "#/definitions/PolarisReplicationSourceReplicatedSnappableSummary" + } + } + } + }, + "x-group": "polaris_replication_source" + } + }, + "/archive/location/{id}/reader/promote": { + "post": { + "tags": [ + "/archive" + ], + "summary": "Take ownership of a reader archival location", + "description": "Start an asynchronous job to make the current Rubrik cluster the owner of a reader archival location. 
After successful completion, the Rubrik cluster that was the previous owner will not have access to the archival location.\n", + "operationId": "promoteArchivalLocation", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an archival location object.", + "required": true, + "type": "string" + }, + { + "name": "refresh_before_promote", + "in": "query", + "description": "Boolean value to indicate whether to refresh the archival location before promoting it to an owner location. If not specified, this defaults to true. This should only be set to false if you are certain nothing has changed on the archival location since the last refresh.", + "required": false, + "type": "boolean" + } + ], + "responses": { + "202": { + "description": "The request ID for an asynchronous request to take ownership of a specified archival location.\n", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "archival" + } + }, + "/replication/target/{id}": { + "delete": { + "tags": [ + "/replication" + ], + "summary": "Remove the replication target", + "description": "Remove the replication target from this cluster.", + "operationId": "deleteReplicationTarget", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the replication target.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Successfully removed the replication target." + }, + "422": { + "description": "Returned if the request fails.", + "schema": { + "$ref": "#/definitions/RequestFailedException" + } + } + }, + "x-group": "replication" + }, + "get": { + "tags": [ + "/replication" + ], + "summary": "Get summary for the replication target", + "description": "Retrieve the ID, name, and address of the replication target.", + "operationId": "getReplicationTarget", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the replication target.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns summary details for the replication target.", + "schema": { + "$ref": "#/definitions/ReplicationTargetSummary" + } + } + }, + "x-group": "replication" + }, + "patch": { + "tags": [ + "/replication" + ], + "summary": "Update the replication target information", + "description": "Update the setup information, address, username, and password for the replication target.", + "operationId": "updateReplicationTarget", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the replication target.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "update", + "description": "Object containing updated replication target information.", + "required": true, + "schema": { + "$ref": "#/definitions/ReplicationTargetUpdate" + } + } + ], + "responses": { + "200": { + "description": "Returns summary of updated replication target.", + "schema": { + "$ref": "#/definitions/ReplicationTargetSummary" + } + } + }, + "x-group": "replication" + } + }, + "/aws/ec2_instance/{id}/missed_snapshot": { + "get": { + "tags": [ + "/aws/ec2_instance" + ], + "summary": "Get missed snapshots for an EC2 instance", + "description": "Retrieve details about missed policy-driven snapshots for an EC2 instance.", + "operationId": "queryAwsEc2InstanceMissedSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of an EC2 instance object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": 
"Missed snapshots for an EC2 instance.", + "schema": { + "$ref": "#/definitions/MissedSnapshotListResponse" + } + } + }, + "x-group": "aws_ec2_instance" + } + }, + "/cluster/{id}/bootstrap_config": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Retrieve Rubrik cluster configuration", + "description": "Retrieves configuration information for the nodes of the Rubrik cluster.", + "operationId": "getBootstrapConfig", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "Rubrik cluster configuration information.", + "schema": { + "$ref": "#/definitions/BootstrapConfig" + } + } + }, + "x-group": "cluster", + "x-unauthenticated": true + } + }, + "/managed_volume/{id}/end_snapshot": { + "post": { + "tags": [ + "/managed_volume" + ], + "summary": "End managed volume snapshot", + "description": "Close a managed volume for writes. A snapshot will be created containing all writes since the last begin-snapshot call.", + "operationId": "closeWrites", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of managed volume.", + "required": true, + "type": "string" + }, + { + "name": "owner_id", + "in": "query", + "description": "A string representing the owner of a snapshot. The end snapshot request will fail if the owner of the in-flight snapshot is different from the one specified in the request.", + "required": false, + "type": "string" + }, + { + "name": "end_snapshot_delay_in_seconds", + "in": "query", + "description": "Specifies an interval in seconds. The snapshot will not end before the specified interval elapses. When this value is defined, the end snapshot operation happens asynchronously after the API call returns.", + "required": false, + "type": "integer", + "format": "int32", + "x-hidden": true + }, + { + "in": "body", + "name": "params", + "description": "Snapshot parameters. By default, all managed volume snapshots will follow the SLA assigned to the managed volume. If this snapshot should follow a different SLA, specify the override SLA ID here. Note that assigning an override SLA will make this an on-demand snapshot.", + "required": false, + "schema": { + "$ref": "#/definitions/ManagedVolumeSnapshotConfig" + } + } + ], + "responses": { + "200": { + "description": "Returns ID of a snapshot.", + "schema": { + "$ref": "#/definitions/ManagedVolumeSnapshotSummary" + } + } + }, + "x-group": "managed_volume" + } + }, + "/vcd/hierarchy/{id}/children": { + "get": { + "tags": [ + "/vcd/hierarchy" + ], + "summary": "(DEPRECATED) Get immediate descendant objects", + "description": "Retrieve the list of immediate descendant objects for the specified parent. This endpoint will be removed in CDM v6.1 in favor of `GET v1/vcd/hierarchy/{id}/children`.", + "operationId": "getVcdHierarchyChildren", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the parent vCD hierarchy object. 
To get top-level nodes, use **root** as the ID.", + "required": true, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Attribute to sort the results on.", + "required": false, + "type": "string", + "enum": [ + "Name", + "EffectiveSlaDomainName", + "SlaAssignment", + "ConnectionStatus", + "VappCount" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Order for sorting the results, either ascending or descending.", + "required": false, + "type": "string", + "default": "asc", + "enum": [ + "asc", + "desc" + ] + }, + { + "name": "limit", + "in": "query", + "description": "Limit the number of matches returned.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Number of matches to ignore from the beginning of the results.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "name", + "in": "query", + "description": "Search object by object name.", + "required": false, + "type": "string" + }, + { + "name": "is_relic", + "in": "query", + "description": "Filter by isRelic field of vCD vApp hierarchy object. Return both relic and non-relic children when this value is not specified.", + "required": false, + "type": "boolean" + }, + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter by ID of effective SLA domain.", + "required": false, + "type": "string" + }, + { + "name": "object_type", + "in": "query", + "description": "Filter by node object type.", + "required": false, + "type": "string", + "enum": [ + "Cluster", + "VimServer", + "Org", + "OrgVdc", + "Catalog", + "vApp" + ] + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter by primary cluster ID, or **local**.", + "required": false, + "type": "string" + }, + { + "name": "sla_assignment", + "in": "query", + "description": "Filter by SLA assignment type.", + "required": false, + "type": "string", + "enum": [ + "Derived", + "Direct", + "Unassigned" + ] + }, + { + "name": "snappable_status", + "in": "query", + "description": "Filters vCD hierarchy objects based on the specified query value.", + "required": false, + "type": "string", + "enum": [ + "Protectable" + ] + } + ], + "responses": { + "200": { + "description": "Summary list of immediate descendant objects.", + "schema": { + "$ref": "#/definitions/VcdHierarchyObjectSummaryListResponse" + } + } + }, + "deprecated": true, + "x-group": "vcd_hierarchy" + } + }, + "/mssql/db/bulk": { + "patch": { + "tags": [ + "/mssql" + ], + "summary": "(DEPRECATED) Update multiple Microsoft SQL databases", + "description": "Update multiple Microsoft SQL databases with the specified properties. 
This endpoint will be removed in CDM v6.0 in favor of `PATCH v1/mssql/db/bulk`.", + "operationId": "bulkUpdateMssqlDb", + "parameters": [ + { + "in": "body", + "name": "dbs_update_properties", + "description": "Properties to update for each database.", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/MssqlDbUpdateId" + } + } + } + ], + "responses": { + "200": { + "description": "Returns a detailed view of all updated databases.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/MssqlDbDetail" + } + } + } + }, + "deprecated": true, + "x-group": "mssql" + } + }, + "/managed_object/{managed_id}/descendants": { + "get": { + "tags": [ + "/managed_object" + ], + "summary": "Gets the summaries of a managed object's descendants", + "description": "Gets the summaries of all the descendants of a managed object.", + "operationId": "getObjectDescendants", + "parameters": [ + { + "name": "managed_id", + "in": "path", + "description": "ManagedID of object whose descendants to get.", + "required": true, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Maximum number of results to return.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "object_type", + "in": "query", + "description": "Comma-separated list of objectType values by which to search (all if not specified): ComputeCluster|DataCenter|Fileset|Folder|Global|Host|MssqlDatabase|MssqlInstance|VirtualMachine|VmwareHost|vCenter.", + "required": false, + "type": "array", + "items": { + "type": "string" + } + }, + { + "name": "operating_system_type", + "in": "query", + "description": "Comma-separated list of operatingSystemType values by which to search (all if not specified): Linux|Windows.", + "required": false, + "type": "array", + "items": { + "type": "string" + } + }, + { + "name": "offset", + "in": "query", + "description": "Starting offset of the results to return.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter by primary cluster ID, or local.", + "required": false, + "type": "string" + }, + { + "name": "search_attr", + "in": "query", + "description": "Comma-separated list of attributes by which to search: name|hostname.", + "required": false, + "type": "array", + "items": { + "type": "string" + } + }, + { + "name": "search_value", + "in": "query", + "description": "Comma-separated list of values by which to search (one for each search_attr).", + "required": false, + "type": "array", + "items": { + "type": "string" + } + }, + { + "name": "search_type", + "in": "query", + "description": "Comma-separated list of search types (one for each search_attr): prefix|infix|exact.", + "required": false, + "type": "array", + "items": { + "type": "string" + } + }, + { + "name": "join_operator", + "in": "query", + "description": "Whether results must match any or all of the search attributes: all|any (default is \"all\").", + "required": false, + "type": "string" + }, + { + "name": "sort_attr", + "in": "query", + "description": "Comma-separated list of attributes by which to sort: name|objectType|hostname.", + "required": false, + "type": "array", + "items": { + "type": "string" + } + }, + { + "name": "sort_order", + "in": "query", + "description": "Comma-separated list of sort orders (one for each sort_attr): asc|desc.", + "required": false, + "type": "array", + "items": { + "type": "string" + } + } + 
], + "responses": { + "200": { + "description": "Summaries of each descendant object.", + "schema": { + "$ref": "#/definitions/ManagedObjectSummaryListResponse" + } + } + }, + "x-group": "managed_object" + } + }, + "/cluster/{id}/remove_node": { + "post": { + "tags": [ + "/cluster" + ], + "summary": "Removes a node from a Rubrik cluster", + "description": "Schedules a node for removal.", + "operationId": "removeNode", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "node_id", + "description": "Id of the node to remove.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Request handle to check the status of the node removal.", + "schema": { + "$ref": "#/definitions/NodeOperation" + } + } + }, + "x-group": "cluster" + }, + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get node removal status", + "description": "Retrieves the status of the remove node request.", + "operationId": "removeNodeStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "name": "request_id", + "in": "query", + "description": "Remove node request id.", + "required": true, + "type": "integer", + "format": "int64" + } + ], + "responses": { + "200": { + "description": "Remove node request status.", + "schema": { + "$ref": "#/definitions/RemoveNodeStatus" + } + } + }, + "x-group": "cluster" + } + }, + "/hyperv/scvmm/{id}": { + "delete": { + "tags": [ + "/hyperv/scvmm" + ], + "summary": "Delete a Hyper-V SCVMM", + "description": "Delete a Hyper-V SCVMM.", + "operationId": "deleteHypervScvmm", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Hyper-V SCVMM.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Request ID of the scheduled Hyper-V SCVMM delete job.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "hyperv_scvmm" + }, + "get": { + "tags": [ + "/hyperv/scvmm" + ], + "summary": "Get details of a Hyper-V SCVMM", + "description": "Get details of a Hyper-V SCVMM.", + "operationId": "getHypervScvmm", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Hyper-V SCVMM.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Details about the Hyper-V SCVMM.", + "schema": { + "$ref": "#/definitions/HypervScvmmDetail" + } + } + }, + "x-group": "hyperv_scvmm" + }, + "patch": { + "tags": [ + "/hyperv/scvmm" + ], + "summary": "Update a Hyper-V SCVMM", + "description": "Update SCVMM with specified properties.", + "operationId": "updateHypervScvmm", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of Hyper-V SCVMM.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "update_properties", + "description": "Properties to update.", + "required": true, + "schema": { + "$ref": "#/definitions/HypervScvmmUpdate" + } + } + ], + "responses": { + "200": { + "description": "Return details about virtual machine.", + "schema": { + "$ref": "#/definitions/HypervScvmmDetail" + } + } + }, + "x-group": "hyperv_scvmm" + } + }, + "/archive/nfs": { + "post": { + "tags": [ + "/archive" + ], + "summary": "Add a new NFS archival location", + "description": "Add 
a new NFS archival location. Initiate an asynchronous job to connect to the archival location target.\n", + "operationId": "createNfsLocation", + "parameters": [ + { + "in": "body", + "name": "definition", + "description": "Object containing information about the archival location.", + "required": true, + "schema": { + "$ref": "#/definitions/NfsLocationCreationDefinition" + } + } + ], + "responses": { + "202": { + "description": "Returns the job ID for connecting to a new NFS archival location.", + "schema": { + "$ref": "#/definitions/JobScheduledResponse" + } + } + }, + "x-group": "archival" + }, + "get": { + "tags": [ + "/archive" + ], + "summary": "Get NFS archival locations", + "description": "Retrieve an array of NFS archival location objects for the cluster.\n", + "operationId": "queryNfsLocations", + "parameters": [], + "responses": { + "200": { + "description": "Returns an array of NFS archival location objects.", + "schema": { + "$ref": "#/definitions/NfsLocationDetailListResponse" + } + } + }, + "x-group": "archival" + } + }, + "/job/deleteReplicationSource": { + "post": { + "tags": [ + "/job" + ], + "summary": "REQUIRES SUPPORT TOKEN - deletes a replication source cluster on the target cluster", + "description": "REQUIRES SUPPORT TOKEN - Deletes a replication source cluster on the target cluster. A support token is required for this operation.", + "operationId": "scheduleDeleteReplicationSourceJob", + "parameters": [ + { + "in": "body", + "name": "config", + "description": "Configuration for the delete replication source job.", + "required": true, + "schema": { + "$ref": "#/definitions/DeleteReplicationSourceJobConfig" + } + } + ], + "responses": { + "200": { + "description": "TODO.", + "schema": { + "type": "string" + } + } + }, + "x-group": "replication" + } + }, + "/snapshot/expire": { + "post": { + "tags": [ + "/snapshot" + ], + "summary": "REQUIRES SUPPORT TOKEN - Expire a snapshot", + "description": "REQUIRES SUPPORT TOKEN - Will mark the snapshot as expired. A support token is required for this operation.", + "operationId": "internalExpireSnapshot", + "parameters": [ + { + "in": "body", + "name": "config", + "description": "ID of the Snapshot that needs to be expired locally.", + "required": true, + "schema": { + "$ref": "#/definitions/InternalExpireSnapshotConfig" + } + } + ], + "responses": { + "200": { + "description": "Return the expire job instance id on success.", + "schema": { + "type": "string" + } + }, + "404": { + "description": "Returned if snapshot does not exist.", + "schema": { + "type": "string" + } + }, + "422": { + "description": "Returned if expire snapshot request fails.", + "schema": { + "$ref": "#/definitions/RequestFailedException" + } + } + }, + "x-group": "internal_snapshot" + } + }, + "/archive/location/{id}/owner/pause": { + "post": { + "tags": [ + "/archive" + ], + "summary": "Pause archiving", + "description": "Pause archiving to a specified active archival location of the current Rubrik cluster.\n", + "operationId": "pauseArchivalLocation", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an archival location object.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Archival location was paused successfully." 
+ } + }, + "x-group": "archival" + } + }, + "/session/realm/{name}": { + "post": { + "tags": [ + "/session" + ], + "summary": "Create user session in realm", + "description": "Open a user session in realm.", + "operationId": "createSessionWithRealmV2", + "parameters": [ + { + "name": "name", + "in": "path", + "description": "Bind the new session to the specified LDAP service.\n", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "session_request", + "description": "Create session request.", + "required": true, + "schema": { + "$ref": "#/definitions/SessionRequest" + } + } + ], + "responses": { + "200": { + "description": "Details about the user session.", + "schema": { + "$ref": "#/definitions/SessionResponse" + } + } + }, + "x-group": "session", + "x-rk-block-api-tokens": true, + "x-rk-primary-auth-ok": true + } + }, + "/snapshot/{id}/storage/stats": { + "get": { + "tags": [ + "/snapshot" + ], + "summary": "(DEPRECATED) Use the same API in V1 version. Returns storage stats for a snapshot. Note that the corresponding V1 API doesn't take snappable_id as an input parameter", + "description": "Returns the storage statistics for a snapshot.", + "operationId": "getSnapshotStorageStats", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a snapshot object.", + "required": true, + "type": "string" + }, + { + "name": "snappable_id", + "in": "query", + "description": "Snappable ID of target object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns storage stats for a snapshot.", + "schema": { + "$ref": "#/definitions/SnapshotStorageStats" + } + } + }, + "deprecated": true, + "x-group": "internal_snapshot" + } + }, + "/host/count": { + "get": { + "tags": [ + "/host" + ], + "summary": "Count all hosts", + "description": "Retrieve the total number of hosts, and the total number of protected hosts. A protected host has at least one protected fileset.", + "operationId": "countHost", + "parameters": [ + { + "name": "operating_system_type", + "in": "query", + "description": "Filter the summary information based on the operating system type. Values are 'UnixLike', 'Windows', 'ANY', or 'NONE'.\nUse **_ANY_** to return the total number of hosts that have any operating system type set.\nUse **_NONE_** to return the total number of hosts that do not have operating system type set.", + "required": false, + "type": "string", + "enum": [ + "UnixLike", + "Windows", + "ANY", + "NONE" + ] + }, + { + "name": "share_type", + "in": "query", + "description": "Filter the summary information based on the network share type. 
Values are 'SMB', 'NFS', 'ANY', or 'NONE'.\nUse **_ANY_** to return the total number of hosts that have any network share type set.\nUse **_NONE_** to return the total number of hosts that do not have network share type type set.", + "required": false, + "type": "string", + "enum": [ + "SMB", + "NFS", + "ANY", + "NONE" + ] + } + ], + "responses": { + "200": { + "description": "Total number of hosts and total number of protected hosts.", + "schema": { + "$ref": "#/definitions/ProtectedObjectsCount" + } + } + }, + "x-group": "hosts" + } + }, + "/cluster/{id}/ui_preference": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get UI preference of cluster", + "description": "Get UI preference of queried cluster.", + "operationId": "getUiPreference", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "Returned if the query was successful.", + "schema": { + "$ref": "#/definitions/ClusterUiPreference" + } + } + }, + "x-group": "cluster" + }, + "patch": { + "tags": [ + "/cluster" + ], + "summary": "Update UI preference of cluster", + "description": "Update UI preference of cluster.", + "operationId": "updateUiPreference", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "ui_preference", + "description": "updated UI preference.", + "required": true, + "schema": { + "$ref": "#/definitions/ClusterUiPreferenceUpdate" + } + } + ], + "responses": { + "200": { + "description": "Returned if the update was successful.", + "schema": { + "$ref": "#/definitions/ClusterUiPreference" + } + } + }, + "x-group": "cluster" + } + }, + "/vcd/cluster/{id}/refresh": { + "post": { + "tags": [ + "/vcd/cluster" + ], + "summary": "(DEPRECATED) Refresh a vCD Cluster", + "description": "Start an asynchronous job to refresh the metadata for a specified vCD Cluster object. This endpoint will be removed in CDM v6.1 in favor of `POST v1/vcd/cluster/{id}/refresh`.", + "operationId": "refreshVcdCluster", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a vCD Cluster object.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status of a vCD Cluster metadata refresh job.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "deprecated": true, + "x-group": "vcd_cluster" + } + }, + "/vcd/vapp/request/{id}": { + "get": { + "tags": [ + "/vcd/vapp" + ], + "summary": "(DEPRECATED) Get vApp job status", + "description": "Retrieve the details of a specified asynchronous job for a vApp. 
This endpoint will be removed in CDM v6.1 in favor of `GET v1/vcd/vapp/request/{id}`.", + "operationId": "getVappAsyncRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an asynchronous job.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status of a vApp asynchronous job.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "deprecated": true, + "x-group": "vcd_vapp" + } + }, + "/vcd/vapp/snapshot/{id}/download": { + "post": { + "tags": [ + "/vcd/vapp" + ], + "summary": "(DEPRECATED) Download snapshot from archive", + "description": "Provides a method for retrieving a snapshot, that is not available locally, from an archival location. This endpoint will be removed in CDM v6.1 in favor of `POST v1/vcd/vapp/snapshot/{id}/download`.", + "operationId": "createVcdVappDownloadSnapshotFromCloud", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status for the download request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "deprecated": true, + "x-group": "vcd_vapp" + } + }, + "/archive/dca/reader/connect": { + "post": { + "tags": [ + "/archive" + ], + "summary": "Connect to a DCA archival location as a reader", + "description": "Connect to an existing DCA archival location as a reader. Initiates an asynchronous job to connect to the archival location.\n", + "operationId": "connectDcaLocationAsReader", + "parameters": [ + { + "in": "body", + "name": "request", + "description": "Object that contains information about the archival location.", + "required": true, + "schema": { + "$ref": "#/definitions/DcaReaderConnectDefinition" + } + } + ], + "responses": { + "202": { + "description": "The request ID for an asynchronous request to connect to a DCA archival location as a reader cluster.\n", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "archival" + } + }, + "/cluster/{id}/network_interface": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get network interfaces for a Rubrik Cluster cluster", + "description": "Retrieves network interfaces(including VLANs) on bond0/bond1.", + "operationId": "getNetworkInterface", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "name": "interface", + "in": "query", + "description": "Will retrieve info for a specific interface if passed in.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Network interfaces for the Rubrik cluster.", + "schema": { + "$ref": "#/definitions/NetworkInterfaceListResponse" + } + } + }, + "x-group": "cluster" + } + }, + "/nutanix/vm/{id}/snapshot": { + "post": { + "tags": [ + "/nutanix/vm" + ], + "summary": "Create on-demand VM snapshot", + "description": "Create an on-demand snapshot for the given VM ID.", + "operationId": "createOnDemandNutanixBackup", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the VM.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for the on-demand backup.", + "required": false, + "schema": { + "$ref": "#/definitions/BaseOnDemandSnapshotConfig" + } + } + ], + "responses": { + "202": { + "description": "Status 
for the backup request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "nutanix_vm" + }, + "delete": { + "tags": [ + "/nutanix/vm" + ], + "summary": "Delete all snapshots of VM", + "description": "Delete all snapshots of a virtual machine.", + "operationId": "deleteNutanixSnapshots", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Virtual machine ID.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Snapshots successfully deleted." + } + }, + "x-group": "nutanix_vm" + }, + "get": { + "tags": [ + "/nutanix/vm" + ], + "summary": "Get list of snapshots of VM", + "description": "Retrieve the following information for all snapshots for a VM: ID, snapshot date, expiration date, type of source object, name of VM, type of snapshot, state of the cloud, level of consistency, name of snapshot VM, index state, total number of files, IDs of all replication location, IDs of all archival locations.", + "operationId": "queryNutanixSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the vm.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns summary information for all snapshots.", + "schema": { + "$ref": "#/definitions/NutanixVmSnapshotSummaryListResponse" + } + } + }, + "x-group": "nutanix_vm" + } + }, + "/vcd/cluster/{id}": { + "delete": { + "tags": [ + "/vcd/cluster" + ], + "summary": "(DEPRECATED) Remove vCD Cluster", + "description": "Start an asynchronous job to remove a vCD Cluster object. This endpoint will be removed in CDM v6.1 in favor of `DELETE v1/vcd/cluster/{id}`.", + "operationId": "deleteVcdCluster", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a vCD Cluster object.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status of a job to delete a vCD Cluster.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "deprecated": true, + "x-group": "vcd_cluster" + }, + "get": { + "tags": [ + "/vcd/cluster" + ], + "summary": "(DEPRECATED) Get vCD Cluster details", + "description": "Retrieve detailed information for a vCD Cluster object. This endpoint will be removed in CDM v6.1 in favor of `GET v1/vcd/cluster/{id}`.", + "operationId": "getVcdCluster", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a vCD Cluster object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Details for a vCD Cluster object.", + "schema": { + "$ref": "#/definitions/VcdClusterDetail" + } + } + }, + "deprecated": true, + "x-group": "vcd_cluster" + }, + "patch": { + "tags": [ + "/vcd/cluster" + ], + "summary": "(DEPRECATED) Change vCD Cluster object", + "description": "Modify the hostname and credentials of a specified vCD Cluster object. 
This endpoint will be removed in CDM v6.1 in favor of `PATCH v1/vcd/cluster/{id}`.", + "operationId": "updateVcdCluster", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a vCD Cluster object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "patch_properties", + "description": "Updated hostname and credentials for a specified vCD Cluster object.", + "required": true, + "schema": { + "$ref": "#/definitions/VcdClusterPatch" + } + } + ], + "responses": { + "200": { + "description": "Details of an updated vCD Cluster object.", + "schema": { + "$ref": "#/definitions/VcdClusterDetail" + } + } + }, + "deprecated": true, + "x-group": "vcd_cluster" + } + }, + "/report": { + "post": { + "tags": [ + "/report" + ], + "summary": "Create a new report", + "description": "Create a new report by specifying one of the report templates.", + "operationId": "createReport", + "parameters": [ + { + "in": "body", + "name": "report_config", + "description": "Report. name and Report template.", + "required": true, + "schema": { + "$ref": "#/definitions/ReportCreate" + } + } + ], + "responses": { + "201": { + "description": "Returned if report successfully created.", + "schema": { + "$ref": "#/definitions/ReportDetail" + } + } + }, + "x-group": "internal_report" + }, + "get": { + "tags": [ + "/report" + ], + "summary": "Get summary information for all reports", + "description": "Retrieve summary information for each report. Optionally, filter the retrieved information.", + "operationId": "queryReports", + "parameters": [ + { + "name": "report_template", + "in": "query", + "description": "Provides the name of a report template for filtering the reports returned by a request based on that name.", + "required": false, + "type": "string", + "enum": [ + "CapacityOverTime", + "ObjectProtectionSummary", + "ObjectTaskSummary", + "ObjectIndexingSummary", + "ProtectionTasksDetails", + "ProtectionTasksSummary", + "RecoveryTasksDetails", + "SlaComplianceSummary", + "SystemCapacity" + ] + }, + { + "name": "report_type", + "in": "query", + "description": "Filter the returned reports based off the reports type.", + "required": false, + "type": "string", + "enum": [ + "Canned", + "Custom" + ] + }, + { + "name": "name", + "in": "query", + "description": "Filter the returned reports based off their name.", + "required": false, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Sort the returned reports based off the specified attribute. Default: name.", + "required": false, + "type": "string", + "default": "name", + "enum": [ + "name", + "reportTemplate", + "reportType" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Order by which to sort the returned reports. Default: asc.", + "required": false, + "type": "string", + "default": "asc", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "Summary information for reports.", + "schema": { + "$ref": "#/definitions/ReportSummaryListResponse" + } + } + }, + "x-group": "internal_report" + } + }, + "/cluster/{id}/discover": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "(DEPRECATED) Discover bootstrappable nodes", + "description": "Searches for nodes that can bootstrap into the specified Rubrik cluster. 
This endpoint will be moved to v1 in the next major version.", + "operationId": "discover", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "List of nodes and associated IPv6 addresses available to bootstrap into the specified Rubrik cluster.", + "schema": { + "$ref": "#/definitions/BootstrappableNodeInfoListResponse" + } + } + }, + "deprecated": true, + "x-group": "cluster", + "x-unauthenticated": true + } + }, + "/aws/ec2_instance/{ec2_instance_id}/storage_volume/{id}": { + "patch": { + "tags": [ + "/aws/ec2_instance" + ], + "summary": "Update storage volume protection", + "description": "Update whether to take snapshots of a specified AWS storage volume object.", + "operationId": "updateAwsEc2InstanceStorageVolume", + "parameters": [ + { + "name": "ec2_instance_id", + "in": "path", + "description": "ID assigned to an EC2 instance object.", + "required": true, + "type": "string" + }, + { + "name": "id", + "in": "path", + "description": "ID assigned to an AWS storage volume object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Properties to update on a specified AWS storage volume object.", + "required": true, + "schema": { + "$ref": "#/definitions/AwsEc2InstanceStorageVolumeUpdateConfig" + } + } + ], + "responses": { + "200": { + "description": "Updated AWS storage volume object.", + "schema": { + "$ref": "#/definitions/AwsEc2InstanceStorageVolumeDetail" + } + } + }, + "x-group": "aws_ec2_instance" + } + }, + "/cluster/{id}/security/rksupport_cred": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Check status for updating cluster-wide rksupport credential", + "description": "Check whether the specified Rubrik cluster should be updated with a cluster-wide rksupport credential.", + "operationId": "fetchRksupportCredentialStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of a Rubrik cluster, or use *me* for the Rubrik cluster that is hosting the current session.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "Retune the status of rksupport credential.", + "schema": { + "$ref": "#/definitions/RksupportCredStatus" + } + } + }, + "x-group": "security" + }, + "post": { + "tags": [ + "/cluster" + ], + "summary": "Schedule job for updating cluster-wide rksupport credential", + "description": "Update the rksupport credential by one of following two:\n 1. username/password of support portal (works for sites connected to\n the Internet).\n 2. the registration details acquired from support portal\n (works for sites that cannot connect to the Internet).\n", + "operationId": "updateClusterRksupportCredential", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of a Rubrik cluster, or use *me* for the Rubrik cluster that is hosting the current session.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "rksupport_cred_update_details", + "description": "Details for updating the cluster-wide rksupport crednetial.\nWe need at least of one following two fields to be provided.\n 1. communityUserCredentials (works for sites connected to\n the Internet).\n 2. 
registrationDetails (works for sites that cannot connect to\n the Internet).\n", + "required": true, + "schema": { + "$ref": "#/definitions/RksupportCredUpdateDetails" + } + } + ], + "responses": { + "202": { + "description": "Schedule job for updating cluster-wide rksupport credential.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "security" + } + }, + "/organization/{id}/mssql/db/metric": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get mssql database metrics", + "description": "Retrieve the total object count, total protected object and no sla object count.", + "operationId": "getMssqlDbMetric", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specify the organization id.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns an object with metrics.", + "schema": { + "$ref": "#/definitions/OrganizationResourceMetric" + } + } + }, + "x-group": "organization_mssql" + } + }, + "/cluster/{id}/security/request/{request_id}": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get details about an async request", + "description": "Get details about a security-related async request.", + "operationId": "getAsyncRequestStatusForClusterSecurity", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "name": "request_id", + "in": "path", + "description": "ID of the request.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Status for the async request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "security" + } + }, + "/volume_group/snapshot/{id}/browse": { + "get": { + "tags": [ + "/volume_group" + ], + "summary": "Lists all files in Volume Group snapshot", + "description": "Lists all files and directories in a given path.", + "operationId": "browseVolumeGroupSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + }, + { + "name": "path", + "in": "query", + "description": "The absolute path of the starting point for the directory listing.", + "required": true, + "type": "string" + }, + { + "name": "offset", + "in": "query", + "description": "Starting position in the list of path entries contained in the query results, sorted by lexicographical order. 
The response includes the specified numbered entry and all higher numbered entries.", + "required": false, + "type": "integer", + "format": "int32" + }, + { + "name": "limit", + "in": "query", + "description": "Maximum number of entries in the response.", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "responses": { + "200": { + "description": "List of files and directories at the specified path.", + "schema": { + "$ref": "#/definitions/BrowseResponseListResponse" + } + } + }, + "x-group": "volume_group" + } + }, + "/cluster/{id}/iscsi": { + "post": { + "tags": [ + "/cluster" + ], + "summary": "Add iscsi targets", + "description": "Add iscsi targets from the specificied portal to the Rubrik cluster.", + "operationId": "addIscsi", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "iscsi_portal_info", + "description": "Iscsi target portal information.", + "required": true, + "schema": { + "$ref": "#/definitions/IscsiAddParams" + } + } + ], + "responses": { + "204": { + "description": "Successfully added to the specified cluster." + } + }, + "x-group": "cluster" + } + }, + "/cluster/{id}/is_on_cloud": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get is on-cloud", + "description": "Check whether this Rubrik cluster is running on cloud.", + "operationId": "isOnCloud", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "True when the Rubrik cluster is running on cloud.", + "schema": { + "$ref": "#/definitions/BooleanResponse" + } + } + }, + "x-group": "cluster", + "x-unauthenticated": true + } + }, + "/user/{id}": { + "delete": { + "tags": [ + "/user" + ], + "summary": "ADMIN ONLY: Delete a user for the given id", + "description": "Delete a user for the given id.", + "operationId": "deleteLocalUser", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the user to be deleted.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "TODO.", + "schema": { + "type": "string" + } + } + }, + "x-group": "user" + }, + "get": { + "tags": [ + "/user" + ], + "summary": "ADMIN ONLY: Details about the specific User", + "description": "To be used by Admin to fetch details about the specific User.", + "operationId": "getUser", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the User that needs to be fetched. 
Pass in 'me' for getting the current logged in user.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "TODO.", + "schema": { + "$ref": "#/definitions/UserDetail" + } + } + }, + "x-group": "user" + }, + "patch": { + "tags": [ + "/user" + ], + "summary": "ADMIN ONLY: Update existing User", + "description": "To be used by Admin to update existing User.", + "operationId": "updateUser", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the User that needs to be updated.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "user_info", + "description": "User info to be updated.", + "required": true, + "schema": { + "$ref": "#/definitions/UserUpdateInfo" + } + } + ], + "responses": { + "200": { + "description": "Returns updated details about the user.", + "schema": { + "$ref": "#/definitions/UserDetail" + } + } + }, + "x-group": "user", + "x-rk-block-api-tokens": true + } + }, + "/aws/ec2_instance/snapshot/{id}/download_files": { + "post": { + "tags": [ + "/aws/ec2_instance" + ], + "summary": "Download files from EC2 instance snapshot", + "description": "Initiate a download request for files in a specified EC2 instance snapshot.", + "operationId": "downloadAwsEc2InstanceSnapshotFiles", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of a Snapshot object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for file download from an EC2 instance snapshot.", + "required": true, + "schema": { + "$ref": "#/definitions/AwsEc2InstanceDownloadFilesConfig" + } + } + ], + "responses": { + "202": { + "description": "Status for the file download request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "aws_ec2_instance" + } + }, + "/nutanix/vm/snapshot/{id}/browse": { + "get": { + "tags": [ + "/nutanix/vm" + ], + "summary": "Lists all files in VM snapshot", + "description": "Lists all files and directories in a given path.", + "operationId": "browseNutanixSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + }, + { + "name": "path", + "in": "query", + "description": "The absolute path of the starting point for the directory listing.", + "required": true, + "type": "string" + }, + { + "name": "offset", + "in": "query", + "description": "Starting position in the list of path entries contained in the query results, sorted by lexicographical order. 
The response includes the specified numbered entry and all higher numbered entries.", + "required": false, + "type": "integer", + "format": "int32" + }, + { + "name": "limit", + "in": "query", + "description": "Maximum number of entries in the response.", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "responses": { + "200": { + "description": "List of files and directories at the specified path.", + "schema": { + "$ref": "#/definitions/BrowseResponseListResponse" + } + } + }, + "x-group": "nutanix_vm" + } + }, + "/stats/per_mount_storage/{id}": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get storage footprint on rubrik for a given mount", + "description": "Get storage footprint on rubrik for a given mount.", + "operationId": "perMountStorage", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Mount ID.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns an object with attribute: name(String), key(String), value(String), frequencyInMin(Integer), lastUpdateTime(Date).", + "schema": { + "$ref": "#/definitions/OfflineStatSummary" + } + } + }, + "x-group": "stats" + } + }, + "/diagnostic/snappable/{id}": { + "get": { + "tags": [ + "/diagnostic" + ], + "summary": "Get diagnostic information on tasks of a snappable", + "description": "Get diagnostic information on tasks of a snappable.", + "operationId": "getSnappableDiagnosticDetail", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Managed ID of the snappable.", + "required": true, + "type": "string" + }, + { + "name": "before_date", + "in": "query", + "description": "Filter all the task actions before a date.", + "required": false, + "type": "string", + "format": "date-time" + }, + { + "name": "after_date", + "in": "query", + "description": "Filter all task actions after a date.", + "required": false, + "type": "string", + "format": "date-time" + }, + { + "name": "task_type", + "in": "query", + "description": "Filter task actions with task types.", + "required": false, + "type": "string", + "enum": [ + "Backup", + "LogBackup", + "Replication", + "LogReplication", + "Archival", + "ArchivalTiering", + "LogArchival", + "LogShipping", + "Instantiate", + "LiveMount", + "InstantRecovery", + "Export", + "Restore", + "InPlaceRecovery", + "DownloadFile", + "RestoreFile", + "Conversion", + "Index", + "Validation" + ] + } + ], + "responses": { + "200": { + "description": "Return diagnostic details on tasks of the snappable.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/TaskDiagnosticInfo" + } + } + } + }, + "x-group": "internal_diagnostic" + } + }, + "/polaris/replication/source/request/{id}": { + "get": { + "tags": [ + "/polaris/replication/source" + ], + "summary": "Get Polaris replication job status", + "description": "Retrieve the details of a specified asynchronous Polaris replication job.", + "operationId": "getPolarisReplicationAsyncJobStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an asynchronous job. 
Should be a composite ID of both the job ID and the instance ID.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Status of a Polaris replication asynchronous job.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "polaris_replication_source" + } + }, + "/organization/{id}/vcd/vapp/metric": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get vCD vApp metrics", + "description": "Retrieve the total object count, total protected object and no sla object count.", + "operationId": "getVcdVappMetric", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns an object with metrics.", + "schema": { + "$ref": "#/definitions/OrganizationResourceMetric" + } + } + }, + "x-group": "organization_vcd" + } + }, + "/oracle/db/{id}/missed_recoverable_range": { + "get": { + "tags": [ + "/oracle" + ], + "summary": "Get missed recoverable ranges of a Oracle database", + "description": "Retrieve a list of missed recoverable ranges for a Oracle database. For each run of one type of error, the first and last occurrence of the error are given.", + "operationId": "getOracleDbMissedRecoverableRanges", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Oracle database.", + "required": true, + "type": "string" + }, + { + "name": "after_time", + "in": "query", + "description": "Filter the missed ranges to end after this time. The date-time string should be in ISO8601 format, such as \"2016-01-01T01:23:45.678\".", + "required": false, + "type": "string", + "format": "date-time" + }, + { + "name": "before_time", + "in": "query", + "description": "Filter the missed ranges to start before this time. The date-time string should be in ISO8601 format, such as \"2016-01-01T01:23:45.678\".", + "required": false, + "type": "string", + "format": "date-time" + } + ], + "responses": { + "200": { + "description": "Returns the missed recoverable ranges for the Oracle database.", + "schema": { + "$ref": "#/definitions/OracleMissedRecoverableRangeListResponse" + } + } + }, + "x-group": "oracle_db" + } + }, + "/oracle/hierarchy/{id}": { + "get": { + "tags": [ + "/oracle/hierarchy" + ], + "summary": "Get summary of a hierarchy object", + "description": "Retrieve details for the specified hierarchy object.", + "operationId": "getOracleHierarchyObject", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the hierarchy object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Details of the hierarchy object.", + "schema": { + "$ref": "#/definitions/OracleHierarchyObjectSummary" + } + } + }, + "x-group": "oracle_hierarchy" + } + }, + "/vcd/vapp/snapshot/{snapshot_id}/instant_recover": { + "post": { + "tags": [ + "/vcd/vapp" + ], + "summary": "(DEPRECATED) Instant Recovery of vApp virtual machines", + "description": "Use Instant Recovery to recover specified vApp virtual machines. 
This endpoint will be removed in CDM v6.1 in favor of `POST v1/vcd/vapp/snapshot/{snapshot_id}/instant_recover`.", + "operationId": "createVappInstantRecovery", + "parameters": [ + { + "name": "snapshot_id", + "in": "path", + "description": "ID assigned to the vApp snapshot object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for a request to recover specified virtual machines from a vApp snapshot.", + "required": true, + "schema": { + "$ref": "#/definitions/VappInstantRecoveryJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Request status for async Instant Recovery job for virtual machines in a vApp snapshot.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "deprecated": true, + "x-group": "vcd_vapp" + } + }, + "/node/{id}/io_stats": { + "get": { + "tags": [ + "/node" + ], + "summary": "Iops and IO throughput of the node", + "description": "Iops and IO throughput of the node.", + "operationId": "getNodeIo", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Id of node to fetch IO throughput stats for.", + "required": true, + "type": "string" + }, + { + "name": "range", + "in": "query", + "description": "Optional starting point for a time series. The starting point is expressed as -, where is an integer and is one of: s(seconds), m(minutes), h(hours), d(days). Default value is -6h.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "IO stats for the node.", + "schema": { + "$ref": "#/definitions/IoStat" + } + } + }, + "x-group": "node" + } + }, + "/managed_object/{managed_id}/summary": { + "get": { + "tags": [ + "/managed_object" + ], + "summary": "Gets the summary of a managed object", + "description": "Gets the summary of a managed object.", + "operationId": "getObjectSummary", + "parameters": [ + { + "name": "managed_id", + "in": "path", + "description": "ManagedID of object whose summary to get.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Summary of managed object.", + "schema": { + "$ref": "#/definitions/ManagedObjectSummary" + } + } + }, + "x-group": "managed_object" + } + }, + "/hierarchy/bulk_sla_conflicts": { + "post": { + "tags": [ + "/hierarchy" + ], + "summary": "(DEPRECATED) Retrieve the list of descendant objects with SLA conflicts in bulk using the V1 version", + "description": "Retrieve the list of descendant objects with an explicitly configured SLA Domain, or inherit an SLA Domain from a different parent for each of the managed IDs.", + "operationId": "bulkHierarchySlaConflicts", + "parameters": [ + { + "in": "body", + "name": "hierarchy_object_ids", + "description": "A list of the IDs of the hierarchy objects.", + "required": true, + "schema": { + "$ref": "#/definitions/HierarchyObjectIds" + } + } + ], + "responses": { + "200": { + "description": "List of SLA Domain conflict summaries for the specified managed IDs.\n", + "schema": { + "$ref": "#/definitions/SlaConflictsSummaryListResponse" + } + } + }, + "deprecated": true, + "x-group": "hierarchy" + } + }, + "/storage/array_volume_group/snapshot/{id}/download": { + "post": { + "tags": [ + "/storage/array" + ], + "summary": "Download an archival snapshot for a volume group", + "description": "Initiate an asynchronous job to download a specified volume group snapshot from an archival location.", + "operationId": "createDownloadSnapshotForStorageArrayVolumeGroup", + "parameters": [ + { + 
"name": "id", + "in": "path", + "description": "ID assigned to a snapshot of a storage array volume group.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status of an asynchronous job for an archival download.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "storage_array_volume_group" + } + }, + "/host/bulk/volume_filter_driver": { + "post": { + "tags": [ + "/host/bulk/volume_filter_driver" + ], + "summary": "Install or uninstall volume filter driver on hosts", + "description": "Install or uninstall volume filter driver on given hosts. Upgrades volume filter driver if a older version is present on the host.", + "operationId": "changeVfdOnHost", + "parameters": [ + { + "in": "body", + "name": "config", + "description": "Host volume filter driver install definition.", + "required": true, + "schema": { + "$ref": "#/definitions/HostVfdInstallRequest" + } + } + ], + "responses": { + "200": { + "description": "Installation status response of each host with errors if any.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/HostVfdInstallResponse" + } + } + } + }, + "x-group": "hosts" + } + }, + "/organization/{id}/managed_volume/metric": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get managed volume metrics", + "description": "Retrieve the total object count, total protected object and no sla object count.", + "operationId": "getManagedVolumeMetric", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specify the organization id.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns an object with metrics.", + "schema": { + "$ref": "#/definitions/OrganizationResourceMetric" + } + } + }, + "x-group": "organization_managed_volume" + } + }, + "/cluster/{id}/ipv6": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Returns the IPv6 of all nodes on one or all interfaces", + "description": "Returns the IPv6 of all nodes on one or all interfaces.", + "operationId": "getIpv6", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "name": "iface", + "in": "query", + "description": "Optional interface name parameter.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returned if the query was successful.", + "schema": { + "$ref": "#/definitions/Ipv6ConfigurationListResponse" + } + } + }, + "x-group": "cluster" + }, + "patch": { + "tags": [ + "/cluster" + ], + "summary": "Update IPv6 configuration on an interface", + "description": "Update IPv6 configuration on an interface.", + "operationId": "updateIpv6", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "ipv6_config", + "description": "IPv6 configuration for an interface.", + "required": true, + "schema": { + "$ref": "#/definitions/Ipv6Configuration" + } + } + ], + "responses": { + "200": { + "description": "Returned if the update was successful.", + "schema": { + "$ref": "#/definitions/Ipv6Configuration" + } + } + }, + "x-group": "cluster" + } + }, + "/data_location/replication_source/delete_job": { + "get": { + "tags": [ + "/data_location" + ], + "summary": "REQUIRES SUPPORT TOKEN - Get the details of all one-off delete 
replication source jobs", + "description": "REQUIRES SUPPORT TOKEN - Get the details of all one-off delete replication source jobs. A support token is required for this operation.", + "operationId": "getDeleteReplicationSourceJobs", + "parameters": [], + "responses": { + "200": { + "description": "List of scheduled delete replication source jobs.", + "schema": { + "$ref": "#/definitions/DeleteReplicationSourceJobListResponse" + } + } + }, + "x-group": "replication" + } + }, + "/cluster/{id}/dns_nameserver": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get DNS servers", + "description": "Retrieve a list the DNS servers assigned to the Rubrik cluster.", + "operationId": "getClusterDnsNameservers", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "List of the DNS servers assigned to the specified Rubrik cluster.", + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "x-group": "cluster" + }, + "post": { + "tags": [ + "/cluster" + ], + "summary": "Assign DNS servers", + "description": "Assign DNS servers to the Rubrik cluster.", + "operationId": "setClusterDnsNameservers", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "servers", + "description": "List of fully qualifed domain names or IPv4 addresses of DNS servers.", + "required": true, + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + } + ], + "responses": { + "204": { + "description": "Successfully assigned the specified DNS servers to the Rubrik cluster." 
+ } + }, + "x-group": "cluster" + } + }, + "/nutanix/vm/snapshot/{id}/restore_files": { + "post": { + "tags": [ + "/nutanix/vm" + ], + "summary": "Restore files", + "description": "Restore files from a snapshot to the source Nutanix virtual machine.", + "operationId": "restoreNutanixVmSnapshotFiles", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for a job to restore files to a source Nutanix virtual machine.", + "required": true, + "schema": { + "$ref": "#/definitions/NutanixRestoreFilesConfig" + } + } + ], + "responses": { + "202": { + "description": "Status for the restore request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "nutanix_vm" + } + }, + "/cloud_on/azure/image/{id}": { + "delete": { + "tags": [ + "/cloud_on" + ], + "summary": "Delete a given Azure cloud image", + "description": "Delete a given Azure cloud image.", + "operationId": "deleteAzurePublicCloudMachineImage", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Azure cloud image.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status for the Azure image deletion request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "cloud_instance" + }, + "get": { + "tags": [ + "/cloud_on" + ], + "summary": "Get details about a given Azure cloud image", + "description": "Get details about a given Azure cloud image.", + "operationId": "getAzurePublicCloudMachineImage", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Azure cloud image.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns details about the azure cloud image.", + "schema": { + "$ref": "#/definitions/AzureImageDetail" + } + } + }, + "x-group": "cloud_instance" + } + }, + "/archive/qstar/reader/connect": { + "post": { + "tags": [ + "/archive" + ], + "summary": "Connect to a QStar archival location as a reader", + "description": "Connect to an existing QStar archival location as a reader. Initiates an asynchronous job to connect to the archival location.\n", + "operationId": "connectQstarLocationAsReader", + "parameters": [ + { + "in": "body", + "name": "request", + "description": "Access credentials for the specified QStar archival location.", + "required": true, + "schema": { + "$ref": "#/definitions/QtarReaderConnectDefinition" + } + } + ], + "responses": { + "202": { + "description": "The request ID for an asynchronous request to connect to a QStar archival location as a reader cluster.\n", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "archival" + } + }, + "/fileset/bulk": { + "post": { + "tags": [ + "/fileset" + ], + "summary": "Create filesets for a host", + "description": "Create filesets for a network host. Each fileset is a fileset template applied to a host.", + "operationId": "bulkCreateFileset", + "parameters": [ + { + "in": "body", + "name": "definitions", + "description": "For each fileset, specify a template id along with either host id or share id. 
If a share id is provided, the host id will be inferred from the host share.", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/FilesetCreate" + } + } + } + ], + "responses": { + "201": { + "description": "Details of new filesets.", + "schema": { + "$ref": "#/definitions/FilesetDetailListResponse" + } + } + }, + "x-group": "fileset" + }, + "delete": { + "tags": [ + "/fileset" + ], + "summary": "Delete filesets", + "description": "Delete filesets by specifying the fileset IDs.", + "operationId": "bulkDeleteFileset", + "parameters": [ + { + "name": "ids", + "in": "query", + "description": "Provide a comma-separated list of fileset IDs.", + "required": true, + "type": "array", + "items": { + "type": "string" + } + }, + { + "name": "preserve_snapshots", + "in": "query", + "description": "Flag to indicate whether to convert snapshots of all deleted filesets to relics or to delete them. Applies to all filesets. Default is true.", + "required": false, + "type": "boolean" + } + ], + "responses": { + "204": { + "description": "Deleted all specified filesets." + }, + "404": { + "description": "Fileset deletion failed for at least one fileset." + } + }, + "x-group": "fileset" + } + }, + "/archive/qstar/remove_bucket": { + "post": { + "tags": [ + "/archive" + ], + "summary": "REQUIRES SUPPORT TOKEN - Remove all buckets matching given prefix", + "description": "REQUIRES SUPPORT TOKEN - To be used by internal tests to remove all QStar buckets matching given prefix. Returns a list of buckets successfully removed. A support token is required for this operation.", + "operationId": "removeQstarBucket", + "parameters": [ + { + "in": "body", + "name": "request", + "description": "Remove bucket request configurations.", + "required": true, + "schema": { + "$ref": "#/definitions/RemoveQstarBucketRequest" + } + } + ], + "responses": { + "200": { + "description": "List of buckets removed.", + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "x-group": "archival" + } + }, + "/organization/{id}/archive/location": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get archival locations associated with this organization", + "description": "Retrieve the total list of archive locations that have been granted to this organization.", + "operationId": "getOrganizationArchiveLocations", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns a list of Archive Locations that have been assigned to this organization.", + "schema": { + "$ref": "#/definitions/ArchivalLocationSummaryListResponse" + } + } + }, + "x-group": "organization_archival" + } + }, + "/node_management/cluster_ip": { + "get": { + "tags": [ + "/node_management" + ], + "summary": "Get a list of a cluster's always-available Ips", + "description": "Get a list of a cluster's always-available Ips.", + "operationId": "getClusterIps", + "parameters": [], + "responses": { + "200": { + "description": "TODO.", + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "x-group": "internal_node_management" + }, + "post": { + "tags": [ + "/node_management" + ], + "summary": "Modify the list of cluster IPs", + "description": "Modify the list of cluster IPs.", + "operationId": "updateClusterIps", + "parameters": [ + { + "in": "body", + "name": "new_cluster_ips", + "description": "New list of cluster IPs.", + 
"required": true, + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + } + ], + "responses": { + "200": { + "description": "TODO.", + "schema": { + "$ref": "#/definitions/ClusterIpRec" + } + } + }, + "x-group": "internal_node_management" + } + }, + "/polaris/failover/{id}/status": { + "get": { + "tags": [ + "/polaris/failover" + ], + "summary": "Returns a string representing the status of the failover", + "description": "Returns a string representing the status of the failover. This will be used by Polaris to resolve inconsistencies between state on Polaris and CDM to avoid getting trapped in unrecoverable states. This endpoint is reserved for Polaris.", + "operationId": "getFailoverStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the failover.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returned if the query was successful.", + "schema": { + "$ref": "#/definitions/FailoverStatusResponse" + } + } + }, + "x-group": "failover" + } + }, + "/aws/account/{id}/refresh": { + "post": { + "tags": [ + "/aws/account" + ], + "summary": "Refresh an AWS account", + "description": "Refresh the information an AWS account object identified by its ID.", + "operationId": "refreshAwsAccount", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of an AWS account to refresh.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Created request to refresh an AWS account object.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "aws_account" + } + }, + "/host/share/{id}": { + "delete": { + "tags": [ + "/host/share" + ], + "summary": "Delete a network share", + "description": "Delete network by specifying the network share ID.", + "operationId": "deleteHostShare", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Modify network share with given ID.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Deleted specified network share." 
+ } + }, + "x-group": "hosts" + }, + "get": { + "tags": [ + "/host/share" + ], + "summary": "Get detailed information for a network share", + "description": "Retrieve detailed information for a network share.", + "operationId": "getHostShare", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the network share.", + "required": true, + "type": "string" + } + ], + "responses": { + "201": { + "description": "Detailed information for the specified network share.", + "schema": { + "$ref": "#/definitions/HostShareDetail" + } + } + }, + "x-group": "hosts" + }, + "patch": { + "tags": [ + "/host/share" + ], + "summary": "Modify a network share", + "description": "Modify a network share object.", + "operationId": "modifyHostShare", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Modify network share with given ID.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "share", + "description": "Network share modification definition.", + "required": true, + "schema": { + "$ref": "#/definitions/HostSharePatch" + } + } + ], + "responses": { + "201": { + "description": "Summary information for modified network share.", + "schema": { + "$ref": "#/definitions/HostShareDetail" + } + } + }, + "x-group": "hosts" + } + }, + "/nutanix/hierarchy/{id}/descendants": { + "get": { + "tags": [ + "/nutanix/hierarchy" + ], + "summary": "Get list of descendant objects", + "description": "Retrieve the list of descendant objects for the specified parent.", + "operationId": "getNutanixHierarchyDescendants", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the parent Nutanix hierarchy object.", + "required": true, + "type": "string" + }, + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter by ID of effective SLA domain.", + "required": false, + "type": "string" + }, + { + "name": "object_type", + "in": "query", + "description": "Filter by node object type.", + "required": false, + "type": "string", + "enum": [ + "cluster", + "vm" + ] + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter by primary cluster ID, or **local**.", + "required": false, + "type": "string" + }, + { + "name": "is_relic", + "in": "query", + "description": "Filter by isRelic field of Nutanix VM hierarchy object. 
Return both relic and non-relic children if this query is not set.", + "required": false, + "type": "boolean" + }, + { + "name": "limit", + "in": "query", + "description": "Limit the number of matches returned.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Ignore these many matches in the beginning.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "name", + "in": "query", + "description": "Search vm by vm name.", + "required": false, + "type": "string" + }, + { + "name": "sla_assignment", + "in": "query", + "description": "Filter by SLA assignment type.", + "required": false, + "type": "string", + "enum": [ + "Derived", + "Direct", + "Unassigned" + ] + }, + { + "name": "sort_by", + "in": "query", + "description": "Attribute to sort the results on.", + "required": false, + "type": "string", + "enum": [ + "effectiveSlaDomainName", + "name", + "descendantCount.cluster", + "descendantCount.vm" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending or descending.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + }, + { + "name": "snappable_status", + "in": "query", + "description": "Filters Nutanix hierarchy objects based on the specified query value.", + "required": false, + "type": "string", + "enum": [ + "Protectable" + ] + } + ], + "responses": { + "200": { + "description": "Summary list of descendant objects.", + "schema": { + "$ref": "#/definitions/NutanixHierarchyObjectSummaryListResponse" + } + } + }, + "x-group": "nutanix_hierarchy" + } + }, + "/node": { + "get": { + "tags": [ + "/node" + ], + "summary": "Get list of nodes in this Rubrik cluster", + "description": "Returns the list of all Rubrik nodes.", + "operationId": "getNodes", + "parameters": [], + "responses": { + "200": { + "description": "List of nodes.", + "schema": { + "$ref": "#/definitions/NodeStatusListResponse" + } + } + }, + "x-group": "node" + } + }, + "/cluster/{id}/is_registered": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get registration status for a Rubrik cluster", + "description": "Check whether the specified Rubrik cluster is registered in the Rubrik customer database.", + "operationId": "isRegisteredWithRubrik", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of a Rubrik cluster, or use *me* for the Rubrik cluster that is hosting the current session.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "Returns 'True' when the specified Rubrik cluster is registered.", + "schema": { + "$ref": "#/definitions/BooleanResponse" + } + } + }, + "x-group": "cluster" + } + }, + "/volume_group/snapshot/{id}/restore_files": { + "post": { + "tags": [ + "/volume_group" + ], + "summary": "Restore files from the Volume Group snapshot", + "description": "Restore filess to the original Host.", + "operationId": "restoreVolumeGroupSnapshotFiles", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration containing snapshot file paths and restore path.", + "required": true, + "schema": { + "$ref": "#/definitions/VolumeGroupRestoreFilesConfig" + } + } + ], + "responses": { + "202": { + "description": "Status for the restore request.", + "schema": { + "$ref": 
"#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "volume_group" + } + }, + "/oracle/rac": { + "get": { + "tags": [ + "/oracle" + ], + "summary": "Get summary information for Oracle RAC", + "description": "Retrieve an array containing summary information for the Oracle RAC objects on the Rubrik cluster.", + "operationId": "queryOracleRac", + "parameters": [ + { + "name": "name", + "in": "query", + "description": "Filter a response by making an infix comparison of the Oracle RAC name in the response with the specified value.", + "required": false, + "type": "string" + }, + { + "name": "sla_assignment", + "in": "query", + "description": "Limit a response to the results that have the specified SLA Domain assignment type.", + "required": false, + "type": "string", + "enum": [ + "Derived", + "Direct", + "Unassigned" + ] + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Limit a response to the results that have the specified primary cluster value.", + "required": false, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Limit the summary information to a specified maximum number of matches. Optionally, use with offset to start the count at a specified point. Optionally, use with sort_by to perform sort on given attributes. Include sort_order to determine the ascending or descending direction of sort.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Starting position in the list of matches. The response includes the specified numbered entry and all higher numbered entries. Use with limit to retrieve the response as smaller groups of entries, for example for paging of results.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "sort_by", + "in": "query", + "description": "Specifies a comma-separated list of attributes to use in sorting the matches. 
Performs an ASCII sort of the values in the response using each specified attribute, in the order specified.", + "required": false, + "type": "string", + "enum": [ + "effectiveSlaDomainName", + "name" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending or descending.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "Successful query results.", + "schema": { + "$ref": "#/definitions/OracleRacSummaryListResponse" + } + } + }, + "x-group": "oracle_rac" + } + }, + "/cloud_on/azure/request/{id}": { + "get": { + "tags": [ + "/cloud_on" + ], + "summary": "Get asynchronous request details for Azure-related jobs", + "description": "Get the details of an asynchronous request that involves Azure.", + "operationId": "getAzureAsyncRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of an asynchronous request.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status of an asynchronous request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "cloud_instance" + } + }, + "/config/usersettable_cerebro": { + "get": { + "tags": [ + "/config" + ], + "summary": "Fetch the global Cerebro configuration", + "description": "Fetch the global Cerebro configuration.", + "operationId": "getUserSettableCerebroConfig", + "parameters": [], + "responses": { + "200": { + "description": "global configuration.", + "schema": { + "$ref": "#/definitions/UserSettableGlobalCerebroConfig" + } + } + }, + "x-group": "internal_config" + }, + "patch": { + "tags": [ + "/config" + ], + "summary": "Update the global Cerebro configuration", + "description": "Update the global Cerebro configuration.", + "operationId": "updateUserSettableCerebroConfig", + "parameters": [ + { + "in": "body", + "name": "new_values", + "description": "New configuration values.", + "required": true, + "schema": { + "$ref": "#/definitions/UserSettableGlobalCerebroConfig" + } + } + ], + "responses": { + "200": { + "description": "global configuration.", + "schema": { + "$ref": "#/definitions/UserSettableGlobalCerebroConfig" + } + } + }, + "x-group": "internal_config" + } + }, + "/snapshot/corrupt_chain": { + "post": { + "tags": [ + "/snapshot" + ], + "summary": "REQUIRES SUPPORT TOKEN - Corrupt an incremental snapshot by redirecting its base pointer", + "description": "REQUIRES SUPPORT TOKEN- Corrupt an incremental snapshot by redirecting its base pointer. CAUTION - This request races with Reverse and Expire, so care must be taken to disable these jobs or stop services (on all nodes). 
A support token is required for this operation.", + "operationId": "corruptSnapshotChain", + "parameters": [ + { + "in": "body", + "name": "config", + "description": "Configuration for the snapshot corruption.", + "required": true, + "schema": { + "$ref": "#/definitions/CorruptSnapshotChainConfig" + } + } + ], + "responses": { + "200": { + "description": "TODO.", + "schema": { + "type": "string" + } + } + }, + "x-group": "internal_snapshot" + } + }, + "/hyperv/vm/request/{id}": { + "get": { + "tags": [ + "/hyperv/vm" + ], + "summary": "Get VM async request details", + "description": "Get details about a Hyper-V vm related async request.", + "operationId": "getHypervVirtualMachineAsyncRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the request.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Status for the async request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "hyperv_vm" + } + }, + "/app_blueprint/polaris_link/{id}": { + "get": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Link to Blueprint detail page on Polaris", + "description": "Return the link to URL to view the Blueprint detail page on Polaris.", + "operationId": "getAppBlueprintPolarisLink", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a Blueprint object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Detailed information about a Blueprint object.", + "schema": { + "$ref": "#/definitions/AppBlueprintPolarisLink" + } + } + }, + "x-group": "app_blueprint" + } + }, + "/stats/protected_primary_storage": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get protected primary storage", + "description": "Get protected primary storage.", + "operationId": "protectedPrimarySnapshotStorage", + "parameters": [], + "responses": { + "200": { + "description": "Returns an object with attribute: name(String), key(String), value(String), frequencyInMin(Integer), lastUpdateTime(Date).", + "schema": { + "$ref": "#/definitions/OfflineStatSummary" + } + } + }, + "x-group": "stats" + } + }, + "/host_fileset/share": { + "get": { + "tags": [ + "/host_fileset" + ], + "summary": "Get summary information for network shares", + "description": "Retrieve summary information for the network shares that are registered with a Rubrik cluster, and summary information for the filesets that are assigned to each network share.", + "operationId": "queryHostFilesetShare", + "parameters": [ + { + "name": "hostname", + "in": "query", + "description": "Filter the summary information based on the hostname.", + "required": false, + "type": "string" + }, + { + "name": "export_point", + "in": "query", + "description": "Filter the summary information based on the export point.", + "required": false, + "type": "string" + }, + { + "name": "share_type", + "in": "query", + "description": "Filter the summary information based on the share type.", + "required": false, + "type": "string", + "enum": [ + "NFS", + "SMB" + ] + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filters the summary information based on the Rubrik cluster specified by the value of primary_cluster_id. 
Use 'local' for the Rubrik cluster that is hosting the current REST API session.", + "required": false, + "type": "string" + }, + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter the summary information based on the ID of the effective SLA Domain that is inherited by the filesets that are assigned to a network share. Use **_UNPROTECTED_** to only return information for network shares that have filesets without an effective SLA Domain. Use **_PROTECTED_** to only return information for network shares that have filesets with an effective SLA Domain.", + "required": false, + "type": "string" + }, + { + "name": "template_id", + "in": "query", + "description": "Filter the summary information based on the ID of a fileset templates applied to the network share. Use **_NO_FILESET_** to return information for network shares with no filesets. _NO_FILESET_ must be used with searchType 'exact'.", + "required": false, + "type": "string" + }, + { + "name": "search_type", + "in": "query", + "description": "Search type. Accepted values are 'infix' or 'exact'. Default is 'exact'.", + "required": false, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Comma-separated list of attributes that define the sort order of the search results. Use the following attributes to form the list: exportPoint | hostName | shareType | status.", + "required": false, + "type": "array", + "items": { + "type": "string" + } + }, + { + "name": "sort_order", + "in": "query", + "description": "Comma-separated list of sort directions for each attribute type, use either 'asc' for ascending or 'desc' for descending. Match the order of the sort direction list values to the attribute list values to define the sort direction for each attribute type in the results.\nComma-separated list of sort orders (one for each sort_by) asc | desc\",.", + "required": false, + "type": "array", + "items": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Summary information for network shares.", + "schema": { + "$ref": "#/definitions/HostFilesetShareSummaryListResponse" + } + } + }, + "x-group": "host_fileset" + } + }, + "/search/snapshot_search": { + "get": { + "tags": [ + "/search" + ], + "summary": "Search for a file", + "description": "Perform a search for a file within a specified snapshot by using a prefix portion of the filename.", + "operationId": "snapshotSearchQuery", + "parameters": [ + { + "name": "snapshot_id", + "in": "query", + "description": "ID assigned to a snapshot object to search.", + "required": true, + "type": "string" + }, + { + "name": "name", + "in": "query", + "description": "Query string consisting of a prefix portion of the filename.", + "required": true, + "type": "string" + }, + { + "name": "dir", + "in": "query", + "description": "Full path of a directory to search.", + "required": true, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Maximum number of entries in the response.", + "required": false, + "type": "integer", + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Starting position in the list of entries contained in the query results, sorted by lexicographical order. 
The response includes the specified numbered entry and all higher numbered entries.", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "responses": { + "200": { + "description": "Search results.", + "schema": { + "$ref": "#/definitions/SnapshotSearchResponseListResponse" + } + } + }, + "x-group": "search" + } + }, + "/hyperv/cluster/{id}": { + "get": { + "tags": [ + "/hyperv/cluster" + ], + "summary": "Get details of a Hyper-V cluster", + "description": "Get details of a Hyper-V cluster.", + "operationId": "getHypervCluster", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Hyper-V cluster.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Details about the Hyper-V cluster.", + "schema": { + "$ref": "#/definitions/HypervClusterDetail" + } + } + }, + "x-group": "hyperv_cluster" + }, + "patch": { + "tags": [ + "/hyperv/cluster" + ], + "summary": "Update Hyper-V cluster", + "description": "Update cluster with specified properties.", + "operationId": "updateHypervCluster", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of Hyper-V cluster.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "update_properties", + "description": "Properties to update.", + "required": true, + "schema": { + "$ref": "#/definitions/HypervClusterUpdate" + } + } + ], + "responses": { + "200": { + "description": "Return details about virtual machine.", + "schema": { + "$ref": "#/definitions/HypervClusterDetail" + } + } + }, + "x-group": "hyperv_cluster" + } + }, + "/managed_object/{managed_id}/children": { + "get": { + "tags": [ + "/managed_object" + ], + "summary": "Gets the summaries of a managed object's children", + "description": "Gets the summaries of a managed object's immediate children.", + "operationId": "getObjectChildren", + "parameters": [ + { + "name": "managed_id", + "in": "path", + "description": "ManagedID of object whose summary to get.", + "required": true, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Maximum number of results to return.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "object_type", + "in": "query", + "description": "Comma-separated list of objectType values by which to search (all if not specified).", + "required": false, + "type": "array", + "items": { + "type": "string" + } + }, + { + "name": "operating_system_type", + "in": "query", + "description": "Comma-separated list of operatingSystemType values by which to search (all if not specified): Linux|Windows.", + "required": false, + "type": "array", + "items": { + "type": "string" + } + }, + { + "name": "offset", + "in": "query", + "description": "Starting offset of the results to return.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter by primary cluster ID, or local.", + "required": false, + "type": "string" + }, + { + "name": "search_attr", + "in": "query", + "description": "Comma-separated list of attributes by which to search: name|hostname.", + "required": false, + "type": "array", + "items": { + "type": "string" + } + }, + { + "name": "search_value", + "in": "query", + "description": "Comma-separated list of values by which to search (one for each search_attr).", + "required": false, + "type": "array", + "items": { + "type": "string" + } + }, + { + "name": "search_type", + 
"in": "query", + "description": "Comma-separated list of search types (one for each search_attr): prefix|infix|exact.", + "required": false, + "type": "array", + "items": { + "type": "string" + } + }, + { + "name": "join_operator", + "in": "query", + "description": "Whether results must match any or all of the search attributes: all|any (default is \"all\").", + "required": false, + "type": "string" + }, + { + "name": "sort_attr", + "in": "query", + "description": "Comma-separated list of attributes by which to sort: name|objectType|hostname.", + "required": false, + "type": "array", + "items": { + "type": "string" + } + }, + { + "name": "sort_order", + "in": "query", + "description": "Comma-separated list of sort orders (one for each sort_attr): asc|desc.", + "required": false, + "type": "array", + "items": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Summaries of each child object.", + "schema": { + "$ref": "#/definitions/ManagedObjectSummaryListResponse" + } + } + }, + "x-group": "managed_object" + } + }, + "/cluster/{id}/node": { + "post": { + "tags": [ + "/cluster" + ], + "summary": "Add nodes", + "description": "Add nodes to the specified Rubrik cluster.", + "operationId": "addNewNodes", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "request", + "description": "The request object for addNodes.", + "required": true, + "schema": { + "$ref": "#/definitions/AddNodesConfig" + } + } + ], + "responses": { + "202": { + "description": "Job Id to check pending add node request.", + "schema": { + "$ref": "#/definitions/AddNodesOperation" + } + } + }, + "x-group": "cluster" + }, + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get nodes", + "description": "Retrieve the list of nodes in this Rubrik cluster.", + "operationId": "getClusterNodes", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "List of node statuses.", + "schema": { + "$ref": "#/definitions/NodeStatusListResponse" + } + } + }, + "x-group": "cluster" + } + }, + "/polaris/failover/source/{id}/start": { + "post": { + "tags": [ + "/polaris/failover" + ], + "summary": "Starts the failover process in the source cluster", + "description": "It will start the failover process in the source cluster. It will shutdown the VMs in the vCenter and will take a snapshot. 
For the failover to cloud, it will also convert the snapshot.", + "operationId": "startFailoverOnSource", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Blueprint.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for the job to specify if failover is zero data loss and whether to power off the virtual machines of the Blueprint, and optionally a snapshot ID to failover.", + "required": true, + "schema": { + "$ref": "#/definitions/TriggerFailoverOnSourceDefinition" + } + } + ], + "responses": { + "202": { + "description": "Status of the job.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "failover" + } + }, + "/archive/object_store/reader/connect": { + "post": { + "tags": [ + "/archive" + ], + "summary": "Connect to an object storage archival location as reader", + "description": "Connect the current cluster as a reader to an existing object storage location. Initiates an asynchronous job to connect to the archival location.\n", + "operationId": "connectObjectStoreAsReader", + "parameters": [ + { + "in": "body", + "name": "request", + "description": "Access credentials for the specified object storage archival location.", + "required": true, + "schema": { + "$ref": "#/definitions/ObjectStoreReaderConnectDefinition" + } + } + ], + "responses": { + "202": { + "description": "The request ID for an asynchronous request to connect to an object storage archival location as a reader cluster.\n", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "archival" + } + }, + "/app_blueprint/{id}/snapshot": { + "get": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Get list of snapshots of a Blueprint", + "description": "Retrieve summary information for each of the snapshot objects of a specified Blueprint object.", + "operationId": "queryAppBlueprintSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a Blueprint object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Summary snapshot information for a Blueprint object.", + "schema": { + "$ref": "#/definitions/AppBlueprintSnapshotSummaryListResponse" + } + } + }, + "x-group": "app_blueprint" + } + }, + "/archive/location/{id}/reader/refresh": { + "post": { + "tags": [ + "/archive" + ], + "summary": "Refresh archive information", + "description": "Update the current Rubrik cluster with information about the changes made to an archival location by the Rubrik cluster that owns the archival location.\n", + "operationId": "refreshArchivalLocation", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an archival location object.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "The request ID for an asynchronous request to refresh archival information.\n", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "archival" + } + }, + "/node_management/route_config": { + "post": { + "tags": [ + "/node_management" + ], + "summary": "Add a new route config to all hosts", + "description": "Add a new route config to all hosts.", + "operationId": "addRoute", + "parameters": [ + { + "in": "body", + "name": "route_config", + "description": "Network, netmask and gateway.", + "required": true, + "schema": { + "$ref": "#/definitions/RouteConfig" + } + } + ], + "responses": { + "201": { + 
"description": "Returned if route successfully added to node table.", + "schema": { + "$ref": "#/definitions/RouteConfig" + } + }, + "422": { + "description": "Returned if no nodes were found.", + "schema": { + "$ref": "#/definitions/RequestFailedException" + } + } + }, + "x-group": "internal_node_management" + }, + "delete": { + "tags": [ + "/node_management" + ], + "summary": "Delete an existing route", + "description": "Delete an existing route that was configured.", + "operationId": "deleteRoute", + "parameters": [ + { + "in": "body", + "name": "route_config", + "description": "Network and netmask.", + "required": true, + "schema": { + "$ref": "#/definitions/RouteDeletionConfig" + } + } + ], + "responses": { + "204": { + "description": "Returned if route successfully deleted from node table." + }, + "404": { + "description": "Returned if route not found.", + "schema": { + "type": "string" + } + }, + "422": { + "description": "Retuned if nodes were found.", + "schema": { + "$ref": "#/definitions/RequestFailedException" + } + } + }, + "x-group": "internal_node_management" + }, + "get": { + "tags": [ + "/node_management" + ], + "summary": "Get all existing route configs", + "description": "Lists all existing route configs.", + "operationId": "getRoutes", + "parameters": [], + "responses": { + "200": { + "description": "Returns list of existing route configs.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/RouteConfig" + } + } + }, + "422": { + "description": "Returned if no nodes were found.", + "schema": { + "$ref": "#/definitions/RequestFailedException" + } + } + }, + "x-group": "internal_node_management" + } + }, + "/cloud_on/azure/instance/{id}": { + "get": { + "tags": [ + "/cloud_on" + ], + "summary": "Get details about a given Azure cloud instance", + "description": "Get details about a given Azure cloud instance.", + "operationId": "getAzurePublicCloudMachineInstance", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Azure cloud instance.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns details about the Azure cloud instance.", + "schema": { + "$ref": "#/definitions/AzureInstanceDetail" + } + } + }, + "x-group": "cloud_instance" + }, + "delete": { + "tags": [ + "/cloud_on" + ], + "summary": "Remove entry of a given Azure cloud instance", + "description": "Remove entry of a given Azure cloud instance. This deletes the instance metadata from Rubrik but doesn't terminate the instance running on cloud. This is an irreversible operation.\n", + "operationId": "removeAzurePublicCloudMachineInstance", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Azure cloud instance.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Successfully stopped managing Azure cloud instance." + } + }, + "x-group": "cloud_instance" + } + }, + "/node_management/replace_node": { + "post": { + "tags": [ + "/node_management" + ], + "summary": "REQUIRES SUPPORT TOKEN - Replace a removed node with a new node, keeping HDDs and IPs", + "description": "REQUIRES SUPPORT TOKEN - Replace a removed node with a new node. 
A support token is required for this operation.", + "operationId": "replaceNode", + "parameters": [ + { + "in": "body", + "name": "replace_node_config", + "description": "IDs of new node and node to replace.", + "required": true, + "schema": { + "$ref": "#/definitions/ReplaceNodeConfig" + } + } + ], + "responses": { + "200": { + "description": "Request handle to check replace node request status.", + "schema": { + "$ref": "#/definitions/ReplaceNodeRec" + } + } + }, + "x-group": "internal_node_management" + }, + "get": { + "tags": [ + "/node_management" + ], + "summary": "REQUIRES SUPPORT TOKEN - Returns the status of the corresponding replaceNode request", + "description": "REQUIRES SUPPORT TOKEN - Returns the status of the corresponding replaceNode request. A support token is required for this operation.", + "operationId": "replaceNodeStatus", + "parameters": [], + "responses": { + "200": { + "description": "Replace node request status.", + "schema": { + "$ref": "#/definitions/ReplaceNodeStatus" + } + } + }, + "x-group": "internal_node_management" + } + }, + "/smtp_instance/send_test_email": { + "post": { + "tags": [ + "/smtp_instance" + ], + "summary": "send email", + "description": "Send email to admin users if toEmailIds is empty.", + "operationId": "sendTestEmail", + "parameters": [ + { + "in": "body", + "name": "email_ids", + "description": "send email params.", + "required": true, + "schema": { + "$ref": "#/definitions/SendEmailParams" + } + } + ], + "responses": { + "204": { + "description": "Email sent." + } + }, + "x-group": "smtp_instance" + } + }, + "/cluster/{id}/snmp_configuration": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get SNMP configuration of the cluster", + "description": "Get SNMP configuration of the queried cluster.", + "operationId": "getSnmpConfiguration", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "Returned if the query was successful.", + "schema": { + "$ref": "#/definitions/SnmpConfiguration" + } + } + }, + "x-group": "cluster" + }, + "patch": { + "tags": [ + "/cluster" + ], + "summary": "Update SNMP configuration", + "description": "Update the SNMP configuration for a specified Rubrik cluster.", + "operationId": "updateSnmpConfig", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "snmp_config", + "description": "SNMP configuration updates for the specified Rubrik cluster.", + "required": true, + "schema": { + "$ref": "#/definitions/SnmpConfigurationPatch" + } + } + ], + "responses": { + "200": { + "description": "Returned if the update was successful.", + "schema": { + "$ref": "#/definitions/SnmpConfiguration" + } + } + }, + "x-group": "cluster" + } + }, + "/nutanix/vm/snapshot/{id}/export": { + "post": { + "tags": [ + "/nutanix/vm" + ], + "summary": "Export VM snapshot", + "description": "Export snapshot of a vm.", + "operationId": "createNutanixExport", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for the export request.", + "required": true, + "schema": { + "$ref": "#/definitions/NutanixVmExportSnapshotJobConfig" + } + } + ], + "responses": { 
+ "202": { + "description": "Status for the export request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "nutanix_vm" + } + }, + "/cluster/{id}/install": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get the status of a Rubrik CDM install request", + "description": "Retrieves the status of a Rubrik CDM install request for a cluster.", + "operationId": "getClusterInstallStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "name": "request_id", + "in": "query", + "description": "Id of the install request.", + "required": false, + "type": "integer", + "format": "int64" + } + ], + "responses": { + "200": { + "description": "Status of the install request.", + "schema": { + "$ref": "#/definitions/InstallStatus" + } + } + }, + "x-group": "cluster", + "x-unauthenticated": true + }, + "post": { + "tags": [ + "/cluster" + ], + "summary": "Install Rubrik CDM on a cluster", + "description": "Issues an install request to a specified Rubrik cluster.", + "operationId": "installCluster", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "install_config", + "description": "Install configuration for the Rubrik cluster.", + "required": true, + "schema": { + "$ref": "#/definitions/ClusterInstallConfig" + } + } + ], + "responses": { + "202": { + "description": "Request handle to check the install status.", + "schema": { + "$ref": "#/definitions/NodeOperation" + } + } + }, + "x-group": "cluster", + "x-unauthenticated": true + } + }, + "/stats/runway_remaining": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get the number of days remaining before the system fills up", + "description": "Returns the estimated number of days before the available system storage is filled.", + "operationId": "runwayRemaining", + "parameters": [], + "responses": { + "200": { + "description": "Returns an object with attribute 'days'(Long).", + "schema": { + "$ref": "#/definitions/Runway" + } + } + }, + "x-group": "internal_report" + } + }, + "/cluster/{id}/remove_nodes": { + "post": { + "tags": [ + "/cluster" + ], + "summary": "Removes nodes from a Rubrik cluster", + "description": "Schedules nodes for removal.", + "operationId": "removeNodes", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "node_ids", + "description": "List of IDs of the nodes to be removed.", + "required": true, + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + } + ], + "responses": { + "202": { + "description": "Job ID to check the status of the removal of the nodes.", + "schema": { + "type": "string" + } + } + }, + "x-group": "cluster" + } + }, + "/host_fileset/share/{id}": { + "get": { + "tags": [ + "/host_fileset" + ], + "summary": "Get detailed information for a network share", + "description": "Retrieve detailed information for a network share, including detailed information for the filesets that are assigned to the network share.", + "operationId": "getHostFilesetShare", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the network share.", + "required": true, + "type": "string" + } + ], + 
"responses": { + "200": { + "description": "Detailed information for the specified network share.", + "schema": { + "$ref": "#/definitions/HostFilesetShareDetail" + } + } + }, + "x-group": "host_fileset" + } + }, + "/fileset/snapshot/{id}/restore_files": { + "post": { + "tags": [ + "/fileset" + ], + "summary": "Create restore job to restore multiple files/directories", + "description": "Initiate a job to copy one or more file or folder from a fileset backup to the source host. Returns the job instance ID.", + "operationId": "createFilesetRestoreFilesJob", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for job to restore one or more files or folders from a fileset backup.", + "required": true, + "schema": { + "$ref": "#/definitions/FilesetRestoreFilesJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Status for the restore request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "fileset" + } + }, + "/nutanix/cluster/{id}/container": { + "get": { + "tags": [ + "/nutanix/cluster" + ], + "summary": "Get list of containers on this cluster", + "description": "Query the nutanix cluster to get the list of containers, used for export purposes.", + "operationId": "getContainers", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Nutanix cluster.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns the ID and name of containers on the Nutanix cluster.", + "schema": { + "$ref": "#/definitions/NutanixContainerListResponse" + } + } + }, + "x-group": "nutanix_cluster" + } + }, + "/cluster/{id}/recommission_node": { + "post": { + "tags": [ + "/cluster" + ], + "summary": "Recommission a node", + "description": "Schedule a node for recommission.", + "operationId": "commissionNode", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "node_id", + "description": "Id of the node to recommission.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "202": { + "description": "Request handle to check recommission status.", + "schema": { + "$ref": "#/definitions/NodeOperation" + } + }, + "422": { + "description": "Could not recommission node (Invalid Input).", + "schema": { + "$ref": "#/definitions/RequestFailedException" + } + } + }, + "x-group": "cluster" + }, + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get node recommission status", + "description": "Retrieve the status of a specified asynchronous node recommission request.", + "operationId": "commissionRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "name": "request_id", + "in": "query", + "description": "Recommission request id.", + "required": true, + "type": "integer", + "format": "int64" + } + ], + "responses": { + "200": { + "description": "Recommission node request status.", + "schema": { + "$ref": "#/definitions/RecommissionNodeStatus" + } + } + }, + "x-group": "cluster" + } + }, + "/volume_group/{id}/snapshot": { + "delete": { + "tags": [ + "/volume_group" + ], + "summary": "Delete all snapshots of Volume 
Group", + "description": "Delete all snapshots of a Volume Group.", + "operationId": "deleteVolumeGroupSnapshots", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Volume Group ID.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Snapshots successfully deleted." + } + }, + "x-group": "volume_group" + } + }, + "/stats/snapshot_storage/logical": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get snapshot logical storage", + "description": "Retrieve the amount of logical Rubrik cluster storage used by snapshots.", + "operationId": "logicalSnapshotStorage", + "parameters": [], + "responses": { + "200": { + "description": "Returns an object with attribute: name(String), key(String), value(String), frequencyInMin(Integer), lastUpdateTime(Date).", + "schema": { + "$ref": "#/definitions/OfflineStatSummary" + } + } + }, + "x-group": "stats" + } + }, + "/cluster/{id}/ntp_server": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get NTP Servers", + "description": "Retrieve a list of the NTP servers assigned to the Rubrik cluster. Encryption keys are not reported.", + "operationId": "getClusterNtpServers", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "List of the NTP servers assigned to the specified Rubrik cluster.", + "schema": { + "$ref": "#/definitions/NtpServerConfigurationListResponse" + } + } + }, + "x-group": "cluster" + }, + "post": { + "tags": [ + "/cluster" + ], + "summary": "Assign NTP servers to Rubrik cluster", + "description": "Assign NTP servers to Rubrik cluster.", + "operationId": "setClusterNtpServers", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "ntp_server_configs", + "description": "List of NTP servers.", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/NtpServerConfiguration" + } + } + } + ], + "responses": { + "204": { + "description": "Successfully assigned the specified NTP servers to the Rubrik cluster." 
+ } + }, + "x-group": "cluster" + } + }, + "/storage/array/volume": { + "get": { + "tags": [ + "/storage/array" + ], + "summary": "Get details for all storage array volumes", + "description": "Retrieve the summary details for all storage array volume objects.", + "operationId": "queryStorageArrayVolume", + "parameters": [ + { + "name": "storage_array_id", + "in": "query", + "description": "Filter the query results by using the ID assigned to a storage array object.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Summary information for all storage array volumes.", + "schema": { + "$ref": "#/definitions/StorageArrayVolumeSummaryListResponse" + } + } + }, + "x-group": "storage_array" + } + }, + "/polaris/export_thrift": { + "post": { + "tags": [ + "/polaris" + ], + "summary": "Export information about a given metadata type in Thrift form", + "description": "Trigger an asynchronous job that uploads a file with metadata information about the given metadata type in serialized Thrift format.", + "operationId": "exportThriftMetadataInfo", + "parameters": [ + { + "in": "body", + "name": "export_thrift_metadata", + "description": "Config for export thrift job.", + "required": true, + "schema": { + "$ref": "#/definitions/ExportThriftInfoConfig" + } + } + ], + "responses": { + "202": { + "description": "Status of an asynchronous job to get object information.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "polaris" + } + }, + "/authorization": { + "get": { + "tags": [ + "/authorization" + ], + "summary": "Queries the current list of explicit authorizations", + "description": "Queries the current list of explicit authorizations by principal.\n", + "operationId": "queryAuthorization", + "parameters": [ + { + "name": "principals", + "in": "query", + "description": "List of principals whose authorizations to query.", + "required": true, + "type": "array", + "items": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Returns the list of explicit authorizations for each specified principal.\n", + "schema": { + "$ref": "#/definitions/AuthorizationSummaryListResponse" + } + }, + "400": { + "description": "Returned if an invalid ManagedId is given.", + "schema": { + "type": "string" + } + } + }, + "x-group": "authorization" + } + }, + "/cluster/{id}/is_azure_cloud_only": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Check if the cluster supports only Azure cloud", + "description": "Checks if the Rubrik cluster has restricted cloud functionality to Azure cloud only.", + "operationId": "isAzureCloudOnly", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "True if the cluster supports only Azure cloud.", + "schema": { + "$ref": "#/definitions/BooleanResponse" + } + } + }, + "x-group": "cluster" + } + }, + "/hierarchy/search": { + "post": { + "tags": [ + "/hierarchy" + ], + "summary": "Search for hierarchy objects", + "description": "Search by name, location and slaDomain over hierarchy objects for specified object types.", + "operationId": "hierarchySearch", + "parameters": [ + { + "in": "body", + "name": "query", + "description": "Search query to perform.", + "required": true, + "schema": { + "$ref": "#/definitions/ManagedHierarchySearchObject" + } + } + ], + "responses": { + "200": { + "description": "Return the list of 
summaries of matching objects.", + "schema": { + "$ref": "#/definitions/SearchItemSummaryListResponse" + } + } + }, + "x-group": "hierarchy" + } + }, + "/archive/location": { + "get": { + "tags": [ + "/archive" + ], + "summary": "Get summary information for all archival locations", + "description": "Retrieve information for all archival locations, including ID, type, status, address, and bucket count.", + "operationId": "queryArchivalLocations", + "parameters": [ + { + "name": "status", + "in": "query", + "description": "Filters archival locations by status. Accepted values: 'active'.", + "required": false, + "type": "string" + }, + { + "name": "name", + "in": "query", + "description": "Filters the retrieved list of archival locations by the archival location name.", + "required": false, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Specifies the attribute to use when sorting the retrieved list of archival locations. Optionally, use **_sort_order_** to specify whether to sort in ascending(asc) or descending(desc) order.", + "required": false, + "type": "string", + "enum": [ + "name" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending or descending.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + }, + { + "name": "location_type", + "in": "query", + "description": "Filter by location type.", + "required": false, + "type": "string" + }, + { + "name": "show_retention_locked_snapshot_presence", + "in": "query", + "description": "Flag to retrieve presence of snapshots retained by Retention Lock SLA Domains.", + "required": false, + "type": "boolean" + }, + { + "name": "show_snapshots_legal_hold_status", + "in": "query", + "description": "Flag to retrieve presence of snapshots placed under Legal Hold.", + "required": false, + "type": "boolean" + } + ], + "responses": { + "200": { + "description": "Returns a list of archival locations.", + "schema": { + "$ref": "#/definitions/ArchivalLocationSummaryListResponse" + } + } + }, + "x-group": "archival" + } + }, + "/aws/account/dca": { + "post": { + "tags": [ + "/aws/account/dca" + ], + "summary": "Add a DCA AWS account. A DCA AWS account is an aws account which has a DCA region", + "description": "Add a DCA AWS account object using specified configuration.", + "operationId": "createDcaAwsAccount", + "parameters": [ + { + "in": "body", + "name": "config", + "description": "Configuration to use to add a DCA AWS account object.", + "required": true, + "schema": { + "$ref": "#/definitions/DcaAwsAccountCreate" + } + } + ], + "responses": { + "202": { + "description": "Created request to add a DCA AWS account.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "aws_account" + } + }, + "/job/{id}": { + "get": { + "tags": [ + "/job" + ], + "summary": "REQUIRES SUPPORT TOKEN - Get details about a job instance", + "description": "REQUIRES SUPPORT TOKEN - Retrieve the following information about job instance- ID of job instance, job status, error details, start time of job, end time of job, job type, ID of the node and job progress. 
A support token is required for this operation.", + "operationId": "getJobInstance", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of Job instance.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Return job instance details.", + "schema": { + "$ref": "#/definitions/InternalJobInstanceDetail" + } + } + }, + "x-group": "internal_job" + }, + "patch": { + "tags": [ + "/job" + ], + "summary": "REQUIRES SUPPORT TOKEN - End point to update general properties of a job", + "description": "REQUIRES SUPPORT TOKEN - This is to allow one to update general properties of a job. A support token is required for this operation.", + "operationId": "updateJob", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the job.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "job_update_properties", + "description": "Properties to update.", + "required": true, + "schema": { + "$ref": "#/definitions/JobUpdate" + } + } + ], + "responses": { + "200": { + "description": "TODO.", + "schema": { + "$ref": "#/definitions/JobDetail" + } + } + }, + "x-group": "internal_job" + } + }, + "/polaris/app_blueprint": { + "post": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Create a new Blueprint object", + "description": "Create a new Blueprint object.", + "operationId": "createAppBlueprint", + "parameters": [ + { + "in": "body", + "name": "app_blueprint", + "description": "Definition for a Blueprint.", + "required": true, + "schema": { + "$ref": "#/definitions/AppBlueprintCreate" + } + } + ], + "responses": { + "202": { + "description": "Details of an updated Blueprint object.", + "schema": { + "$ref": "#/definitions/AppBlueprintDetail" + } + } + }, + "x-group": "app_blueprint" + } + }, + "/mssql/db/{id}/snappable_id": { + "get": { + "tags": [ + "/mssql" + ], + "summary": "(DEPRECATED) Returns the protected object for a Microsoft SQL database", + "description": "Returns the snappableId for a Microsoft SQL database. 
This endpoint will be removed in CDM v6.0 in favor of `GET v1/mssql/db/{id}/snappable_id`.", + "operationId": "mssqlGetSnappableId", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Microsoft SQL database.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns the snappableId.", + "schema": { + "$ref": "#/definitions/MssqlSnappableId" + } + } + }, + "deprecated": true, + "x-group": "mssql" + } + }, + "/storage/array/hierarchy/{id}/children": { + "get": { + "tags": [ + "/storage/array" + ], + "summary": "Get list of children", + "description": "Get a list of the immediate subordinate objects of a storage array object.", + "operationId": "getStorageArrayHierarchyChildren", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a parent storage array hierarchy object.", + "required": true, + "type": "string" + }, + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter by the ID of the effective SLA domain.", + "required": false, + "type": "string" + }, + { + "name": "object_type", + "in": "query", + "description": "Filter by the object type.", + "required": false, + "type": "string", + "enum": [ + "AppBlueprint", + "AwsAccount", + "CloudCompute", + "CloudComputeRegion", + "CloudNativeAuthzRoot", + "ComputeCluster", + "DataCenter", + "DataStore", + "Ec2Instance", + "ExclusionPattern", + "ExclusionPatternAuthzRoot", + "Folder", + "Hdfs", + "HostFailoverCluster", + "HostRoot", + "HypervAuthzRoot", + "HypervCluster", + "HypervScvmm", + "HypervServer", + "HypervVirtualMachine", + "FailoverClusterApp", + "KuprHost", + "KuprHostAuthzRoot", + "LinuxFileset", + "LinuxHost", + "LinuxHostAuthzRoot", + "ManagedVolume", + "ManagedVolumeAuthzRoot", + "ManagedVolumeRoot", + "MssqlAuthzRoot", + "MssqlDatabase", + "MssqlAvailabilityGroup", + "MssqlInstance", + "NasHost", + "NasHostAuthzRoot", + "NasSystem", + "NfsHostShare", + "NutanixAuthzRoot", + "NutanixCluster", + "NutanixVirtualMachine", + "OracleAuthzRoot", + "OracleDatabase", + "OracleHost", + "OracleRac", + "OracleRoot", + "SapHanaAuthzRoot", + "SapHanaDatabase", + "SapHanaSystem", + "ShareFileset", + "SlaDomain", + "SmbHostShare", + "StorageArray", + "StorageArrayVolume", + "StorageArrayVolumeGroup", + "Storm", + "User", + "vCenter", + "Vcd", + "VcdAuthzRoot", + "VcdCatalog", + "VcdOrg", + "VcdOrgVdc", + "VcdVapp", + "VcdVimServer", + "VirtualMachine", + "VmwareAuthzRoot", + "VmwareHost", + "VmwareResourcePool", + "VmwareStoragePolicy", + "VmwareTag", + "VmwareTagCategory", + "WindowsCluster", + "WindowsFileset", + "WindowsHost", + "WindowsHostAuthzRoot", + "WindowsVolumeGroup" + ] + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter by the ID of the primary Rubrik CDM instance. 
Use local to specify the Rubrik CDM instance that is hosting the current API session.", + "required": false, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Return only the specified number of objects from the query results.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Return a subset of the query results, starting with the specified number in the sequence of results.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "name", + "in": "query", + "description": "Filter by the object name.", + "required": false, + "type": "string" + }, + { + "name": "sla_assignment", + "in": "query", + "description": "Filter by the SLA assignment type.", + "required": false, + "type": "string", + "enum": [ + "Derived", + "Direct", + "Unassigned" + ] + }, + { + "name": "sort_by", + "in": "query", + "description": "Specify an attribute to use to sort the query results.", + "required": false, + "type": "string", + "enum": [ + "name", + "effectiveSlaDomainName", + "descendantCountArray", + "descendantCountVolume", + "descendantCountVolumeGroup" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Specify the sort order to use when sorting query results, either ascending or descending.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "Summary list of children of a storage array hierarchy object.", + "schema": { + "$ref": "#/definitions/StorageArrayHierarchyObjectSummaryListResponse" + } + } + }, + "x-group": "storage_array_volume_group" + } + }, + "/archive/dca/{id}": { + "get": { + "tags": [ + "/archive" + ], + "summary": "Get information about a specific DCA archival location", + "description": "Get information about a specific DCA archival location.", + "operationId": "getDcaLocation", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of a DCA archival location.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Object containing information about the archival location.", + "schema": { + "$ref": "#/definitions/DcaLocationDetail" + } + } + }, + "x-group": "archival" + }, + "patch": { + "tags": [ + "/archive" + ], + "summary": "Update a DCA archival location", + "description": "Update the properties of a DCA archival location.", + "operationId": "updateDcaLocation", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the DCA archival location.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "updated_definition", + "description": "Object containing information about the archival location.", + "required": true, + "schema": { + "$ref": "#/definitions/DcaUpdateDefinition" + } + } + ], + "responses": { + "200": { + "description": "Returns the successfully updated archival location object.", + "schema": { + "$ref": "#/definitions/DcaLocationDetail" + } + } + }, + "x-group": "archival" + } + }, + "/vmware/config/esx_subnets": { + "get": { + "tags": [ + "/vmware/config" + ], + "summary": "Get the esxSubnets", + "description": "Retrieve the preferred subnets to reach ESXi hosts.", + "operationId": "queryEsxSubnets", + "parameters": [], + "responses": { + "200": { + "description": "The preferred subnets to reach to ESX hosts.", + "schema": { + "$ref": "#/definitions/EsxSubnets" + } + } + }, + "x-group": "vmware_config" + } + 
}, + "/managed_volume/{id}/inflight_snapshot": { + "get": { + "tags": [ + "/managed_volume" + ], + "summary": "Get information about the currently in-flight snapshot", + "description": "Returns summary information about the in-flight snapshot if the managed volume is open for write operations. When the managed volume is in any other state, returns a 404 error.", + "operationId": "getManagedVolumeInflightSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the managed volume.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Return information about the in-flight snapshot.", + "schema": { + "$ref": "#/definitions/ManagedVolumeInflightSnapshotSummary" + } + } + }, + "x-group": "managed_volume" + } + }, + "/volume_group/host_layout/{snapshot_id}": { + "get": { + "tags": [ + "/volume_group" + ], + "summary": "Get the host layout of a Volume Group snapshot", + "description": "Use a snapshot ID to retrieve the host layout for the source host, including the layout of all disks and all volumes on the host.", + "operationId": "getHostLayoutWithSnapshotId", + "parameters": [ + { + "name": "snapshot_id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns the host layout of a snapshot.", + "schema": { + "$ref": "#/definitions/WindowsHostLayout" + } + } + }, + "x-group": "volume_group" + } + }, + "/oracle/db/{id}/mount": { + "post": { + "tags": [ + "/oracle" + ], + "summary": "Live Mount an Oracle database snapshot", + "description": "Create an asynchronous job to Live Mount an Oracle database from a snapshot.", + "operationId": "createOracleMount", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Oracle database.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration parameters for a job to Live Mount an Oracle database snapshot.", + "required": true, + "schema": { + "$ref": "#/definitions/MountOracleDbConfig" + } + } + ], + "responses": { + "202": { + "description": "Request status for an async job to Live Mount an Oracle database snapshot.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "oracle_db" + } + }, + "/vmware/vcenter/tag_category/{tag_category_id}": { + "get": { + "tags": [ + "/vmware/vcenter" + ], + "summary": "Get Tag Category by Id", + "description": "Get a Tag Category in a vCenter by its ID.", + "operationId": "getVsphereTagCategory", + "parameters": [ + { + "name": "tag_category_id", + "in": "path", + "description": "ID of the Tag Category.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "A vCenter Tag Category.", + "schema": { + "$ref": "#/definitions/VsphereCategory" + } + } + }, + "x-group": "vcenter" + } + }, + "/mssql/db/recoverable_range/download/{id}": { + "get": { + "tags": [ + "/mssql" + ], + "summary": "Get the deletion status of downloaded recoverable ranges", + "description": "Get the details of the progress made in deleting recoverable ranges. 
The recoverable ranges to delete are those specified by the DELETE request to /mssql/db/{id}/recoverable_range/download which yielded the response with the job id.", + "operationId": "getDeleteMssqlDbRecoverableRangesStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Job ID of the deletion for which to check progress.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns the progress made in deleting the recoverable ranges.", + "schema": { + "$ref": "#/definitions/InternalJobInstanceDetail" + } + } + }, + "x-group": "mssql" + } + }, + "/cloud_on/aws/instance_type_list": { + "get": { + "tags": [ + "/cloud_on" + ], + "summary": "Get list of all instance types", + "description": "Get list of all instance types.", + "operationId": "getAwsInstanceTypeList", + "parameters": [], + "responses": { + "200": { + "description": "Returns the list of instance types.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/RecommendedInstanceType" + } + } + } + }, + "x-group": "cloud_instance" + } + }, + "/hyperv/vm/{id}": { + "get": { + "tags": [ + "/hyperv/vm" + ], + "summary": "Get VM details", + "description": "Detailed view of a VM.", + "operationId": "getHypervVirtualMachine", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Virtual Machine.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Return details about the Virtual machine.", + "schema": { + "$ref": "#/definitions/HypervVirtualMachineDetail" + } + } + }, + "x-group": "hyperv_vm" + }, + "patch": { + "tags": [ + "/hyperv/vm" + ], + "summary": "Update VM", + "description": "Update VM with specified properties.", + "operationId": "updateHypervVirtualMachine", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of Virtual Machine.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "vm_update_properties", + "description": "Properties to update.", + "required": true, + "schema": { + "$ref": "#/definitions/HypervVirtualMachineUpdate" + } + } + ], + "responses": { + "200": { + "description": "Return details about virtual machine.", + "schema": { + "$ref": "#/definitions/HypervVirtualMachineDetail" + } + } + }, + "x-group": "hyperv_vm" + } + }, + "/cloud_on/aws/instance/{id}/cloud_vm": { + "delete": { + "tags": [ + "/cloud_on" + ], + "summary": "Terminate a given AWS cloud instance", + "description": "Terminates a given AWS instance on cloud. 
The instance status should be STOPPED for the termination to happen.\n", + "operationId": "deleteAwsPublicCloudMachineInstance", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the AWS cloud instance.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status for the AWS instance deletion request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "cloud_instance" + }, + "patch": { + "tags": [ + "/cloud_on" + ], + "summary": "Turn on or off a given AWS cloud instance", + "description": "Turn on or off a given AWS cloud instance.", + "operationId": "switchAwsPublicCloudMachineInstancePower", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the AWS cloud instance.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "update", + "description": "Properties to update.", + "required": true, + "schema": { + "$ref": "#/definitions/CloudInstanceUpdate" + } + } + ], + "responses": { + "200": { + "description": "Returns updated details about the AWS cloud instance.", + "schema": { + "$ref": "#/definitions/AwsInstanceDetail" + } + } + }, + "x-group": "cloud_instance" + } + }, + "/role/{id}/authorization": { + "post": { + "tags": [ + "/authorization" + ], + "summary": "Grants authorizations to a role", + "description": "Grant the specified authorizations to the specified role.", + "operationId": "grantAuthorizations", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "The role ID of the role being granted authorizations.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "role_authorization_specification", + "description": "Specification that describes the authorizations being granted to the role.\n", + "required": true, + "schema": { + "$ref": "#/definitions/RoleAuthorizationSpecification" + } + } + ], + "responses": { + "200": { + "description": "Summary of authorizations granted to the role.", + "schema": { + "$ref": "#/definitions/RoleAuthorizationSummary" + } + } + }, + "x-group": "role_authorization" + }, + "get": { + "tags": [ + "/authorization" + ], + "summary": "Queries the current list of explicit authorizations for a role", + "description": "Query the current list of explicit authorizations for a role.\n", + "operationId": "queryAuthorizationsForRole", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "The role ID of the role to query for authorizations.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Summary of authorizations granted to the role.", + "schema": { + "$ref": "#/definitions/RoleAuthorizationSummary" + } + } + }, + "x-group": "role_authorization" + } + }, + "/nutanix/cluster/{id}": { + "delete": { + "tags": [ + "/nutanix/cluster" + ], + "summary": "Remove Nutanix cluster", + "description": "Initiates an asynchronous job to remove a Nutanix cluster object. 
The Nutanix cluster cannot have VMs mounted through the Rubrik cluster.", + "operationId": "deleteNutanixCluster", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Nutanix cluster to remove.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status for the async request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "nutanix_cluster" + }, + "get": { + "tags": [ + "/nutanix/cluster" + ], + "summary": "Get the details of a Nutanix Cluster", + "description": "Retrieve detailed information for a Nutanix Cluster object.", + "operationId": "getNutanixCluster", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Nutanix Cluster.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Details for a Nutanix Cluster object.", + "schema": { + "$ref": "#/definitions/NutanixClusterDetail" + } + } + }, + "x-group": "nutanix_cluster" + }, + "patch": { + "tags": [ + "/nutanix/cluster" + ], + "summary": "Patch Nutanix cluster", + "description": "Patch the host, credentials, and/or CA certs of the specified Nutanix cluster object.", + "operationId": "patchNutanixCluster", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Nutanix cluster.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "patch_properties", + "description": "Object containing updated Nutanix cluster information.", + "required": true, + "schema": { + "$ref": "#/definitions/NutanixClusterPatch" + } + } + ], + "responses": { + "200": { + "description": "Detail of the updated Nutanix cluster object.", + "schema": { + "$ref": "#/definitions/NutanixClusterDetail" + } + } + }, + "x-group": "nutanix_cluster" + } + }, + "/nutanix/vm/{id}/search": { + "get": { + "tags": [ + "/nutanix/vm" + ], + "summary": "Search for file in Nutanix VM", + "description": "Search for a file within the Nutanix Virtual Machine. Search via full path prefix or filename prefix.", + "operationId": "searchNutanixVm", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the vm.", + "required": true, + "type": "string" + }, + { + "name": "path", + "in": "query", + "description": "The path query. Either path prefix or filename prefix.", + "required": true, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Maximum number of entries in the response.", + "required": false, + "type": "integer", + "format": "int32" + }, + { + "name": "cursor", + "in": "query", + "description": "Pagination cursor returned by the previous request.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Search results.", + "schema": { + "$ref": "#/definitions/SearchResponseListResponse" + } + } + }, + "x-group": "nutanix_vm" + } + }, + "/polaris/failover/recovery_spec/upsert": { + "post": { + "tags": [ + "/polaris/failover" + ], + "summary": "Upserts snappable recovery specs for snappables", + "description": "Upserts snappable recovery specs of the snappables specifed in the body. Returns error if upsert validation fails.", + "operationId": "upsertSnappableRecoverySpecs", + "parameters": [ + { + "in": "body", + "name": "snappable_recovery_spec_details", + "description": "The snappable recovery spec details. 
The details should include list of snappable IDs and corresponding snappable recovery spec.", + "required": true, + "schema": { + "$ref": "#/definitions/SnappableRecoverySpecDetails" + } + } + ], + "responses": { + "201": { + "description": "Snappable recovery specs successfully upserted.", + "schema": { + "$ref": "#/definitions/UpsertSnappableRecoverySpecResponse" + } + } + }, + "x-group": "failover" + } + }, + "/data_location/object_store": { + "delete": { + "tags": [ + "/data_location" + ], + "summary": "REQUIRES SUPPORT TOKEN - Tries to clean up the data in the bucket in the ObjectSTore spec", + "description": "REQUIRES SUPPORT TOKEN - To be used by internal tests to clean the buckets. A support token is required for this operation.", + "operationId": "cleanObjectStoreBucket", + "parameters": [ + { + "in": "body", + "name": "definition", + "description": "Data Location definition.", + "required": true, + "schema": { + "$ref": "#/definitions/ObjectStoreLocationDefinition" + } + } + ], + "responses": { + "204": { + "description": "Returned if bucket was successfully emptied." + } + }, + "x-group": "archival" + } + }, + "/managed_volume/{id}/search": { + "get": { + "tags": [ + "/managed_volume" + ], + "summary": "Search for files in a managed volume", + "description": "Search a managed volume for files.", + "operationId": "searchManagedVolume", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the managed volume.", + "required": true, + "type": "string" + }, + { + "name": "path", + "in": "query", + "description": "The path query. Either path prefix or filename prefix.", + "required": true, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Maximum number of entries in the response.", + "required": false, + "type": "integer", + "format": "int32" + }, + { + "name": "cursor", + "in": "query", + "description": "Pagination cursor returned by the previous request.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Search results.", + "schema": { + "$ref": "#/definitions/SearchResponseListResponse" + } + } + }, + "x-group": "managed_volume" + } + }, + "/polaris/app_blueprint/snapshot/{id}/download": { + "post": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Download snapshot from archive", + "description": "Provides a method for retrieving a snapshot that is not locally available from an archival location.", + "operationId": "createAppBlueprintDownloadSnapshotFromCloud", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status for the download request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "app_blueprint" + } + }, + "/aws/hierarchy/{id}": { + "get": { + "tags": [ + "/aws/hierarchy" + ], + "summary": "Get summary of a hierarchy object", + "description": "Retrieve details for the specified hierarchy object.", + "operationId": "getAwsHierarchyObject", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of an AWS instance object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Hierarchy details for an AWS instance object.", + "schema": { + "$ref": "#/definitions/AwsHierarchyObjectSummary" + } + } + }, + "x-group": "aws_hierarchy" + } + }, + "/polaris/failover/image/{id}/pin": { + "post": { + "tags": [ + "/polaris/failover" + ], + 
"summary": "Pins Blueprint cloud machine image during failover", + "description": "Pin the Blueprint cloud machine image while the failover is running so that it doesn't get deleted during failover.", + "operationId": "pinAppCloudMachineImageForFailover", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to the Blueprint cloud machine image.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "failover_id", + "description": "ID of the failover job triggering the pin.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "204": { + "description": "Image successfully pinned." + } + }, + "x-group": "failover" + } + }, + "/organization/{id}/vmware/vm/metric": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get vmware vm metrics", + "description": "Retrieve the total object count, total protected object and no sla object count.", + "operationId": "getVmwareVmMetric", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns an object with metrics.", + "schema": { + "$ref": "#/definitions/OrganizationResourceMetric" + } + } + }, + "x-group": "organization_vmware" + } + }, + "/fileset/{id}/unprotect": { + "post": { + "tags": [ + "/fileset" + ], + "summary": "Unprotect a fileset", + "description": "Remove SLA assignment from a fileset, specifying what retention policy to apply to existing snapshots.", + "operationId": "unprotectFileset", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Managed ID of fileset.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "existing_snapshot_retention", + "description": "Specifies the retention policy to apply to existing snapshots when unprotecting a fileset.", + "required": true, + "schema": { + "$ref": "#/definitions/ExistingSnapshotRetention" + } + } + ], + "responses": { + "204": { + "description": "Successfully unprotected fileset." + } + }, + "x-group": "fileset" + } + }, + "/cloud_on/validate/instantiate_on_cloud": { + "post": { + "tags": [ + "/cloud_on" + ], + "summary": "Validate a snapshot for full conversion/instantiate job", + "description": "Runs validation on a snapshot for conversion, while reading the VM details populated by indexing in database. Can return not-ready response if the snapshot has not been indexed yet. 
Can be run on both snapshots and snappables, and in case of snappables, the last indexed snapshot is validated.\n", + "operationId": "queryValidateInstantiateOnCloud", + "parameters": [ + { + "name": "snapshot_id", + "in": "query", + "description": "ID of the snapshot to validate.", + "required": false, + "type": "string" + }, + { + "name": "snappable_id", + "in": "query", + "description": "ID of snappable to validate.", + "required": false, + "type": "string" + }, + { + "name": "cloud_provider", + "in": "query", + "description": "Cloud provider.", + "required": true, + "type": "string", + "enum": [ + "S3", + "Azure" + ] + } + ], + "responses": { + "200": { + "description": "Validation result.", + "schema": { + "$ref": "#/definitions/ValidationResponse" + } + } + }, + "x-group": "cloud_instance" + } + }, + "/stats/total_storage": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get total storage in cluster", + "description": "Get total storage in cluster.", + "operationId": "totalStorage", + "parameters": [], + "responses": { + "200": { + "description": "Returns an object with attribute: name(String), key(String), value(String), frequencyInMin(Integer), lastUpdateTime(Date).", + "schema": { + "$ref": "#/definitions/OfflineStatSummary" + } + } + }, + "x-group": "stats" + } + }, + "/host/{id}/async": { + "delete": { + "tags": [ + "/host/async" + ], + "summary": "Delete a registered host asynchronously", + "description": "Delete host asynchronously by specifying the host ID.", + "operationId": "deleteHostAsync", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the host to delete.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Async request for deleting the host.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "hosts" + } + }, + "/managed_volume/{id}": { + "delete": { + "tags": [ + "/managed_volume" + ], + "summary": "Delete a managed volume", + "description": "Delete a managed volume.", + "operationId": "deleteManagedVolume", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of managed volume.", + "required": true, + "type": "string" + }, + { + "name": "preserve_snapshots", + "in": "query", + "description": "Flag to indicate whether to convert snapshots of this managed volume to relics or to delete them. Default is true.", + "required": false, + "type": "boolean" + } + ], + "responses": { + "204": { + "description": "Managed volume sucessfully deleted." 
+ } + }, + "x-group": "managed_volume" + }, + "get": { + "tags": [ + "/managed_volume" + ], + "summary": "Get details of a managed volume", + "description": "Details of a managed volume.", + "operationId": "getManagedVolume", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the managed volume.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns details about a managed volume.", + "schema": { + "$ref": "#/definitions/ManagedVolumeSummary" + } + } + }, + "x-group": "managed_volume" + }, + "patch": { + "tags": [ + "/managed_volume" + ], + "summary": "Update managed volume", + "description": "Update managed volume with specified properties.", + "operationId": "updateManagedVolume", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of managed volume.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "update", + "description": "Properties to update.", + "required": true, + "schema": { + "$ref": "#/definitions/ManagedVolumeUpdate" + } + } + ], + "responses": { + "200": { + "description": "Return details about a managed volume.", + "schema": { + "$ref": "#/definitions/ManagedVolumeSummary" + } + } + }, + "x-group": "managed_volume" + } + }, + "/archive/object_store/reconnect": { + "post": { + "tags": [ + "/archive" + ], + "summary": "Reconnect to a specific object storage archival location", + "description": "Reconnect to a specific object storage location. Initiates an asynchronous job to connect to the archival location. This operation is deprecated. Use /archive/object_store/reader/connect instead.\n", + "operationId": "reconnectObjectStore", + "parameters": [ + { + "in": "body", + "name": "request", + "description": "Archival location credentials.", + "required": true, + "schema": { + "$ref": "#/definitions/ObjectStoreReconnectDefinition" + } + } + ], + "responses": { + "202": { + "description": "Returns the job instance id for the reconnect job.", + "schema": { + "$ref": "#/definitions/JobScheduledResponse" + } + } + }, + "deprecated": true, + "x-group": "archival" + } + }, + "/oracle/request/{id}": { + "get": { + "tags": [ + "/oracle" + ], + "summary": "Get Oracle database async request details", + "description": "Retrieve the task object for a specified Oracle database asynchronous request.", + "operationId": "getOracleAsyncRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the async request job.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Oracle database async request details.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "oracle_db" + } + }, + "/sla_domain/{id}/protected_objects": { + "get": { + "tags": [ + "/sla_domain" + ], + "summary": "Get list of entities explicitly protected by the SLA Domain with direct assignments. 
This call only returns objects that are explicitly protected by the SLA Domain with a direct SLA Domain assignment, which are not necessarily all objects protected by that SLA Domain", + "description": "Retrieve IDs of all entities protected by a specified SLA Domain.", + "operationId": "queryProtectedEntitiesOfSla", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the SLA Domain.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "List of IDs of entities protected by the SLA Domain.", + "schema": { + "$ref": "#/definitions/ProtectedEntityListResponse" + } + } + }, + "x-group": "sla_domain" + } + }, + "/cluster/{id}/name": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get cluster name", + "description": "Retrieves the name of the Rubrik cluster identified by the id.", + "operationId": "getClusterName", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "Name of the cluster.", + "schema": { + "type": "string" + } + } + }, + "x-group": "cluster" + } + }, + "/hyperv/scvmm/{id}/refresh": { + "post": { + "tags": [ + "/hyperv/scvmm" + ], + "summary": "Refresh Hyper-V SCVMM metadata", + "description": "Create a job to refresh the metadata for the specified Hyper-V SCVMM.", + "operationId": "refreshHypervScvmm", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Hyper-V SCVMM.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Request ID of the scheduled Hyper-V SCVMM refresh job.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "hyperv_scvmm" + } + }, + "/report/{id}/table": { + "post": { + "tags": [ + "/report" + ], + "summary": "Gets the table associated with the report specified by id and filters", + "description": "Retrieve table data for a specific report.", + "operationId": "getTable", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the report.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "report_table_request", + "description": "Definition of items to get from the report table.", + "required": false, + "schema": { + "$ref": "#/definitions/ReportTableRequest" + } + } + ], + "responses": { + "200": { + "description": "Table data for the report.", + "schema": { + "$ref": "#/definitions/TableData" + } + } + }, + "x-group": "internal_report" + } + }, + "/aws/ec2_instance/{id}/snapshot": { + "post": { + "tags": [ + "/aws/ec2_instance" + ], + "summary": "Initiate on-demand snapshot for EC2 instance", + "description": "Use the object ID for an EC2 instance to Initiate an on-demand snapshot.", + "operationId": "createAwsEc2InstanceSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the EC2 instance object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for an on-demand snapshot of an EC2 instance.", + "required": false, + "schema": { + "$ref": "#/definitions/BaseOnDemandSnapshotConfig" + } + } + ], + "responses": { + "202": { + "description": "Job status for an EC2 on-demand snapshot request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "aws_ec2_instance" + }, + "delete": { + "tags": [ + "/aws/ec2_instance" + ], + 
"summary": "Delete all snapshots of an EC2 instance", + "description": "Delete all snapshots of an EC2 instance. To perform this action, the EC2 instance cannot currently be assigned to an SLA Domain.", + "operationId": "deleteAwsEc2InstanceSnapshots", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the EC2 instance object.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Successfully deleted all snapshots for an EC2 instance." + } + }, + "x-group": "aws_ec2_instance" + }, + "get": { + "tags": [ + "/aws/ec2_instance" + ], + "summary": "Get snapshot list for EC2 instance", + "description": "Retrieve information about all of the snapshots of a specified EC2 instance.", + "operationId": "queryAwsEc2InstanceSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the EC2 instance object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Information about all snapshots for an EC2 instance.", + "schema": { + "$ref": "#/definitions/AwsEc2InstanceSnapshotSummaryListResponse" + } + } + }, + "x-group": "aws_ec2_instance" + } + }, + "/polaris/replication/location/{id}": { + "put": { + "tags": [ + "/polaris/replication" + ], + "summary": "Configure a disabled replication location", + "description": "Provide the cluster uuid and name of replication cluster to configure a disabled replication location. WARNING: This will be a no-op if there already exists a replication location with same cluster uuid and is enabled as source or target.", + "operationId": "addDisabledReplicationLocation", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of the replication location.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "definition", + "description": "Definition of disabled replication location.", + "required": true, + "schema": { + "$ref": "#/definitions/DisabledReplicationLocationDefinition" + } + } + ], + "responses": { + "201": { + "description": "Replication. location with uuid added as a disabled location.", + "schema": { + "$ref": "#/definitions/ReplicationLocationSummary" + } + }, + "422": { + "description": "Returned if the request fails.", + "schema": { + "$ref": "#/definitions/RequestFailedException" + } + } + }, + "x-group": "polaris_replication_location" + }, + "patch": { + "tags": [ + "/polaris/replication" + ], + "summary": "Enable a previously disabled replication location as target", + "description": "Provide the cluster uuid, name and nodes information of replication location to enable as replication target. Network setup must be either 'NAT' or 'Private Network'. WARNING: This will be a no-op if the replication location is already enabled as target.", + "operationId": "enableAsReplicationTarget", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of the replication location.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "definition", + "description": "Definition of replication location to enable as target.", + "required": true, + "schema": { + "$ref": "#/definitions/EnableAsReplicationTargetDefinition" + } + } + ], + "responses": { + "201": { + "description": "Replication. 
target with uuid successfully enabled (if it was required).", + "schema": { + "$ref": "#/definitions/ReplicationLocationSummary" + } + }, + "422": { + "description": "Returned if the request fails.", + "schema": { + "$ref": "#/definitions/RequestFailedException" + } + } + }, + "x-group": "polaris_replication_location" + } + }, + "/vmware/guest_credential/{id}": { + "delete": { + "tags": [ + "/vmware/guest_credential" + ], + "summary": "Remove a specific guest OS credential", + "description": "Remove a specific guest OS credential.", + "operationId": "deleteGuestCredentialById", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the guest OS credential to remove.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Successfully removed given guest OS credential." + } + }, + "x-group": "guest_credential" + }, + "get": { + "tags": [ + "/vmware/guest_credential" + ], + "summary": "Get information about a specific guest OS credential", + "description": "Get information about a specific guest OS credential.", + "operationId": "getGuestCredential", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the guest OS credential.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Object for guest OS credential definition.", + "schema": { + "$ref": "#/definitions/GuestCredentialDetail" + } + } + }, + "x-group": "guest_credential" + }, + "put": { + "tags": [ + "/vmware/guest_credential" + ], + "summary": "Update a specific guest OS credential", + "description": "Update the domain, username and password of given guest OS credential.", + "operationId": "updateGuestCredential", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the guest OS credential to update.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "definition", + "description": "Object for guest OS credential definition.", + "required": true, + "schema": { + "$ref": "#/definitions/GuestCredentialDefinition" + } + } + ], + "responses": { + "200": { + "description": "Returns the updated guest OS credential object.", + "schema": { + "$ref": "#/definitions/GuestCredentialDetail" + } + } + }, + "x-group": "guest_credential" + } + }, + "/volume_group/snapshot/{id}/download_files": { + "post": { + "tags": [ + "/volume_group" + ], + "summary": "Download files from Volume Group snapshot", + "description": "Create a download files request.", + "operationId": "createVolumeGroupDownloadFilesJob", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of Snapshot.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for a download request.", + "required": true, + "schema": { + "$ref": "#/definitions/VolumeGroupDownloadFilesJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Status for the snapshot download request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "volume_group" + } + }, + "/oracle/db/{id}/recover": { + "post": { + "tags": [ + "/oracle" + ], + "summary": "Instant recovery of a database", + "description": "Creates an instant recover request that restores a target database from the given snapshot.", + "operationId": "instantRecoverOracleSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Oracle database.", + "required": true, + "type": "string" + }, + { + "in": "body", + 
"name": "config", + "description": "Configuration parameters for a job to instantly recover from an Oracle database snapshot.", + "required": true, + "schema": { + "$ref": "#/definitions/RecoverOracleDbConfig" + } + } + ], + "responses": { + "202": { + "description": "Status for the instant recover request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "oracle_db" + } + }, + "/vmware/vm/snapshot/mount/count": { + "get": { + "tags": [ + "/vmware/vm" + ], + "summary": "Get a count of live mounts", + "description": "Retrieve total number of live mounts.", + "operationId": "countMount", + "parameters": [], + "responses": { + "200": { + "description": "Returns total number of live mounts.", + "schema": { + "$ref": "#/definitions/CountResponse" + } + } + }, + "x-group": "vm" + } + }, + "/backup_throttle": { + "get": { + "tags": [ + "/backup_throttle" + ], + "summary": "Get backup throttle settings", + "description": "Retrieve the configuration of the backup throttle for the specified Rubrik cluster.", + "operationId": "getThrottlingSettings", + "parameters": [], + "responses": { + "200": { + "description": "Backup throttle configuration. settings.", + "schema": { + "$ref": "#/definitions/ThrottlingSettings" + } + } + }, + "x-group": "backup_throttle" + }, + "put": { + "tags": [ + "/backup_throttle" + ], + "summary": "Change the backup throttle settings", + "description": "Change the backup throttling settings. This request completely replaces the existing settings. Unspecified fields are cleared and are not used for backup throttling.", + "operationId": "updateThrottlingSettings", + "parameters": [ + { + "in": "body", + "name": "settings", + "description": "Backup throttle configuration.", + "required": true, + "schema": { + "$ref": "#/definitions/ThrottlingSettings" + } + } + ], + "responses": { + "200": { + "description": "Successfully changed backup throttle settings.", + "schema": { + "$ref": "#/definitions/ThrottlingSettings" + } + } + }, + "x-group": "backup_throttle" + } + }, + "/node/stats": { + "get": { + "tags": [ + "/node" + ], + "summary": "Gets a list of time-series statistics for each node in this Rubrik cluster", + "description": "Returns the list statistics of all Rubrik nodes.", + "operationId": "getAllNodesStats", + "parameters": [ + { + "name": "range", + "in": "query", + "description": "Starting point for a time series. The starting point is expressed as -, where is an integer and is one of: s(seconds), m(minutes), h(hours), d(days). 
Default value is -1h.", + "required": false, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Limit the number of matches returned.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Ignore these many matches in the beginning.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "sort_by", + "in": "query", + "description": "Attribute by which the results are sorted.", + "required": false, + "type": "string", + "enum": [ + "Status", + "Id", + "Name", + "Ip" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending or descending.", + "required": false, + "type": "string", + "default": "asc", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "List of node stats.", + "schema": { + "$ref": "#/definitions/NodeStatsListResponse" + } + } + }, + "x-group": "node" + } + }, + "/hyperv/scvmm/request/{id}": { + "get": { + "tags": [ + "/hyperv/scvmm" + ], + "summary": "Get Hyper-V SCVMM async request", + "description": "Get details about a Hyper-V SCVMM related async request.", + "operationId": "getHypervScvmmAsyncRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the request.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Status for the async request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "hyperv_scvmm" + } + }, + "/aws/account/{id}": { + "delete": { + "tags": [ + "/aws/account" + ], + "summary": "Delete an AWS account", + "description": "Delete an AWS account object specified by its ID. Optionally, delete all existing snapshot data at the cloud location associated with the specified AWS account.", + "operationId": "deleteAwsAccount", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of an AWS account object to delete.", + "required": true, + "type": "string" + }, + { + "name": "delete_existing_snapshots", + "in": "query", + "description": "Determine whether to delete all existing snapshot data associated with the specified AWS account object. 
Set to **True** to expire all snapshot data.", + "required": true, + "type": "boolean", + "default": false + } + ], + "responses": { + "202": { + "description": "Created request to delete an AWS account object.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "aws_account" + }, + "get": { + "tags": [ + "/aws/account" + ], + "summary": "Get AWS account details", + "description": "Retrieve the details of an AWS account with a specified ID.", + "operationId": "getAwsAccount", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of an AWS account object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "AWS account details.", + "schema": { + "$ref": "#/definitions/AwsAccountDetail" + } + } + }, + "x-group": "aws_account" + }, + "patch": { + "tags": [ + "/aws/account" + ], + "summary": "Update an AWS account", + "description": "Provide updated information for a specified AWS account object.", + "operationId": "updateAwsAccount", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of an AWS account object to update.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration to use to update an AWS account.", + "required": true, + "schema": { + "$ref": "#/definitions/AwsAccountUpdate" + } + } + ], + "responses": { + "200": { + "description": "Updated AWS account details.", + "schema": { + "$ref": "#/definitions/AwsAccountDetail" + } + } + }, + "x-group": "aws_account" + } + }, + "/stats/unprotected_snappable_storage": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get total storage for snappables that have snapshots but are currently not protected", + "description": "Get total storage for snappables that have snapshots but are currently not protected.", + "operationId": "unprotectedSnappableStorage", + "parameters": [], + "responses": { + "200": { + "description": "Returns an object with attribute: name(String), key(String), value(String), frequencyInMin(Integer), lastUpdateTime(Date).", + "schema": { + "$ref": "#/definitions/OfflineStatSummary" + } + } + }, + "x-group": "stats" + } + }, + "/aws/account/request/{id}": { + "get": { + "tags": [ + "/aws/account" + ], + "summary": "Get AWS account request details", + "description": "Retrieve the details about a specified asynchronous request for an AWS account object.", + "operationId": "getAsyncRequestStatusForAwsAccount", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of an asynchronous request.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Status of an asynchronous request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "aws_account" + } + }, + "/cluster/{id}/security/key_rotation": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get details of key rotations", + "description": "Return detailed information for a key rotation identified by **rotation_id**, or for all key rotations if **rotation_id** is not specified.", + "operationId": "queryKeyRotation", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "name": "rotation_id", + "in": "query", + "description": "ID of the key rotation or *latest* for the most recent key rotation.", + "required": false, + "type": "string" + } + ], + "responses": { + 
"200": { + "description": "Detailed information for the specified key rotation, or for all key rotations if **rotation_id** is not specified. One list entry is returned for each node.", + "schema": { + "$ref": "#/definitions/KeyRotationDetailListResponse" + } + } + }, + "x-group": "security" + }, + "post": { + "tags": [ + "/cluster" + ], + "summary": "Rotate keys in encrypted cluster", + "description": "Trigger a key rotation for all nodes in the cluster. A reboot may be required depending on the rotation specification.", + "operationId": "rotateKeys", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "key_rotation_options", + "description": "Key rotation options.", + "required": true, + "schema": { + "$ref": "#/definitions/KeyRotationOptions" + } + } + ], + "responses": { + "202": { + "description": "Succesfully scheduled key rotation.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "security" + } + }, + "/cluster/{id}/pending_node": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get Add node request status", + "description": "Retrieves status of a pending add node request on the specified cluster.", + "operationId": "getAddNodeStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "Status of the pending add node request.", + "schema": { + "$ref": "#/definitions/AddNodesStatus" + } + } + }, + "x-group": "cluster" + } + }, + "/organization/{id}/sla_domain": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get sla domains associated with this organization", + "description": "Retrieve the total list of sla domains that have been granted to this organization.", + "operationId": "getOrganizationSlaDomains", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns a list of SLA Domains that have been assigned to this organization.", + "schema": { + "$ref": "#/definitions/SlaDomainSummaryListResponse" + } + } + }, + "x-group": "organization_resource" + } + }, + "/nutanix/vm/snapshot/{id}": { + "delete": { + "tags": [ + "/nutanix/vm" + ], + "summary": "Delete VM snapshot", + "description": "Delete a snapshot by expiring it. Snapshot is expired only if it is a manual snapshot or a snapshot of an unprotected vm.", + "operationId": "deleteNutanixSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + }, + { + "name": "location", + "in": "query", + "description": "Snapshot location to delete. Use **_local_** to delete all local snapshots and **_all_** to delete the snapshot in all locations.", + "required": true, + "type": "string", + "enum": [ + "all", + "local" + ] + } + ], + "responses": { + "204": { + "description": "Snapshot successfully deleted." 
+ } + }, + "x-group": "nutanix_vm" + }, + "get": { + "tags": [ + "/nutanix/vm" + ], + "summary": "Get VM snapshot details", + "description": "Retrieve detailed information about a snapshot.", + "operationId": "getNutanixSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns details about a snapshot.", + "schema": { + "$ref": "#/definitions/NutanixVmSnapshotDetail" + } + } + }, + "x-group": "nutanix_vm" + } + }, + "/polaris/app_blueprint/snapshot/{id}/export": { + "post": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Export Blueprint snapshot", + "description": "Export the specified Blueprint snapshot into a new Blueprint or an existing Blueprint.", + "operationId": "createAppBlueprintSnapshotExport", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to the Blueprint snapshot object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for the request to export the specified Blueprint snapshot.", + "required": true, + "schema": { + "$ref": "#/definitions/AppBlueprintExportSnapshotJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Accepted request for asynchronous job to export a Blueprint snapshot.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "app_blueprint" + } + }, + "/vcd/cluster/request/{id}": { + "get": { + "tags": [ + "/vcd/cluster" + ], + "summary": "(DEPRECATED) Get vCD Cluster job status", + "description": "Retrieve the details of a specified asynchronous job for a vCD Cluster. This endpoint will be removed in CDM v6.1 in favor of `GET v1/vcd/cluster/request/{id}`.", + "operationId": "getVcdClusterAsyncRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an asynchronous job.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status of a vCD Cluster asynchronous job.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "deprecated": true, + "x-group": "vcd_cluster" + } + }, + "/hyperv/hierarchy/{id}/children": { + "get": { + "tags": [ + "/hyperv/hierarchy" + ], + "summary": "Get list of immediate descendant objects", + "description": "Retrieve the list of immediate descendant objects for the specified parent.", + "operationId": "getHypervHierarchyChildren", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the parent Hyper-V hierarchy object. 
To get top-level nodes, use **root** as the ID.", + "required": true, + "type": "string" + }, + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter by ID of effective SLA domain.", + "required": false, + "type": "string" + }, + { + "name": "object_type", + "in": "query", + "description": "Filter by node object type.", + "required": false, + "type": "string", + "enum": [ + "scvmm", + "hostOrCluster", + "cluster", + "host", + "vm" + ] + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter by primary cluster ID, or **local**.", + "required": false, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Limit the number of matches returned.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Ignore these many matches in the beginning.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "name", + "in": "query", + "description": "Search vm by vm name.", + "required": false, + "type": "string" + }, + { + "name": "sla_assignment", + "in": "query", + "description": "Filter by SLA assignment type.", + "required": false, + "type": "string", + "enum": [ + "Derived", + "Direct", + "Unassigned" + ] + }, + { + "name": "status", + "in": "query", + "description": "Filter by status.", + "required": false, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Attribute to sort the results on.", + "required": false, + "type": "string", + "enum": [ + "effectiveSlaDomainName", + "name", + "descendentCount.cluster", + "descendentCount.host", + "descendentCount.vm" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending or descending.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + }, + { + "name": "snappable_status", + "in": "query", + "description": "Filters Hyper-V hierarchy objects based on the specified query value.", + "required": false, + "type": "string", + "enum": [ + "Protectable" + ] + } + ], + "responses": { + "200": { + "description": "Summary list of descendant objects.", + "schema": { + "$ref": "#/definitions/HypervHierarchyObjectSummaryListResponse" + } + } + }, + "x-group": "hyperv_hierarchy" + } + }, + "/organization/{id}/managed_volume": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get information for authorized Managed Volumes in an organization", + "description": "Retrieve summary information for the explicitly authorized Managed Volumes of an organization. Information for a Managed Volume is only included when the organization has an explicit authorization for the Managed Volume. This endpoint returns an empty list for the default global organization.", + "operationId": "getExplicitlyAuthorizedManagedVolumes", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Get Managed Volumes.", + "schema": { + "$ref": "#/definitions/ManagedVolumeSummaryListResponse" + } + } + }, + "x-group": "organization_managed_volume" + } + }, + "/mssql/availability_group/{id}": { + "get": { + "tags": [ + "/mssql" + ], + "summary": "(DEPRECATED) Returns detailed information for a Microsoft SQL availability group", + "description": "Returns a detailed view of a Microsoft SQL availability group. 
This endpoint will be removed in CDM v6.0 in favor of `GET v1/mssql/availability_group/{id}`.", + "operationId": "getMssqlAvailabilityGroup", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Microsoft SQL availability group to fetch.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returned if the query was successful.", + "schema": { + "$ref": "#/definitions/MssqlAvailabilityGroupDetail" + } + } + }, + "deprecated": true, + "x-group": "mssql_availability_group" + }, + "patch": { + "tags": [ + "/mssql" + ], + "summary": "(DEPRECATED) Update a Microsoft SQL availability group", + "description": "Update a Microsoft SQL availability group with the specified properties. This endpoint will be removed in CDM v6.0 in favor of `PATCH v1/mssql/availability_group/{id}`.", + "operationId": "updateMssqlAvailabilityGroup", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Microsoft SQL availability group to update.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "update_properties", + "description": "Properties to update.", + "required": true, + "schema": { + "$ref": "#/definitions/MssqlAvailabilityGroupUpdate" + } + } + ], + "responses": { + "200": { + "description": "Returned if the update was successful.", + "schema": { + "$ref": "#/definitions/MssqlAvailabilityGroupDetail" + } + } + }, + "deprecated": true, + "x-group": "mssql_availability_group" + } + }, + "/storage/array/{id}/refresh": { + "post": { + "tags": [ + "/storage/array" + ], + "summary": "Refresh storage array metadata", + "description": "Create an asynchronous job to refresh the metadata for a specified storage array object.", + "operationId": "createStorageArrayRefresh", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a storage array object.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status of an asynchronous request to update storage array metadata.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "storage_array" + } + }, + "/user/{id}/organization": { + "get": { + "tags": [ + "/user" + ], + "summary": "Get Organization access list", + "description": "Retrieve a list of all of the Organization objects that the current session user can access.", + "operationId": "queryOrganizationsByUser", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of user. The value must be me, which stands for the current session user.", + "required": true, + "type": "string", + "default": "me" + }, + { + "name": "name", + "in": "query", + "description": "An optional field that specifies a string. 
The returned list of organizations is filtered to show only organizations with names that contain the specified string.", + "required": false, + "type": "string" + }, + { + "name": "offset", + "in": "query", + "description": "Starting offset of the returned results.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "limit", + "in": "query", + "description": "Maximum number of results to return.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + } + ], + "responses": { + "200": { + "description": "Access list of Organization objects.", + "schema": { + "$ref": "#/definitions/OrganizationSummaryListResponse" + } + } + }, + "x-group": "organization" + } + }, + "/event_series/status": { + "post": { + "tags": [ + "/event_series" + ], + "summary": "Get information for several events", + "description": "Use the event series IDs and jobInstanceIds of several events to retrieve information about each event, including ID, progress, cancelable, cancel pending, and the number of times the event has been attempted. JobInstanceId can be left unspecified in case of non job related events, but is expected for all job related events.", + "operationId": "bulkEventStatus", + "parameters": [ + { + "in": "body", + "name": "ids", + "description": "Event series detail for the event.", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/EventSeriesDetail" + } + } + } + ], + "responses": { + "200": { + "description": "Returns information for the events specified by event_series_ids.", + "schema": { + "$ref": "#/definitions/EventStatusSummaryListResponse" + } + } + }, + "x-group": "events" + } + }, + "/volume_group/{id}/search": { + "get": { + "tags": [ + "/volume_group" + ], + "summary": "Search for file in Volume Group", + "description": "Search for a file within the Volume Group. Search via full path prefix or filename prefix.", + "operationId": "searchVolumeGroup", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Volume Group.", + "required": true, + "type": "string" + }, + { + "name": "path", + "in": "query", + "description": "The path query. 
Either path prefix or filename prefix.", + "required": true, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Maximum number of entries in the response.", + "required": false, + "type": "integer", + "format": "int32" + }, + { + "name": "cursor", + "in": "query", + "description": "Pagination cursor returned by the previous request.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Search results.", + "schema": { + "$ref": "#/definitions/SearchResponseListResponse" + } + } + }, + "x-group": "volume_group" + } + }, + "/search/global": { + "post": { + "tags": [ + "/search" + ], + "summary": "Search globally for a file", + "description": "Perform a search for a file across multiple snappables filtering by regular expression.", + "operationId": "globalSearchApiQuery", + "parameters": [ + { + "in": "body", + "name": "query", + "description": "Global search query.", + "required": true, + "schema": { + "$ref": "#/definitions/GlobalSearchApiRequest" + } + } + ], + "responses": { + "200": { + "description": "Search results.", + "schema": { + "$ref": "#/definitions/GlobalSearchApiResponseListResponse" + } + } + }, + "x-group": "search" + } + }, + "/managed_volume": { + "post": { + "tags": [ + "/managed_volume" + ], + "summary": "Create a managed volume", + "description": "Create a managed volume.", + "operationId": "createManagedVolume", + "parameters": [ + { + "in": "body", + "name": "managed_volume_config", + "description": "Managed volume configuration.", + "required": true, + "schema": { + "$ref": "#/definitions/ManagedVolumeConfig" + } + } + ], + "responses": { + "201": { + "description": "Details about the created managed volume.", + "schema": { + "$ref": "#/definitions/ManagedVolumeSummary" + } + } + }, + "x-group": "managed_volume" + }, + "get": { + "tags": [ + "/managed_volume" + ], + "summary": "Get summary information for managed volumes", + "description": "Retrieve summary information for all managed volumes that are created within a Rubrik cluster.", + "operationId": "queryManagedVolume", + "parameters": [ + { + "name": "is_relic", + "in": "query", + "description": "Filter by isRelic field. Return both relic and non-relic managed volumes if this query is not set. Relics are deleted data sources with snapshots that are still available.", + "required": false, + "type": "boolean" + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filters the summary information based on the Rubrik cluster specified by the value of primary_cluster_id. Use 'local' for the Rubrik cluster that is hosting the current REST API session.", + "required": false, + "type": "string" + }, + { + "name": "name", + "in": "query", + "description": "Filter by name field. Returns only the managed volume objects for which the complete name value exactly matches the complete search string value.", + "required": false, + "type": "string" + }, + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter by ID of effective SLA domain.", + "required": false, + "type": "string" + }, + { + "name": "managed_volume_type", + "in": "query", + "description": "Filter by the type of managed volume.", + "required": false, + "type": "string", + "enum": [ + "AlwaysMounted", + "SlaBased" + ] + }, + { + "name": "sort_by", + "in": "query", + "description": "Specifies the managed volume attribute to use in sorting the managed volume summary information. 
Performs an ASCII sort of the summary information using the specified attribute, in the order specified.\nValid attribute is 'name'.", + "required": false, + "type": "string", + "enum": [ + "name" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending or descending.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "Summary information for managed volumes.", + "schema": { + "$ref": "#/definitions/ManagedVolumeSummaryListResponse" + } + } + }, + "x-group": "managed_volume" + } + }, + "/vmware/config/datastore_freespace_threshold": { + "get": { + "tags": [ + "/vmware/config" + ], + "summary": "Determine the configured value for the datastore freespace threshold", + "description": "Determine the value for the datastore freespace threshold for VMware virtual machine backups.", + "operationId": "queryDatastoreFreespaceThreshold", + "parameters": [ + { + "name": "vm_id", + "in": "query", + "description": "ID of the VMware virtual machine.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Threshold value of the freespace on the datastores that allow the VMware virtual machine backup.", + "schema": { + "$ref": "#/definitions/VmwareDatastoreFreespaceThreshold" + } + } + }, + "x-group": "vmware_config" + } + }, + "/node_management/hostname": { + "get": { + "tags": [ + "/node_management" + ], + "summary": "hostname supporting the current server", + "description": "hostname supporting the current server.", + "operationId": "getHostname", + "parameters": [], + "responses": { + "200": { + "description": "TODO.", + "schema": { + "type": "string" + } + } + }, + "x-group": "internal_node_management" + } + }, + "/polaris/replication/source/{id}/refresh": { + "post": { + "tags": [ + "/polaris/replication/source" + ], + "summary": "Schedules a Polaris replication source refresh", + "description": "Triggers a replication source refresh from remote Polaris site to this cluster. This CDM cluster should go to the post URL and fetch the metadata package prepared by Polaris. 
Provide the managed ID of the Polaris replication source, and customer object store credentials where the metadata is stored.", + "operationId": "schedulePolarisReplicationSourceRefresh", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Managed ID of the Polaris replication source.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "definition", + "description": "Polaris replication source refresh definition.", + "required": true, + "schema": { + "$ref": "#/definitions/PolarisReplicationSourceRefreshDefinition" + } + } + ], + "responses": { + "202": { + "description": "Polaris metadata refresh successfully scheduled.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "polaris_replication_source" + } + }, + "/polaris/app_blueprint/request/{id}": { + "get": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Get Blueprint job status", + "description": "Retrieve the details of a specified asynchronous job for a Blueprint.", + "operationId": "getAppBlueprintAsyncRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an asynchronous job.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status of a Blueprint asynchronous job.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "app_blueprint" + } + }, + "/mssql/db/{id}/compatible_instance": { + "get": { + "tags": [ + "/mssql" + ], + "summary": "(DEPRECATED) Returns instances that are compatible for a Microsoft SQL database recovery", + "description": "Returns all compatible instances for export for a given recovery time. This endpoint will be removed in CDM v6.0 in favor of `GET v1/mssql/db/{id}/compatible_instance`.", + "operationId": "getCompatibleMssqlInstances", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Microsoft SQL database.", + "required": true, + "type": "string" + }, + { + "name": "recovery_time", + "in": "query", + "description": "Time to recover to. The date-time string should be in ISO8601 format, such as \"2016-01-01T01:23:45.678Z\". If this is not specified, the latest recoverable time will be used.", + "required": false, + "type": "string", + "format": "date-time" + }, + { + "name": "recovery_type", + "in": "query", + "description": "Recovery type.", + "required": true, + "type": "string", + "enum": [ + "Mount", + "Export", + "Restore" + ] + } + ], + "responses": { + "200": { + "description": "Returns summary information for all compatible Microsoft SQL instances for export.", + "schema": { + "$ref": "#/definitions/MssqlInstanceSummaryListResponse" + } + } + }, + "deprecated": true, + "x-group": "mssql" + } + }, + "/aws/hierarchy/{id}/descendants": { + "get": { + "tags": [ + "/aws/hierarchy" + ], + "summary": "Get full hierarchy for AWS account object", + "description": "Retrieve summary information for all AWS instance objects that are descendants of a specified AWS account object.", + "operationId": "queryAwsHierarchyDescendants", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of an AWS account object. 
To specify the primary AWS account object, use root.", + "required": true, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Return only the specified number of objects from the query results.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Return a subset of the query results, starting with the specified number in the sequence of results.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "name", + "in": "query", + "description": "Search for AWS account and instance objects by matching a string to a part of the name of the AWS account object and to a part of the name or ID of the AWS instance object.", + "required": false, + "type": "string" + }, + { + "name": "region", + "in": "query", + "description": "Filter the query results using the region of the AWS instance object.", + "required": false, + "type": "string" + }, + { + "name": "sla_assignment", + "in": "query", + "description": "Specifies the method used to apply an SLA Domain to an object. Possible values are Derived, Direct, and Unassigned.", + "required": false, + "type": "string", + "enum": [ + "Derived", + "Direct", + "Unassigned" + ] + }, + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter query results using the effective SLA Domain ID of the objects.", + "required": false, + "type": "string" + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter the query results by using the ID of the primary Rubrik cluster. Use local to refer to the Rubrik cluster that is hosting the current API session.", + "required": false, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Specify an attribute to use to sort the query results.", + "required": false, + "type": "string", + "default": "name", + "enum": [ + "name", + "accountName", + "instanceId", + "instanceName", + "instanceType", + "region", + "effectiveSlaDomainName", + "slaAssignment", + "descendantCountEc2Instance" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Specify the sort order to use when sorting query results.", + "required": false, + "type": "string", + "default": "asc", + "enum": [ + "asc", + "desc" + ] + }, + { + "name": "snappable_status", + "in": "query", + "description": "Determines whether to fetch AWS hierarchy objects with additional privilege checks.", + "required": false, + "type": "string", + "enum": [ + "Protectable" + ] + } + ], + "responses": { + "200": { + "description": "Descendant objects of an AWS account object.\n", + "schema": { + "$ref": "#/definitions/AwsHierarchyObjectSummaryListResponse" + } + } + }, + "x-group": "aws_hierarchy" + } + }, + "/job/type/clean_up_references": { + "post": { + "tags": [ + "/job" + ], + "summary": "REQUIRES SUPPORT TOKEN - Creates a job to clean up references (semaphores, throttles, replication targets)", + "description": "REQUIRES SUPPORT TOKEN - Creates a job to clean up references (semaphores, throttles, replication targets). 
A support token is required for this operation.", + "operationId": "createCleanUpReferences", + "parameters": [], + "responses": { + "201": { + "description": "Returned if the job creation succeeds.", + "schema": { + "type": "string" + } + }, + "422": { + "description": "Returned if the job creation fails.", + "schema": { + "$ref": "#/definitions/RequestFailedException" + } + } + }, + "x-group": "internal_job" + } + }, + "/vmware/config/set_esx_subnets": { + "patch": { + "tags": [ + "/vmware/config" + ], + "summary": "Set ESXi subnets", + "description": "Set the subnets that should be used to reach ESXi hosts.", + "operationId": "setEsxSubnets", + "parameters": [ + { + "in": "body", + "name": "new_esx_subnets", + "description": "Preferred subnets to reach to ESX hosts. The format should be the comma separated list without any spaces, for example, 192.168.2.10/24,10.255.0.2/16.", + "required": true, + "schema": { + "$ref": "#/definitions/EsxSubnets" + } + } + ], + "responses": { + "200": { + "description": "The updated preferred subnets to reach ESX hosts.", + "schema": { + "$ref": "#/definitions/EsxSubnets" + } + } + }, + "x-group": "vmware_config" + } + }, + "/report/{id}/csv_link": { + "get": { + "tags": [ + "/report" + ], + "summary": "Get the link for CSV file of the report specified by id", + "description": "Get the link to a CSV file for a report. This is a synchronous operation.", + "operationId": "getCsvDownloadLink", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the report.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Download link of the requested CSV file.", + "schema": { + "type": "string" + } + } + }, + "x-group": "internal_report" + } + }, + "/snapshot/archive_garbage_collect_status": { + "post": { + "tags": [ + "/snapshot" + ], + "summary": "REQUIRES SUPPORT TOKEN - Check whether snapshots have been GC''d on archive", + "description": "REQUIRES SUPPORT TOKEN - Check whether snapshots have been GC''d on archive. A support token is required for this operation.", + "operationId": "snapshotArchiveGarbageCollectionStatus", + "parameters": [ + { + "in": "body", + "name": "config", + "description": "List of snapshot IDs and location ID to check if GC'd.", + "required": true, + "schema": { + "$ref": "#/definitions/InternalSnapshotArchiveGarbageCollectStatusConfig" + } + } + ], + "responses": { + "200": { + "description": "TODO.", + "schema": { + "$ref": "#/definitions/InternalGarbageCollectStatusResult" + } + } + }, + "x-group": "internal_snapshot" + } + }, + "/cloud_on/azure/subscription": { + "post": { + "tags": [ + "/cloud_on" + ], + "summary": "Get all the subscriptions for an azure tenant", + "description": "Get all the subscriptions for an azure tenant accessible to a particular client app.\n", + "operationId": "getAzureSubscriptions", + "parameters": [ + { + "in": "body", + "name": "azure_subscription_request", + "description": "An Azure subscription request that contains the credentials required to find the Azure subscription group and compute proxy configuration.\n", + "required": true, + "schema": { + "$ref": "#/definitions/AzureSubscriptionRequest" + } + } + ], + "responses": { + "200": { + "description": "Returns a list of Azure subscription ID/name pairs. 
The ID is the unique identifier and the name is the display name.\n", + "schema": { + "$ref": "#/definitions/IdNamePairListResponse" + } + } + }, + "x-group": "cloud_instance" + } + }, + "/organization/{id}/envoy/{envoy_id}": { + "delete": { + "tags": [ + "/organization" + ], + "summary": "Remove Rubrik Envoy", + "description": "Remove a Rubrik Envoy object from an organization and delete the object from the Rubrik cluster.", + "operationId": "deleteEnvoy", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an organization object.", + "required": true, + "type": "string" + }, + { + "name": "envoy_id", + "in": "path", + "description": "ID assigned to a Rubrik Envoy object.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Successfully deleted the specified Rubrik Envoy object." + } + }, + "x-group": "organization_resource" + }, + "get": { + "tags": [ + "/organization" + ], + "summary": "Get a Rubrik Envoy object", + "description": "Retrieve the details of a specified Rubrik Envoy object.", + "operationId": "getEnvoy", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an organization object.", + "required": true, + "type": "string" + }, + { + "name": "envoy_id", + "in": "path", + "description": "ID assigned to a Rubrik Envoy object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Details for a specified Rubrik Envoy object.", + "schema": { + "$ref": "#/definitions/EnvoyDetail" + } + } + }, + "x-group": "organization_resource" + }, + "patch": { + "tags": [ + "/organization" + ], + "summary": "Update Rubrik Envoy", + "description": "Change one or more of the properties of a specified Rubrik Envoy object.", + "operationId": "updateEnvoy", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an organization object.", + "required": true, + "type": "string" + }, + { + "name": "envoy_id", + "in": "path", + "description": "ID assigned to a Rubrik Envoy object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "update_properties", + "description": "Properties to assign to the specified Rubrik Envoy object.", + "required": true, + "schema": { + "$ref": "#/definitions/EnvoyUpdate" + } + } + ], + "responses": { + "200": { + "description": "Details for a Rubrik Envoy object.", + "schema": { + "$ref": "#/definitions/EnvoyDetail" + } + } + }, + "x-group": "organization_resource" + } + }, + "/role/authorization_query": { + "post": { + "tags": [ + "/authorization" + ], + "summary": "Query explicit authorizations", + "description": "Query the current list of explicit authorizations.", + "operationId": "queryAuthorizations", + "parameters": [ + { + "in": "body", + "name": "authorization_query", + "description": "Query describing what authorizations to retrieve.", + "required": true, + "schema": { + "$ref": "#/definitions/AuthorizationQuery" + } + } + ], + "responses": { + "200": { + "description": "Summary of authorizations granted to the roles.", + "schema": { + "$ref": "#/definitions/RoleAuthorizationSummaryListResponse" + } + } + }, + "x-group": "role_authorization" + } + }, + "/folder/{id}": { + "get": { + "tags": [ + "/folder" + ], + "summary": "Details of a folder", + "description": "Details of a folder.", + "operationId": "getFolder", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Folder.", + "required": true, + "type": "string" + } + ], + 
"responses": { + "200": { + "description": "Returns details about a folder.", + "schema": { + "$ref": "#/definitions/FolderDetail" + } + } + }, + "x-group": "folder" + } + }, + "/vmware/vcenter/tag/{tag_id}": { + "get": { + "tags": [ + "/vmware/vcenter" + ], + "summary": "Get Tag by Id", + "description": "Get a Tag in a vCenter by its ID.", + "operationId": "getVsphereTag", + "parameters": [ + { + "name": "tag_id", + "in": "path", + "description": "ID of the Tag.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "A vCenter Tag.", + "schema": { + "$ref": "#/definitions/VsphereTag" + } + } + }, + "x-group": "vcenter" + } + }, + "/archive/dca/reconnect": { + "post": { + "tags": [ + "/archive" + ], + "summary": "Reconnect to a specific DCA archival location", + "description": "Reconnect to a specific DCA archival location. Initiates an asynchronous job to connect to the archival location. This operation is deprecated. Use /archive/dca/reader/connect instead.", + "operationId": "reconnectDcaLocation", + "parameters": [ + { + "in": "body", + "name": "definition", + "description": "Object containing information about the archival location.", + "required": true, + "schema": { + "$ref": "#/definitions/DcaLocationDefinition" + } + } + ], + "responses": { + "202": { + "description": "Returns the job instance id for the reconnect job.", + "schema": { + "$ref": "#/definitions/JobScheduledResponse" + } + } + }, + "deprecated": true, + "x-group": "archival" + } + }, + "/cloud_on/azure/subnet": { + "get": { + "tags": [ + "/cloud_on" + ], + "summary": "Get a list of subnets queried by Azure location ID", + "operationId": "queryAzureSubnet", + "parameters": [ + { + "name": "data_location_id", + "in": "query", + "description": "data location ID.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns list of subnet IDs and respective Vnet IDs in Azure.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Subnet" + } + } + } + }, + "x-group": "cloud_instance" + } + }, + "/cloud_on/aws/image": { + "get": { + "tags": [ + "/cloud_on" + ], + "summary": "Query for a list of a AWS cloud images", + "description": "Query for a list of a AWS cloud images.", + "operationId": "queryAwsPublicCloudMachineImage", + "parameters": [ + { + "name": "snappable_id", + "in": "query", + "description": "Filters AWS cloud images by snappable ID.", + "required": false, + "type": "string" + }, + { + "name": "location_id", + "in": "query", + "description": "Filters AWS cloud images by location ID.", + "required": false, + "type": "string" + }, + { + "name": "offset", + "in": "query", + "description": "Ignore these many matches in the beginning.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "limit", + "in": "query", + "description": "Limit the number of matches returned. 
Default is to return all available records.\n", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "snappable_name", + "in": "query", + "description": "Filters results to images with source snappable names containing the given query.\n", + "required": false, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Sort the result by given attribute.", + "required": false, + "type": "string", + "default": "SourceVmName", + "enum": [ + "SourceVmName", + "LocationName", + "InstanceType", + "CreatedBy", + "Status", + "SnapshotTime", + "CreationTime" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "The sort order. Defaults to asc if not specified.", + "required": false, + "type": "string", + "default": "asc", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "Returns summary information for all AWS cloud images.", + "schema": { + "$ref": "#/definitions/AwsImageSummaryListResponse" + } + } + }, + "x-group": "cloud_instance" + } + }, + "/report/request/{id}": { + "get": { + "tags": [ + "/report" + ], + "summary": "Get asynchronous request details for data source CSV download job", + "description": "Get the details of an asynchronous request that generates data source CSV download file.", + "operationId": "getDataSourceCsvAsyncRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of an asynchronous request.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Status of an asynchronous data source CSV request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "internal_report" + } + }, + "/vcd/vapp/{id}/snapshot": { + "post": { + "tags": [ + "/vcd/vapp" + ], + "summary": "(DEPRECATED) Create an on-demand snapshot for a vApp", + "description": "Start an asynchronous job to create an on-demand snapshot for a specified vApp object. This endpoint will be removed in CDM v6.1 in favor of `POST v1/vcd/vapp/{id}/snapshot`.", + "operationId": "createOnDemandSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a vApp object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for the on-demand backup of a specified vApp object.", + "required": false, + "schema": { + "$ref": "#/definitions/BaseOnDemandSnapshotConfig" + } + } + ], + "responses": { + "202": { + "description": "Status of async job for an on-demand snapshot of a vApp.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "deprecated": true, + "x-group": "vcd_vapp" + }, + "delete": { + "tags": [ + "/vcd/vapp" + ], + "summary": "(DEPRECATED) Delete all snapshots of vApp", + "description": "Delete all snapshots for a specified vApp object. This endpoint will be removed in CDM v6.1 in favor of `DELETE v1/vcd/vapp/{id}/snapshot`.", + "operationId": "deleteVappSnapshots", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a vApp object.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Snapshots successfully deleted." + } + }, + "deprecated": true, + "x-group": "vcd_vapp" + }, + "get": { + "tags": [ + "/vcd/vapp" + ], + "summary": "(DEPRECATED) Get list of snapshots of vApp", + "description": "Retrieve summary information for each of the snapshot objects of a specified vApp object. 
This endpoint will be removed in CDM v6.1 in favor of `GET v1/vcd/vapp/{id}/snapshot`.", + "operationId": "queryVappSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a vApp object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Summary snapshot information for a vApp object.", + "schema": { + "$ref": "#/definitions/VcdVappSnapshotSummaryListResponse" + } + } + }, + "deprecated": true, + "x-group": "vcd_vapp" + } + }, + "/oracle/db": { + "get": { + "tags": [ + "/oracle" + ], + "summary": "(DEPRECATED) Get summary information for Oracle databases", + "description": "Retrieve an array containing summary information for the Oracle database objects on the Rubrik cluster. This endpoint will be removed in Rubrik CDM 7.0 in favor of `GET v1/oracle/db`.", + "operationId": "queryOracleDb", + "parameters": [ + { + "name": "name", + "in": "query", + "description": "Filter a response by making an infix comparison of the database name, SID and tablespaces in the response with the specified value.", + "required": false, + "type": "string" + }, + { + "name": "sla_assignment", + "in": "query", + "description": "Limit a response to the results that have the specified SLA Domain assignment type.", + "required": false, + "type": "string", + "enum": [ + "Derived", + "Direct", + "Unassigned" + ] + }, + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter by effective SLA Domain ID.", + "required": false, + "type": "string" + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Limit a response to the results that have the specified primary cluster value.", + "required": false, + "type": "string" + }, + { + "name": "is_relic", + "in": "query", + "description": "Limit a response to the results that have the specified isRelic value.", + "required": false, + "type": "boolean" + }, + { + "name": "is_live_mount", + "in": "query", + "description": "Limit a response to the results that have the specified isLiveMount value.", + "required": false, + "type": "boolean" + }, + { + "name": "limit", + "in": "query", + "description": "Limit the summary information to a specified maximum number of matches. Optionally, use with offset to start the count at a specified point. Optionally, use with sort_by to perform sort on given attributes. Include sort_order to determine the ascending or descending direction of sort.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Starting position in the list of matches. The response includes the specified numbered entry and all higher numbered entries. Use with limit to retrieve the response as smaller groups of entries, for example for paging of results.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "sort_by", + "in": "query", + "description": "Specifies a comma-separated list of attributes to use in sorting the matches. 
Performs an ASCII sort of the values in the response using each specified attribute, in the order specified.", + "required": false, + "type": "string", + "enum": [ + "effectiveSlaDomainName", + "name" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending or descending.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + }, + { + "name": "include_backup_task_info", + "in": "query", + "description": "Include backup task information in response.", + "required": false, + "type": "boolean", + "default": false + } + ], + "responses": { + "200": { + "description": "Successful query results.", + "schema": { + "$ref": "#/definitions/OracleDbSummaryListResponse" + } + } + }, + "deprecated": true, + "x-group": "oracle_db" + } + }, + "/volume_group/snapshot/{id}/download": { + "post": { + "tags": [ + "/volume_group" + ], + "summary": "Creates a download from archival request", + "description": "Download a snapshot from archival.", + "operationId": "createDownloadSnapshotForVolumeGroup", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status for the download request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "volume_group" + } + }, + "/vmware/data_center/{id}": { + "get": { + "tags": [ + "/vmware/data_center" + ], + "summary": "Gets details about the specific DataCenter", + "description": "Returns details about the specific DataCenter.", + "operationId": "getDataCenter", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the data center that needs to be fetched.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns the data center object matching given ID.", + "schema": { + "$ref": "#/definitions/DataCenterDetail" + } + }, + "404": { + "description": "Returned if data center with given ID cannot be found." + } + }, + "x-group": "data_center" + } + }, + "/polaris/replication/source": { + "post": { + "tags": [ + "/polaris/replication/source" + ], + "summary": "Configure a Polaris account as a replication source", + "description": "Configure a Polaris account as a replication source to this cluster. 
Provide Polaris ID (account database name), which is a global unique ID that representing this Polaris account, account ID and account name.", + "operationId": "addPolarisReplicationSource", + "parameters": [ + { + "in": "body", + "name": "definition", + "description": "Polaris Replication source definition.", + "required": true, + "schema": { + "$ref": "#/definitions/PolarisReplicationSourceDefinition" + } + } + ], + "responses": { + "201": { + "description": "Polaris source successfully added.", + "schema": { + "$ref": "#/definitions/PolarisReplicationSourceSummary" + } + }, + "422": { + "description": "Returned if the request fails.", + "schema": { + "$ref": "#/definitions/RequestFailedException" + } + } + }, + "x-group": "polaris_replication_source" + } + }, + "/cluster/{id}/trial_edge": { + "patch": { + "tags": [ + "/cluster" + ], + "summary": "Extend trial", + "description": "Extend the trial period.", + "operationId": "extendTrial", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "Returns the days and extensions left for the trial.", + "schema": { + "$ref": "#/definitions/EdgeTrialStatus" + } + } + }, + "x-group": "cluster" + }, + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get information about Rubrik Edge Trial", + "description": "Query Rubrik Edge to confirm whether it is the Trial version and to obtain Trial-related information.", + "operationId": "getEdgeTrialStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "The ID assigned to a Rubrik Edge instance.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Rubrik Edge Trial information.", + "schema": { + "$ref": "#/definitions/EdgeTrialStatus" + } + } + }, + "x-group": "cluster", + "x-unauthenticated": true + } + }, + "/network_throttle": { + "get": { + "tags": [ + "/network_throttle" + ], + "summary": "Get network throttles", + "description": "Retrieve information about the network throttles that are configured for the specified Rubrik cluster.", + "operationId": "queryNetworkThrottle", + "parameters": [ + { + "name": "resource_id", + "in": "query", + "description": "Filter network throttle information to only include the specified resource.", + "required": false, + "type": "string", + "enum": [ + "ArchivalEgress", + "ReplicationEgress" + ] + } + ], + "responses": { + "200": { + "description": "List of network throttles.", + "schema": { + "$ref": "#/definitions/NetworkThrottleSummaryListResponse" + } + } + }, + "x-group": "network_throttle" + } + }, + "/storage/array/{id}": { + "delete": { + "tags": [ + "/storage/array" + ], + "summary": "Remove a specific storage array", + "description": "Create an asynchronous job for removing a specified storage array object.", + "operationId": "deleteStorageArrayById", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the storage array to remove.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status of a request for an asynchronous job to remove a storage array object.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "storage_array" + }, + "get": { + "tags": [ + "/storage/array" + ], + "summary": "Get information about a specific storage array", + "description": "Get information about a specific storage array.", + 
"operationId": "getStorageArray", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the storage array.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns JSON object containing information storage array.", + "schema": { + "$ref": "#/definitions/StorageArrayDetail" + } + } + }, + "x-group": "storage_array" + }, + "put": { + "tags": [ + "/storage/array" + ], + "summary": "Update a specific storage array", + "description": "Update the host ip, username, and/or password of given storage array.", + "operationId": "updateStorageArray", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the storage array to update.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "definition", + "description": "JSON object for storage array definition.", + "required": true, + "schema": { + "$ref": "#/definitions/StorageArrayDefinition" + } + } + ], + "responses": { + "200": { + "description": "Returns the updated storage array object.", + "schema": { + "$ref": "#/definitions/StorageArrayDetail" + } + } + }, + "x-group": "storage_array" + } + }, + "/managed_volume/{id}/snapshot": { + "get": { + "tags": [ + "/managed_volume" + ], + "summary": "Get summary information for snapshots of a managed volume", + "description": "Returns a list of summary information for snapshots of a managed volume.", + "operationId": "queryManagedVolumeSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the managed volume.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns summary information for snapshots.", + "schema": { + "$ref": "#/definitions/ManagedVolumeSnapshotSummaryListResponse" + } + } + }, + "x-group": "managed_volume" + }, + "post": { + "tags": [ + "/managed_volume" + ], + "summary": "Take an on-demand snapshot of an SLA Managed Volume", + "description": "Create a job for an on-demand snapshot of an SLA Managed Volume. The response returns a request ID. To see the status of the request, poll 'managed-volume/request/{id}' with the request ID obtained in the response.", + "operationId": "takeManagedVolumeOnDemandSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the SLA Managed Volume.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for the on-demand snapshot.", + "required": false, + "schema": { + "$ref": "#/definitions/ManagedVolumeSnapshotConfig" + } + } + ], + "responses": { + "202": { + "description": "Status for the on-demand snapshot request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "managed_volume" + } + }, + "/organization/{id}/windows": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get information for authorized Windows hosts in an organization", + "description": "Retrieve summary information for the explicitly authorized Windows hosts of an organization. Information for a Windows host is only included when the organization has an explicit authorization for the host. 
This endpoint returns an empty list for the default global organization.", + "operationId": "getExplicitlyAuthorizedWindowsHosts", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Get Windows hosts.", + "schema": { + "$ref": "#/definitions/ManagedObjectSummaryListResponse" + } + } + }, + "x-group": "organization_host" + } + }, + "/cluster/{id}/registration_details": { + "post": { + "tags": [ + "/cluster" + ], + "summary": "Submit the registration details for a Rubrik cluster", + "description": "Submit the registration details for the specified Rubrik cluster.", + "operationId": "processRegistrationDetails", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of a Rubrik cluster, or use *me* for the Rubrik cluster that is hosting the current session.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "registration_details", + "description": "Registration details assigned to a Rubrik cluster.", + "required": true, + "schema": { + "$ref": "#/definitions/RegistrationDetails" + } + } + ], + "responses": { + "204": { + "description": "Successfully applied the given registration details." + } + }, + "x-group": "cluster" + } + }, + "/config/usersettable_shield": { + "get": { + "tags": [ + "/config" + ], + "summary": "Fetch the global Shield configuration", + "description": "Fetch the global Shield configuration.", + "operationId": "getUserSettableShieldConfig", + "parameters": [], + "responses": { + "200": { + "description": "global configuration.", + "schema": { + "$ref": "#/definitions/UserSettableGlobalShieldConfig" + } + } + }, + "x-group": "internal_config" + }, + "patch": { + "tags": [ + "/config" + ], + "summary": "Update the global Shield configuration", + "description": "Update the global Shield configuration.", + "operationId": "updateUserSettableShieldConfig", + "parameters": [ + { + "in": "body", + "name": "new_values", + "description": "New configuration values.", + "required": true, + "schema": { + "$ref": "#/definitions/UserSettableGlobalShieldConfig" + } + } + ], + "responses": { + "200": { + "description": "global configuration.", + "schema": { + "$ref": "#/definitions/UserSettableGlobalShieldConfig" + } + } + }, + "x-group": "internal_config" + } + }, + "/stats/replication/incoming/time_series": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get a timeseries of total incoming bandwidth from the replication clusters", + "description": "Get the total incoming bandwidth from the replication clusters.", + "operationId": "replicationBandwidthIncoming", + "parameters": [ + { + "name": "range", + "in": "query", + "description": "Range for timeseries. eg: -1h, -1min, etc. 
Default value is -1h.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "TimeSeries depicting bytes per second.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/TimeStat" + } + } + } + }, + "x-group": "stats" + } + }, + "/nutanix/cluster/request/{id}": { + "get": { + "tags": [ + "/nutanix/cluster" + ], + "summary": "Get Nutanix cluster async request", + "description": "Get details about a Nutanix cluster-related async request.", + "operationId": "getNutanixClusterAsyncRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the request.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Status for the async request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "nutanix_cluster" + } + }, + "/smtp_instance": { + "post": { + "tags": [ + "/smtp_instance" + ], + "summary": "Add SMTP instances", + "description": "Add SMTP instances.", + "operationId": "createSmtpInstance", + "parameters": [ + { + "in": "body", + "name": "instance", + "description": "SMTP instance.", + "required": true, + "schema": { + "$ref": "#/definitions/SmtpInstanceDefinition" + } + } + ], + "responses": { + "201": { + "description": "SMTP instance details.", + "schema": { + "$ref": "#/definitions/SmtpInstanceDetail" + } + } + }, + "x-group": "smtp_instance" + }, + "get": { + "tags": [ + "/smtp_instance" + ], + "summary": "Get a summary of all SMTP instances", + "description": "Get a summary of all SMTP instances.", + "operationId": "querySmtpInstance", + "parameters": [], + "responses": { + "200": { + "description": "List of SMTP instance details.", + "schema": { + "$ref": "#/definitions/SmtpInstanceDetailListResponse" + } + } + }, + "x-group": "smtp_instance" + } + }, + "/cluster/{id}/has_tpm": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Check whether the cluster has a TPM", + "description": "Check whether this Rubrik cluster has a TPM.", + "operationId": "hasTpm", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "True when the Rubrik cluster has a TPM.", + "schema": { + "$ref": "#/definitions/BooleanResponse" + } + } + }, + "x-group": "cluster", + "x-unauthenticated": true + } + }, + "/hyperv/host/{id}": { + "get": { + "tags": [ + "/hyperv/host" + ], + "summary": "Get details of a Hyper-V host", + "description": "Get details of a Hyper-V host.", + "operationId": "getHypervHost", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Hyper-V host.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Details about the Hyper-V host.", + "schema": { + "$ref": "#/definitions/HypervHostDetail" + } + } + }, + "x-group": "hyperv_host" + }, + "patch": { + "tags": [ + "/hyperv/host" + ], + "summary": "Update Hyper-V host", + "description": "Update host with specified properties.", + "operationId": "updateHypervHost", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of Hyper-V host.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "update_properties", + "description": "Properties to update.", + "required": true, + "schema": { + "$ref": "#/definitions/HypervHostUpdate" + } + } + ], + "responses": { + "200": { + "description": "Returns details about the Hyper-V host.", + "schema": { + 
"$ref": "#/definitions/HypervHostDetail" + } + } + }, + "x-group": "hyperv_host" + } + }, + "/oracle/host": { + "get": { + "tags": [ + "/oracle" + ], + "summary": "Get summary information for Oracle Host", + "description": "Retrieve an array containing summary information for the Oracle Host objects on the Rubrik cluster.", + "operationId": "queryOracleHost", + "parameters": [ + { + "name": "name", + "in": "query", + "description": "Filter a response by making an infix comparison of the Oracle Host name in the response with the specified value.", + "required": false, + "type": "string" + }, + { + "name": "sla_assignment", + "in": "query", + "description": "Limit a response to the results that have the specified SLA Domain assignment type.", + "required": false, + "type": "string", + "enum": [ + "Derived", + "Direct", + "Unassigned" + ] + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Limit a response to the results that have the specified primary cluster value.", + "required": false, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Limit the summary information to a specified maximum number of matches. Optionally, use with offset to start the count at a specified point. Optionally, use with sort_by to perform sort on given attributes. Include sort_order to determine the ascending or descending direction of sort.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Starting position in the list of matches. The response includes the specified numbered entry and all higher numbered entries. Use with limit to retrieve the response as smaller groups of entries, for example for paging of results.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "sort_by", + "in": "query", + "description": "Specifies a comma-separated list of attributes to use in sorting the matches. 
Performs an ASCII sort of the values in the response using each specified attribute, in the order specified.", + "required": false, + "type": "string", + "enum": [ + "effectiveSlaDomainName", + "name" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending or descending.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "Successful query results.", + "schema": { + "$ref": "#/definitions/OracleHostSummaryListResponse" + } + } + }, + "x-group": "oracle_host" + } + }, + "/hyperv/vm": { + "get": { + "tags": [ + "/hyperv/vm" + ], + "summary": "Get list of VMs", + "description": "Get summary of all the VMs.", + "operationId": "queryHypervVirtualMachine", + "parameters": [ + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter by ID of effective SLA domain.", + "required": false, + "type": "string" + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter by primary cluster ID, or **local**.", + "required": false, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Limit the number of matches returned.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Ignore these many matches in the beginning.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "name", + "in": "query", + "description": "Search vm by vm name.", + "required": false, + "type": "string" + }, + { + "name": "sla_assignment", + "in": "query", + "description": "Filter by SLA assignment type.", + "required": false, + "type": "string", + "enum": [ + "Derived", + "Direct", + "Unassigned" + ] + }, + { + "name": "is_relic", + "in": "query", + "description": "Filter the summary information based on the relic status of the VM. Returns both relic and non relic if the parameter is not set.", + "required": false, + "type": "boolean" + }, + { + "name": "sort_by", + "in": "query", + "description": "Sort the result by the given attribute.", + "required": false, + "type": "string", + "enum": [ + "effectiveSlaDomainName", + "name" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending or descending.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "Get page summary about virtual machine.", + "schema": { + "$ref": "#/definitions/HypervVirtualMachineSummaryListResponse" + } + } + }, + "x-group": "hyperv_vm" + } + }, + "/oracle/db/{id}/log_backup": { + "post": { + "tags": [ + "/oracle" + ], + "summary": "On-demand log backup for an Oracle database log", + "description": "Create an asynchronous job for an on-demand backup of an Oracle database log. The response includes an ID for the asynchronous job request. 
To see the status of the request, poll /oracle/request/{id}.", + "operationId": "createOnDemandOracleLogBackup", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an Oracle database object.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Request status for an async job to create an on-demand backup of an Oracle database log.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "oracle_db" + } + }, + "/stats/logical_ingest/time_series": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get timeseries on logical data ingestion", + "description": "Get timeseries on logical data ingestion.", + "operationId": "logicalIngest", + "parameters": [], + "responses": { + "200": { + "description": "Returns a timeSeries depicting bytes per second.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/TimeStat" + } + } + } + }, + "x-group": "stats" + } + }, + "/archive/qstar/reconnect": { + "post": { + "tags": [ + "/archive" + ], + "summary": "Reconnect to a QStar archival location", + "description": "Reconnect to a specified QStar archival location. Initiates an asynchronous job to connect to the archival location. This operation is deprecated. Use /archive/qstar/reader/connect instead.", + "operationId": "reconnectQstar", + "parameters": [ + { + "in": "body", + "name": "request", + "description": "Access credentials for the specified QStar archival location.", + "required": true, + "schema": { + "$ref": "#/definitions/QstarLocationDefinitionWithCredential" + } + } + ], + "responses": { + "202": { + "description": "The request ID for an asynchronous request to connect to a QStar archival location as a reader cluster.\n", + "schema": { + "$ref": "#/definitions/JobScheduledResponse" + } + } + }, + "deprecated": true, + "x-group": "archival" + } + }, + "/aws/ec2_instance/snapshot/{id}": { + "delete": { + "tags": [ + "/aws/ec2_instance" + ], + "summary": "Delete EC2 instance snapshot", + "description": "Delete an EC2 instance snapshot by resetting the retention to cause immediate expiration. The snapshot must be an on-demand snapshot or a snapshot from an EC2 instance that is not currently managed.", + "operationId": "deleteAwsEc2InstanceSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of a snapshot object.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Successfully deleted an EC2 snapshot." + } + }, + "x-group": "aws_ec2_instance" + }, + "get": { + "tags": [ + "/aws/ec2_instance" + ], + "summary": "Get details for an EC2 instance snapshot", + "description": "Retrieve detailed information about an EC2 instance snapshot.", + "operationId": "getAwsEc2InstanceSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of a snapshot object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Details of a specified snapshot.", + "schema": { + "$ref": "#/definitions/AwsEc2InstanceSnapshotDetail" + } + } + }, + "x-group": "aws_ec2_instance" + } + }, + "/vcd/vapp/{id}/missed_snapshot": { + "get": { + "tags": [ + "/vcd/vapp" + ], + "summary": "(DEPRECATED) Get details about missed snapshots for a vApp", + "description": "Retrieve the timestamp for each missed snapshot for a specified vApp. 
This endpoint will be removed in CDM v6.1 in favor of `GET v1/vcd/vapp/{id}/missed_snapshot`.", + "operationId": "vcdMissedSnapshots", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the vApp.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Details for missed snapshots for a vApp.", + "schema": { + "$ref": "#/definitions/MissedSnapshotListResponse" + } + } + }, + "deprecated": true, + "x-group": "vcd_vapp" + } + }, + "/node/{id}": { + "get": { + "tags": [ + "/node" + ], + "summary": "Detailed view of a Rubrik node", + "description": "Returns the complete view of a Rubrik node.", + "operationId": "getNode", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the node to load.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Detailed information about the node.", + "schema": { + "$ref": "#/definitions/NodeInfo" + } + } + }, + "x-group": "node" + } + }, + "/stats/cross_compression": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get compression stats for cross increments", + "description": "Get compression stats for cross increments.", + "operationId": "crossCompression", + "parameters": [], + "responses": { + "200": { + "description": "Returns an object with attribute: name(String), key(String), value(String), frequencyInMin(Integer), lastUpdateTime(Date).", + "schema": { + "$ref": "#/definitions/OfflineStatSummary" + } + } + }, + "x-group": "stats" + } + }, + "/storage/array_volume_group/{id}": { + "delete": { + "tags": [ + "/storage/array" + ], + "summary": "Remove a volume group", + "description": "Removes a specified storage array volume group object.", + "operationId": "deleteStorageArrayVolumeGroup", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a storage array volume group object.", + "required": true, + "type": "string" + }, + { + "name": "preserve_snapshots", + "in": "query", + "description": "Flag to indicate whether to preserve snapshots of the storage array volume group or to delete them. Default behavior is to preserve them.", + "required": false, + "type": "boolean" + } + ], + "responses": { + "204": { + "description": "Successfully removed a storage array volume group object." 
+ } + }, + "x-group": "storage_array_volume_group" + }, + "get": { + "tags": [ + "/storage/array" + ], + "summary": "Get volume group details", + "description": "Retrieve the properties of a specified storage array volume group object.", + "operationId": "getStorageArrayVolumeGroup", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a storage array volume group object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Properties of a storage array volume group object.", + "schema": { + "$ref": "#/definitions/StorageArrayVolumeGroupDetail" + } + } + }, + "x-group": "storage_array_volume_group" + }, + "patch": { + "tags": [ + "/storage/array" + ], + "summary": "Update a volume group", + "description": "Update the properties of a specified volume group object.", + "operationId": "patchStorageArrayVolumeGroup", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a storage array volume group object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "volume_group_patch_properties", + "description": "Properties to update for the specified volume group object.", + "required": true, + "schema": { + "$ref": "#/definitions/StorageArrayVolumeGroupPatch" + } + } + ], + "responses": { + "200": { + "description": "Updated properties for a storage array volume group object.", + "schema": { + "$ref": "#/definitions/StorageArrayVolumeGroupDetail" + } + } + }, + "x-group": "storage_array_volume_group" + } + }, + "/cluster/{id}/is_single_node": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get is single node", + "description": "Check whether this Rubrik cluster is configured to be single node.", + "operationId": "isSingleNode", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "True when the Rubrik cluster has a single node.", + "schema": { + "$ref": "#/definitions/BooleanResponse" + } + } + }, + "x-group": "cluster", + "x-unauthenticated": true + } + }, + "/aws/ec2_instance/snapshot/{id}/browse": { + "get": { + "tags": [ + "/aws/ec2_instance" + ], + "summary": "Get directories and files in EC2 instance snapshot", + "description": "Retrieve a directory and file listing for a specified EC2 instance snapshot. The listing starts at a point in the directory hierarchy specified by an absolute path value.", + "operationId": "browseAwsEc2InstanceSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of a snapshot object.", + "required": true, + "type": "string" + }, + { + "name": "path", + "in": "query", + "description": "Absolute path value that specifies a point in the directory hierarchy to begin the directory and file listing.", + "required": true, + "type": "string" + }, + { + "name": "offset", + "in": "query", + "description": "Starting position in the list of path entries contained in the query results, sorted by lexicographical order. 
The response includes the specified numbered entry and all higher numbered entries.", + "required": false, + "type": "integer", + "format": "int32" + }, + { + "name": "limit", + "in": "query", + "description": "Maximum number of entries in the response.", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "responses": { + "200": { + "description": "List of files and directories at the specified path.", + "schema": { + "$ref": "#/definitions/BrowseResponseListResponse" + } + } + }, + "x-group": "aws_ec2_instance" + } + }, + "/cluster/{id}/auto_removed_node/{node_id}/acknowledge": { + "delete": { + "tags": [ + "/cluster" + ], + "summary": "Acknowledge node removal", + "description": "Acknowledge automatic removal of a node from the Rubrik cluster.", + "operationId": "acknowledgeAutoRemovedNode", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster. Use the string \"me\" for a cluster to identify itself.", + "required": true, + "type": "string", + "default": "me" + }, + { + "name": "node_id", + "in": "path", + "description": "ID of the node to acknowledge auto removal of.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Successfully acknowledged auto removal of the node." + } + }, + "x-group": "cluster" + } + }, + "/cluster/{id}/global_manager": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "global", + "description": "global.", + "operationId": "getGlobalManager", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "Global.", + "schema": { + "$ref": "#/definitions/GlobalManagerConnectionInfo" + } + } + }, + "x-group": "cluster" + }, + "put": { + "tags": [ + "/cluster" + ], + "summary": "global", + "description": "global.", + "operationId": "setGlobalManager", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "connection_update", + "description": "connection.", + "required": true, + "schema": { + "$ref": "#/definitions/GlobalManagerConnectionUpdate" + } + } + ], + "responses": { + "200": { + "description": "Global.", + "schema": { + "$ref": "#/definitions/GlobalManagerConnectionInfo" + } + } + }, + "x-group": "cluster" + } + }, + "/sla_domain/{id}/assign": { + "post": { + "tags": [ + "/sla_domain" + ], + "summary": "Assign managed entities to an SLA Domain synchronously", + "description": "Assign managed entities to the specified SLA Domain. The assignment event runs synchronously.", + "operationId": "assignToSlaAndWaitForJob", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the SLA Domain.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "assignment_info", + "description": "A comma-separated list of the IDs of the managed entities being assigned to the SLA Domain.", + "required": true, + "schema": { + "$ref": "#/definitions/SlaDomainAssignmentInfo" + } + } + ], + "responses": { + "204": { + "description": "Assigned managed entities to the specified SLA Domain." 
+ } + }, + "x-group": "sla_domain" + } + }, + "/mssql/db/bulk/snapshot/{id}": { + "get": { + "tags": [ + "/mssql" + ], + "summary": "(DEPRECATED) Returns details for an on-demand backup of multiple Microsoft SQL databases", + "description": "Returns the details for an on-demand backup of multiple Microsoft SQL databases. This only returns details for requests that have successfully finished. To check the status of the request, poll /mssql/request/{id}. This endpoint will be removed in CDM v6.0 in favor of `GET v1/mssql/db/bulk/snapshot/{id}`.", + "operationId": "getOnDemandMssqlBatchBackupResult", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the on-demand backup request.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Result of the on-demand backup request.", + "schema": { + "$ref": "#/definitions/MssqlBatchBackupSummary" + } + } + }, + "deprecated": true, + "x-group": "mssql" + } + }, + "/notification_setting/{id}": { + "delete": { + "tags": [ + "/notification_setting" + ], + "summary": "Delete a Notification Setting", + "description": "Delete a specified Notification Setting.", + "operationId": "deleteNotificationSetting", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of a Notification Setting.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Deleted the specified Notification Setting." + }, + "404": { + "description": "Failed to delete the specified Notification Setting." + } + }, + "x-group": "events" + }, + "get": { + "tags": [ + "/notification_setting" + ], + "summary": "Get a Notification Setting", + "description": "Get a Notification Setting by its ID.", + "operationId": "getNotificationSetting", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of a Notification Setting.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "The specified Notification Setting.", + "schema": { + "$ref": "#/definitions/NotificationSettingSummary" + } + } + }, + "x-group": "events" + }, + "patch": { + "tags": [ + "/notification_setting" + ], + "summary": "Update a Notification Setting", + "description": "Make changes to the parameters of a specified Notification Setting.", + "operationId": "updateNotificationSetting", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of a Notification Setting.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "notification_setting_update", + "description": "Updated parameters for the specified Notification Setting.", + "required": true, + "schema": { + "$ref": "#/definitions/NotificationSettingSummary" + } + } + ], + "responses": { + "200": { + "description": "Information for the updated Notification Setting.", + "schema": { + "$ref": "#/definitions/NotificationSettingSummary" + } + } + }, + "x-group": "events" + } + }, + "/organization/{id}/aws": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get information for authorized AWS resources in an organization", + "description": "Retrieve summary information for the explicitly authorized AWS resources of an organization. Information for a AWS resource is only included when the organization has an explicit authorization for the resource. 
This endpoint returns an empty list for the default global organization.", + "operationId": "getExplicitlyAuthorizedAwsResources", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Get AWS resources.", + "schema": { + "$ref": "#/definitions/AwsHierarchyObjectSummaryListResponse" + } + } + }, + "x-group": "organization_cloud_native" + } + }, + "/aws/hierarchy/{id}/children": { + "get": { + "tags": [ + "/aws/hierarchy" + ], + "summary": "Get direct children of AWS account object", + "description": "Retrieve summary information for all AWS instance objects that are direct children of a specified AWS account object.", + "operationId": "queryAwsHierarchyChildren", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of an AWS account object. To specify the primary AWS account object, use root.", + "required": true, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Return only the specified number of objects from the query results.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Return a subset of the query results, starting with the specified number in the sequence of results.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "name", + "in": "query", + "description": "Search for AWS account and instance objects by matching a string to a part of the name of the AWS account object and to a part of the name or ID of the AWS instance object.", + "required": false, + "type": "string" + }, + { + "name": "region", + "in": "query", + "description": "Filter the query results using the region of the AWS instance object.", + "required": false, + "type": "string" + }, + { + "name": "sla_assignment", + "in": "query", + "description": "Specifies the method used to apply an SLA Domain to an object. Possible values are Derived, Direct, and Unassigned.", + "required": false, + "type": "string", + "enum": [ + "Derived", + "Direct", + "Unassigned" + ] + }, + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter query results using the effective SLA Domain ID of the objects.", + "required": false, + "type": "string" + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter the query results by using the ID of the primary Rubrik cluster. 
Use local to refer to the Rubrik cluster that is hosting the current API session.", + "required": false, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Specify an attribute to use to sort the query results.", + "required": false, + "type": "string", + "default": "name", + "enum": [ + "name", + "accountName", + "instanceId", + "instanceName", + "instanceType", + "region", + "effectiveSlaDomainName", + "slaAssignment", + "descendantCountEc2Instance" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Specify the sort order to use when sorting query results.", + "required": false, + "type": "string", + "default": "asc", + "enum": [ + "asc", + "desc" + ] + }, + { + "name": "snappable_status", + "in": "query", + "description": "Determines whether to fetch AWS hierarchy objects with additional privilege checks.", + "required": false, + "type": "string", + "enum": [ + "Protectable" + ] + } + ], + "responses": { + "200": { + "description": "Get all the immediate children objects of the given parent AWS Object Node.\n", + "schema": { + "$ref": "#/definitions/AwsHierarchyObjectSummaryListResponse" + } + } + }, + "x-group": "aws_hierarchy" + } + }, + "/folder/hierarchy_rooted_at/{id}": { + "get": { + "tags": [ + "/folder" + ], + "summary": "Hierarchy of folders and managed objects with this folder as root", + "description": "Hierarchy of folders and managed objects with this folder as root.", + "operationId": "getFolderHierarchy", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the folder at the root of expected hierarchy.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Contents of the folder are divided into two lists: subfolders, and other contents.", + "schema": { + "$ref": "#/definitions/FolderHierarchy" + } + } + }, + "x-group": "folder" + } + }, + "/organization/{id}/users_and_groups": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get information for authorized users and groups in an organization", + "description": "Retrieve summary information for the explicitly authorized users and groups of an organization. Information for a user or a group is only included when the user or group has an explicit authorization. 
This endpoint returns an empty list for the default global organization.", + "operationId": "getExplicitlyAuthorizedUsersAndGroups", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Get Users and Groups.", + "schema": { + "$ref": "#/definitions/PrincipalSummaryListResponse" + } + } + }, + "x-group": "organization_resource" + } + }, + "/archive/location/{id}/owner/resume": { + "post": { + "tags": [ + "/archive" + ], + "summary": "Resume archiving", + "description": "Resume archiving to a specified archival location of the current Rubrik cluster.\n", + "operationId": "resumeArchivalLocation", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an archival location object.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Successfully resumed archiving to a previously paused archival location.\n" + } + }, + "x-group": "archival" + } + }, + "/snapshot/{id}/diagnostic": { + "get": { + "tags": [ + "/snapshot" + ], + "summary": "REQUIRES SUPPORT TOKEN - Returns the diagnostic info of a snapshot", + "description": "REQUIRES SUPPORT TOKEN - It returns some diagnostic info of a snapshot, right now only compression type is returned. A support token is required for this operation.", + "operationId": "getSnapshotDiagnosticInfo", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Snapshot to get info from.", + "required": true, + "type": "string" + }, + { + "name": "snappable_id", + "in": "query", + "description": "Snappable ID of target object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns storage stats for a snapshot.", + "schema": { + "$ref": "#/definitions/SnapshotDiagnosticInfo" + } + } + }, + "x-group": "internal_snapshot" + } + }, + "/nutanix/vm/{id}/missed_snapshot": { + "get": { + "tags": [ + "/nutanix/vm" + ], + "summary": "Get details about missed snapshots for a VM", + "description": "Retrieve the time of the day when the snapshots were missed specific to a vm.", + "operationId": "nutanixMissedSnapshots", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the vm.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns details about missed snapshots for a VM.", + "schema": { + "$ref": "#/definitions/MissedSnapshotListResponse" + } + } + }, + "x-group": "nutanix_vm" + } + }, + "/session": { + "post": { + "tags": [ + "/session" + ], + "summary": "Create user session", + "description": "Open a user session.", + "operationId": "createSessionV2", + "parameters": [ + { + "in": "body", + "name": "session_request", + "description": "Create session request.", + "required": true, + "schema": { + "$ref": "#/definitions/SessionRequest" + } + } + ], + "responses": { + "200": { + "description": "Details about the user session.", + "schema": { + "$ref": "#/definitions/SessionResponse" + } + } + }, + "x-group": "session", + "x-rk-block-api-tokens": true, + "x-rk-primary-auth-ok": true + }, + "get": { + "tags": [ + "/session" + ], + "summary": "Get token summaries for a user", + "description": "Gets token summaries for a user.", + "operationId": "getTokenSummariesForUser", + "parameters": [ + { + "name": "user_id", + "in": "query", + "description": "ID of user for whom token summaries are queried.", + "required": 
true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "List of token summaries.", + "schema": { + "$ref": "#/definitions/TokenSummaryListResponse" + } + } + }, + "x-group": "session" + } + }, + "/host_fileset": { + "get": { + "tags": [ + "/host_fileset" + ], + "summary": "Get summary information for hosts", + "description": "Retrieve summary information for all hosts that are registered with a Rubrik cluster. For each host, we also retrieve the summary information for the filesets applied to the host.", + "operationId": "queryHostFileset", + "parameters": [ + { + "name": "hostname", + "in": "query", + "description": "Filter the summary information based on the hostname.", + "required": false, + "type": "string" + }, + { + "name": "operating_system_type", + "in": "query", + "description": "Filter the summary information based on the operating system type. Values are 'UnixLike' or 'Windows'.", + "required": false, + "type": "string", + "enum": [ + "UnixLike", + "Windows" + ] + }, + { + "name": "operating_system", + "in": "query", + "description": "Filter the summary information based on the operating system of the host.", + "required": false, + "type": "string" + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filters the summary information based on the Rubrik cluster specified by the value of primary_cluster_id. Use 'local' for the Rubrik cluster that is hosting the current REST API session.", + "required": false, + "type": "string" + }, + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter the summary information based on the ID of the effective SLA Domain inherited by filesets of the host. Use **_UNPROTECTED_** to only return information for hosts with filesets that do not have an effective SLA Domain. Use **_PROTECTED_** to only return information for hosts with filesets that do have an effective SLA Domain.", + "required": false, + "type": "string" + }, + { + "name": "template_id", + "in": "query", + "description": "Filter the summary information based on the ID of a fileset templates applied to the host. Use **_NO_FILESET_** to return information for hosts with no filesets. _NO_FILESET_ must be used with searchType 'exact'.", + "required": false, + "type": "string" + }, + { + "name": "search_type", + "in": "query", + "description": "Search type. Accepted values are 'infix' or 'exact'. 
Default is 'exact'.", + "required": false, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Comma-separated list of attributes by which to sort: hostName | operatingSystem | operatingSystemType | status.", + "required": false, + "type": "string" + }, + { + "name": "sort_order", + "in": "query", + "description": "Comma-separated list of sort orders (one for each sort_by): asc | desc.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "Summary information for registered hosts.", + "schema": { + "$ref": "#/definitions/HostFilesetSummaryListResponse" + } + } + }, + "x-group": "host_fileset" + } + }, + "/hyperv/vm/snapshot/{id}/download_file": { + "post": { + "tags": [ + "/hyperv/vm" + ], + "summary": "Download file from VM snapshot", + "description": "Create a download file request.", + "operationId": "downloadHypervVirtualMachineSnapshotFile", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the snapshot.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for a download request.", + "required": true, + "schema": { + "$ref": "#/definitions/HypervDownloadFileJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Status for the download file request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "hyperv_vm" + } + }, + "/aws/account/{id}/garbage_resource": { + "get": { + "tags": [ + "/aws/account" + ], + "summary": "Get AWS resources that should be garbage collected", + "description": "Retrieve AWS IDs for all AWS resources that should be garbage collected.", + "operationId": "queryAwsAccountGarbageResources", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "The ID of the AWS source that contains the resources.", + "required": true, + "type": "string" + }, + { + "name": "include_instances", + "in": "query", + "description": "Whether to include AWS instances in the response.", + "required": false, + "type": "boolean" + }, + { + "name": "include_volumes", + "in": "query", + "description": "Whether to include AWS volumes in the response.", + "required": false, + "type": "boolean" + }, + { + "name": "include_snapshots", + "in": "query", + "description": "Whether to include AWS snapshots in the response.", + "required": false, + "type": "boolean" + }, + { + "name": "include_images", + "in": "query", + "description": "Whether to include AWS images in the response.", + "required": false, + "type": "boolean" + } + ], + "responses": { + "200": { + "description": "The AWS IDs of the AWS resources returned.", + "schema": { + "$ref": "#/definitions/AwsAccountResourceIdObject" + } + } + }, + "x-group": "aws_account" + } + }, + "/oracle/db/{id}/export/tablespace": { + "post": { + "tags": [ + "/oracle" + ], + "summary": "Export an Oracle tablespace", + "description": "Request an asynchronous job to export an Oracle tablespace from a specified snapshot or timestamp.", + "operationId": "createExportOracleTablespace", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the database containing the tablespace to export.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration parameters for a job to export an Oracle tablespace from a specified snapshot or timestamp.", + "required": true, + "schema": { + "$ref": "#/definitions/ExportOracleTablespaceConfig" + } + } + ], + 
"responses": { + "202": { + "description": "Request status for an async job to export an Oracle tablespace from a specified snapshot or timestamp.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "oracle_db" + } + }, + "/polaris/snapshot": { + "post": { + "tags": [ + "/polaris" + ], + "summary": "Get information about snapshots in a given time range", + "description": "Get information about snapshots in a given time range.", + "operationId": "getSnapshots", + "parameters": [ + { + "in": "body", + "name": "get_snapshots_config", + "description": "Config for snapshot upload job.", + "required": true, + "schema": { + "$ref": "#/definitions/GetSnapshotsConfig" + } + } + ], + "responses": { + "200": { + "description": "Status of an asynchronous job to get snapshot information.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "polaris" + } + }, + "/node_management/management_ip": { + "get": { + "tags": [ + "/node_management" + ], + "summary": "Get the management ip's netmask and gateway for this machine's cluster", + "description": "Get the management ip's netmask and gateway for this machine's cluster.", + "operationId": "getManagementIpConfig", + "parameters": [], + "responses": { + "200": { + "description": "TODO.", + "schema": { + "$ref": "#/definitions/ClusterIpConfig" + } + } + }, + "x-group": "internal_node_management" + } + }, + "/storage/array_volume_group": { + "post": { + "tags": [ + "/storage/array" + ], + "summary": "Add a storage array volume group", + "description": "Create a storage array volume group object.", + "operationId": "addStorageArrayVolumeGroup", + "parameters": [ + { + "in": "body", + "name": "definition", + "description": "JSON representation of a storage array volume group object.", + "required": true, + "schema": { + "$ref": "#/definitions/StorageArrayVolumeGroupDefinition" + } + } + ], + "responses": { + "201": { + "description": "Created a storage array volume group object.", + "schema": { + "$ref": "#/definitions/StorageArrayVolumeGroupDetail" + } + } + }, + "x-group": "storage_array_volume_group" + }, + "get": { + "tags": [ + "/storage/array" + ], + "summary": "Get list of volume groups", + "description": "Retrieve summary information about all storage array volume group objects.", + "operationId": "queryStorageArrayVolumeGroup", + "parameters": [ + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter the summary information based on the ID of the effective SLA Domain inherited by a volume group. Use UNPROTECTED to only return information for volume groups that do not have an effective SLA Domain. Use PROTECTED to only return information for volume groups that do have an effective SLA Domain.", + "required": false, + "type": "string" + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter the summary information based on the ID assigned to the primary Rubrik cluster. Use local for the Rubrik cluster that is hosting the current REST API session.", + "required": false, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Limit the summary information to a specified maximum number of matches.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Starting position in the list of matched entries contained in the response. 
The summary information includes the specified numbered entry and all higher numbered entries.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "is_relic", + "in": "query", + "description": "Filter the summary information based on the relic status of the volume group. Returns both relic and non-relic volume groups when the parameter is not set.", + "required": false, + "type": "boolean" + }, + { + "name": "name", + "in": "query", + "description": "Retrieve volume groups with a name that matches the provided name. The search is performed as a case-insensitive infix search.", + "required": false, + "type": "string" + }, + { + "name": "sla_assignment", + "in": "query", + "description": "Filter the summary information based on the SLA Domain assignment type.", + "required": false, + "type": "string", + "enum": [ + "Derived", + "Direct", + "Unassigned" + ] + }, + { + "name": "storage_array_id", + "in": "query", + "description": "Filter the summary information based on the ID assigned to the associated storage array.", + "required": false, + "type": "string" + }, + { + "name": "host_id", + "in": "query", + "description": "Filter the summary information based on the ID assigned to the associated application host.", + "required": false, + "type": "string" + }, + { + "name": "proxy_host_id", + "in": "query", + "description": "Filter the summary information based on the ID assigned to the associated proxy host.", + "required": false, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Specifies an attribute to use in sorting the volume group summary information. Performs an ASCII sort of the summary information using the specified attribute. Valid attribute values are 'slaDomainName' and 'name'.", + "required": false, + "type": "string", + "enum": [ + "slaDomainName", + "name" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending or descending.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "Summary information for storage array volume groups.", + "schema": { + "$ref": "#/definitions/StorageArrayVolumeGroupSummaryListResponse" + } + } + }, + "x-group": "storage_array_volume_group" + } + }, + "/app_blueprint/snapshot/{id}": { + "get": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Get Blueprint snapshot details", + "description": "Retrieve detailed information about a specified snapshot for a Blueprint object.", + "operationId": "getAppBlueprintSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a snapshot object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Details for a Blueprint snapshot.", + "schema": { + "$ref": "#/definitions/AppBlueprintSnapshotDetail" + } + } + }, + "x-group": "app_blueprint" + } + }, + "/diagnostic/snappable/{id}/latest": { + "get": { + "tags": [ + "/diagnostic" + ], + "summary": "Get diagnostic information on the most recent task of a snappable", + "description": "Get diagnostic information on the most recent task of a snappable.\n", + "operationId": "getSnappableDiagnosticLatest", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Managed ID of the snappable.", + "required": true, + "type": "string" + }, + { + "name": "task_type", + "in": "query", + "description": "Filter task actions with task types.", + "required": true, + "type": 
"string", + "enum": [ + "Backup", + "LogBackup", + "Replication", + "LogReplication", + "Archival", + "ArchivalTiering", + "LogArchival", + "LogShipping", + "Instantiate", + "LiveMount", + "InstantRecovery", + "Export", + "Restore", + "InPlaceRecovery", + "DownloadFile", + "RestoreFile", + "Conversion", + "Index", + "Validation" + ] + } + ], + "responses": { + "200": { + "description": "Return diagnostic information on the most recent task of the snappable.\n", + "schema": { + "$ref": "#/definitions/TaskDiagnosticInfo" + } + }, + "404": { + "description": "Returned if there is no task found for the snappable.", + "schema": { + "type": "string" + } + } + }, + "x-group": "internal_diagnostic" + } + }, + "/vcd/vapp/snapshot/{id}": { + "delete": { + "tags": [ + "/vcd/vapp" + ], + "summary": "(DEPRECATED) Delete vApp snapshot", + "description": "Designate a vApp snapshot as expired and available for garbage collection. The snapshot must be an on-demand snapshot or a snapshot from a vApp that is not assigned to an SLA Domain. This endpoint will be removed in CDM v6.1 in favor of `DELETE v1/vcd/vapp/snapshot/{id}`.", + "operationId": "deleteVappSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a snapshot object.", + "required": true, + "type": "string" + }, + { + "name": "location", + "in": "query", + "description": "Location of the snapshot to delete. Use _local_ to delete only the local copy of the snapshot. Or use _all_ to delete the snapshot locally, on a replication target, and at an archival location. This endpoint will be removed in CDM v6.1 in favor of `GET v1/vcd/vapp`.", + "required": true, + "type": "string", + "enum": [ + "all", + "local" + ] + } + ], + "responses": { + "204": { + "description": "Snapshot successfully deleted." + } + }, + "deprecated": true, + "x-group": "vcd_vapp" + }, + "get": { + "tags": [ + "/vcd/vapp" + ], + "summary": "(DEPRECATED) Get vApp snapshot details", + "description": "Retrieve detailed information about a specified snapshot for a vApp object. 
This endpoint will be removed in CDM v6.1 in favor of `GET v1/vcd/vapp/snapshot/{id}`.", + "operationId": "getVappSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a snapshot object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Details for a vApp snapshot.", + "schema": { + "$ref": "#/definitions/VcdVappSnapshotDetail" + } + } + }, + "deprecated": true, + "x-group": "vcd_vapp" + } + }, + "/app_blueprint/{id}/missed_snapshot": { + "get": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Get details about missed snapshots for a Blueprint", + "description": "Retrieve the timestamp for each missed snapshot for a specified Blueprint.", + "operationId": "appBlueprintMissedSnapshots", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Blueprint.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Details for missed snapshots for a Blueprint.", + "schema": { + "$ref": "#/definitions/MissedSnapshotListResponse" + } + } + }, + "x-group": "app_blueprint" + } + }, + "/hierarchy/{id}/update": { + "post": { + "tags": [ + "/hierarchy" + ], + "summary": "Update cached hierarchy information", + "description": "Force an update of the cached hierarchy information for the specified object.", + "operationId": "updateHierarchyInformation", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the hierarchy object.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Hierarchy update successfully triggered." + } + }, + "x-group": "hierarchy" + } + }, + "/oracle/db/download/{snapshot_id}": { + "post": { + "tags": [ + "/oracle" + ], + "summary": "Download Oracle snapshot from cloud", + "description": "Create an asynchronous job to download an Oracle database snapshot and associated logs using the snapshot ID. The response includes the ID of the asynchronous job request. To see the status of the request, poll /oracle/request/{id}.", + "operationId": "downloadOracleDbSnapshot", + "parameters": [ + { + "name": "snapshot_id", + "in": "path", + "description": "ID assigned to an Oracle database snapshot.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status of an async job request for the download of an Oracle database snapshot.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "oracle_db" + } + }, + "/hyperv/vm/snapshot/{id}/download_files": { + "post": { + "tags": [ + "/hyperv/vm" + ], + "summary": "Download files from a Hyper-V VM backup", + "description": "Start an asynchronous job to download multiple files and folders from a specified Hyper-V VM backup. The response returns an asynchrounous request ID. 
Get the URL for downloading the ZIP file including the specific files/folders by sending a GET request to 'hyperv/vm/request/{id}'.", + "operationId": "downloadHypervVirtualMachineSnapshotFiles", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a Hyper-V VM backup object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration information for a job to download files and folders from a Hyper-V VM backup.", + "required": true, + "schema": { + "$ref": "#/definitions/HypervDownloadFilesJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Status of an async job to download files and folders from a Hyper-V VM backup.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "hyperv_vm" + } + }, + "/report/data_source/{data_source_name}/csv": { + "get": { + "tags": [ + "/report" + ], + "summary": "Get table data from report data source", + "description": "Retrieve a CSV file containing the raw table data from the specified data source. Retrieved data is not constructed as a report. Returns a download link for the CSV file.", + "operationId": "getCsvFromDataSource", + "parameters": [ + { + "name": "data_source_name", + "in": "path", + "description": "Name of a report data source table.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Download link of the requested CSV file.", + "schema": { + "type": "string" + } + } + }, + "x-group": "internal_report" + } + }, + "/volume_group/{id}/missed_snapshot": { + "get": { + "tags": [ + "/volume_group" + ], + "summary": "Get details about missed snapshots for a Volume Group", + "description": "Retrieve the time of the day when the snapshots were missed specific to a Volume Group.", + "operationId": "volumeGroupMissedSnapshots", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Volume Group.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns details about missed snapshots for a Volume Group.", + "schema": { + "$ref": "#/definitions/MissedSnapshotListResponse" + } + } + }, + "x-group": "volume_group" + } + }, + "/stats/cloud_storage/physical": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get snapshot physical cloud storage", + "description": "Retrieve the amount of physical cloud storage used by all snapshots from the Rubrik cluster.", + "operationId": "physicalCloudStorage", + "parameters": [], + "responses": { + "200": { + "description": "Returns an object with attribute: name(String), key(String), value(String), frequencyInMin(Integer), lastUpdateTime(Date).", + "schema": { + "$ref": "#/definitions/OfflineStatSummary" + } + } + }, + "x-group": "stats" + } + }, + "/vmware/vm/snapshot/{id}/restore_files": { + "post": { + "tags": [ + "/vmware/vm" + ], + "summary": "Restores multiple files/directories from VM snapshot", + "description": "Create a request to restore multiple file or folder to the source virtual machine.", + "operationId": "createRestoreFilesJob", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of a snapshot.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for the restore request.", + "required": true, + "schema": { + "$ref": "#/definitions/RestoreFilesJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Status of the restore request.", + "schema": { + 
"$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "vm" + } + }, + "/managed_volume/snapshot/{id}/download": { + "post": { + "tags": [ + "/managed_volume" + ], + "summary": "Create a download managed volume snapshot from archival request", + "description": "Create a download managed volume snapshot from archival request.", + "operationId": "createDownloadManagedVolumeSnapshotFromCloud", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status for the download request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "managed_volume" + } + }, + "/network_throttle/{id}": { + "patch": { + "tags": [ + "/network_throttle" + ], + "summary": "Update a network throttle", + "description": "Update the configuration of a specified network throttle object.", + "operationId": "updateNetworkThrottle", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a network throttle object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "throttle_update", + "description": "Configuration changes to apply to a specified network throttle object. Unspecified values are left unchanged.", + "required": true, + "schema": { + "$ref": "#/definitions/NetworkThrottleUpdate" + } + } + ], + "responses": { + "200": { + "description": "Summary of the updated network throttle.", + "schema": { + "$ref": "#/definitions/NetworkThrottleSummary" + } + } + }, + "x-group": "network_throttle" + } + }, + "/cloud_on/aws/request/{id}": { + "get": { + "tags": [ + "/cloud_on" + ], + "summary": "Get asynchronous request details for Aws-related jobs", + "description": "Get the details of an asynchronous request that involves Aws.", + "operationId": "getAwsAsyncRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of an asynchronous request.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status of an asynchronous request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "cloud_instance" + } + }, + "/archive/location/{id}": { + "delete": { + "tags": [ + "/archive" + ], + "summary": "Delete an archival location", + "description": "Delete the specified archival location. 
Remove the archival location from the SLA Domains that reference it and expire all snapshots at the archival location.\n", + "operationId": "deleteArchivalLocation", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an archival location object.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "The request ID for an asynchronous request to delete an archival location.\n", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "archival" + } + }, + "/app_blueprint/{id}": { + "get": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Get Blueprint details", + "description": "Retrieve detailed information for a specified Blueprint.", + "operationId": "getAppBlueprint", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a Blueprint object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Detailed information about a Blueprint object.", + "schema": { + "$ref": "#/definitions/AppBlueprintDetail" + } + } + }, + "x-group": "app_blueprint" + } + }, + "/nutanix/hierarchy/{id}": { + "get": { + "tags": [ + "/nutanix/hierarchy" + ], + "summary": "Get summary of a hierarchy object", + "description": "Retrieve details for the specified hierarchy object.", + "operationId": "getNutanixHierarchyObject", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the hierarchy object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Details of the hierarchy object.", + "schema": { + "$ref": "#/definitions/NutanixHierarchyObjectSummary" + } + } + }, + "x-group": "nutanix_hierarchy" + } + }, + "/managed_volume/{id}/resize": { + "post": { + "tags": [ + "/managed_volume" + ], + "summary": "Resize managed volume", + "description": "Resize the managed volume to a larger size. 
Once a volume size has been increased, it can not be decreased.", + "operationId": "resizeApiForManagedVolume", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of managed volume.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "size", + "description": "New size of the managed volume in bytes.", + "required": true, + "schema": { + "$ref": "#/definitions/ManagedVolumeResize" + } + } + ], + "responses": { + "202": { + "description": "Status for the resize request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "managed_volume" + } + }, + "/managed_volume/snapshot/{id}/restore": { + "post": { + "tags": [ + "/managed_volume" + ], + "summary": "Create a request to export a snapshot and mount it on a host", + "description": "Export a managed volume snapshot as a share and mount it on a given host.", + "operationId": "exportSlaSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "params", + "description": "Export parameters to configure the export's properties, including type of share, hostPatterns, SMB share info, subnet of the mount points, host, and mount paths on the host.", + "required": true, + "schema": { + "$ref": "#/definitions/ManagedVolumeSlaExportConfig" + } + } + ], + "responses": { + "202": { + "description": "Details of the snapshot export.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "managed_volume" + } + }, + "/vmware/vcenter/{id}/tag_category": { + "get": { + "tags": [ + "/vmware/vcenter" + ], + "summary": "Get Tag Categories associated with vCenter", + "description": "Get a list of tag categories on this vCenter.", + "operationId": "getVsphereTagCategories", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the vCenter Server.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "List of vCenter Tag Categories.", + "schema": { + "$ref": "#/definitions/VsphereCategoryListResponse" + } + } + }, + "x-group": "vcenter" + } + }, + "/replication/source/{id}": { + "delete": { + "tags": [ + "/replication" + ], + "summary": "REQUIRES SUPPORT TOKEN: Remove a replication source", + "description": "REQUIRES SUPPORT TOKEN: Remove a specified replication source from this Rubrik cluster. 
A support token is required for this operation.", + "operationId": "deleteReplicationSource", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the replication source to be removed.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Successfully removed replication source with ID of .", + "schema": { + "$ref": "#/definitions/DeleteReplicationSourceSummary" + } + } + }, + "x-group": "replication" + }, + "get": { + "tags": [ + "/replication" + ], + "summary": "Get summary for a specific replication source", + "description": "Retrieve the ID, name, and address for a specified replication source.", + "operationId": "getReplicationSource", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of a replication source.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns summary details for the specified replication source.", + "schema": { + "$ref": "#/definitions/ReplicationSourceSummary" + } + } + }, + "x-group": "replication" + } + }, + "/hyperv/host/request/{id}": { + "get": { + "tags": [ + "/hyperv/host" + ], + "summary": "Get Hyper-V host async request", + "description": "Get details about a Hyper-V host related async request.", + "operationId": "getHypervHostAsyncRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the request.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Status for the async request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "hyperv_host" + } + }, + "/stats/average_storage_growth_per_day": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Returns the average daily growth of local storage used", + "description": "Returns the average daily growth of local storage used.", + "operationId": "averageLocalGrowthPerDay", + "parameters": [], + "responses": { + "200": { + "description": "Returns average daily growth in bytes.", + "schema": { + "$ref": "#/definitions/StorageGrowth" + } + } + }, + "x-group": "internal_report" + } + }, + "/stats/total_replication_storage": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get total storage including local vm storage replicated across targets and also remote vm storage on premise", + "description": "Get Total storage including local vm storage replicated across targets and also remote vm storage on premise.", + "operationId": "totalReplicationStorage", + "parameters": [], + "responses": { + "200": { + "description": "Returns an object with replication storage stats.", + "schema": { + "$ref": "#/definitions/ReplicationStorage" + } + } + }, + "x-group": "stats" + } + }, + "/storage/array_volume_group/snapshot/{id}/restore_files": { + "post": { + "tags": [ + "/storage/array" + ], + "summary": "Restore files", + "description": "Initiate an asynchronous job to restore files from a snapshot of a storage array volume group to the source location. 
The call returns a job instance ID.", + "operationId": "restoreStorageArrayVolumeGroupSnapshotFiles", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a snapshot object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for an asynchronous job to restore files from a snapshot of a storage array volume group to the source location.", + "required": true, + "schema": { + "$ref": "#/definitions/StorageArrayRestoreFilesConfig" + } + } + ], + "responses": { + "202": { + "description": "Status of an asynchronous job to restore files.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "storage_array_volume_group" + } + }, + "/event_series/{id}/cancel": { + "post": { + "tags": [ + "/event_series" + ], + "summary": "(CAUTION! WE ARE DEPRECATING THIS ENDPOINT) Cancel an event", + "description": "Request cancellation of an event. To succeed, the 'isCancelable' parameter of the event must be 'true'. Uses the event series ID and jobInstanceId to retrieve information about an event. JobInstnceId can be left unspecified in case of a non job related event, but is expected for a job related event.", + "operationId": "cancel", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "The Event Series ID (event_series_id) of the event.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "event_series_detail", + "description": "The event series detail for this event, which contains the job instance id and event series id.", + "required": false, + "schema": { + "$ref": "#/definitions/EventSeriesDetail" + } + } + ], + "responses": { + "204": { + "description": "The specified event is canceled." + } + }, + "x-group": "events" + } + }, + "/organization/{id}/hyperv": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get information for authorized Hyper-v resources in an organization", + "description": "Retrieve summary information for the explicitly authorized Hyper-v resources of an organization. Information for a Hyper-v resource is only included when the organization has an explicit authorization for the resource. This endpoint returns an empty list for the default global organization.", + "operationId": "getExplicitlyAuthorizedHypervResources", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Get Hyper-V resources.", + "schema": { + "$ref": "#/definitions/HypervHierarchyObjectSummaryListResponse" + } + } + }, + "x-group": "organization_hyperv" + } + }, + "/vcd/vapp/{id}/search": { + "get": { + "tags": [ + "/vcd/vapp" + ], + "summary": "(DEPRECATED) Search for a file from a vApp", + "description": "Aggregated search for a file through snapshots of all virtual machines that are presently part of the vApp. Specify the file by full path prefix or filename prefix. This endpoint will be removed in CDM v6.1 in favor of `GET v1/vcd/vapp/{id}/search`.", + "operationId": "searchVapp", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the vApp.", + "required": true, + "type": "string" + }, + { + "name": "path", + "in": "query", + "description": "The path query. 
Use either a path prefix or a filename prefix.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "File search results.", + "schema": { + "$ref": "#/definitions/AppSearchResponseListResponse" + } + } + }, + "deprecated": true, + "x-group": "vcd_vapp" + } + }, + "/oracle/hierarchy/{id}/descendants": { + "get": { + "tags": [ + "/oracle/hierarchy" + ], + "summary": "Get list of descendant objects", + "description": "Retrieve the list of descendant objects for the specified parent.", + "operationId": "getOracleHierarchyDescendants", + "parameters": [ + { + "name": "name", + "in": "query", + "description": "Filter a response by making an infix comparison of the Oracle RAC name or standalone host name, names of databases on the RAC or the host, database SIDs and database tablespaces in the response with the specified value.", + "required": false, + "type": "string" + }, + { + "name": "id", + "in": "path", + "description": "ID of the parent Oracle hierarchy object.", + "required": true, + "type": "string" + }, + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter by ID of effective SLA domain.", + "required": false, + "type": "string" + }, + { + "name": "object_type", + "in": "query", + "description": "Filter by node object type.", + "required": false, + "type": "string", + "enum": [ + "AppBlueprint", + "AwsAccount", + "CloudCompute", + "CloudComputeRegion", + "CloudNativeAuthzRoot", + "ComputeCluster", + "DataCenter", + "DataStore", + "Ec2Instance", + "ExclusionPattern", + "ExclusionPatternAuthzRoot", + "Folder", + "Hdfs", + "HostFailoverCluster", + "HostRoot", + "HypervAuthzRoot", + "HypervCluster", + "HypervScvmm", + "HypervServer", + "HypervVirtualMachine", + "FailoverClusterApp", + "KuprHost", + "KuprHostAuthzRoot", + "LinuxFileset", + "LinuxHost", + "LinuxHostAuthzRoot", + "ManagedVolume", + "ManagedVolumeAuthzRoot", + "ManagedVolumeRoot", + "MssqlAuthzRoot", + "MssqlDatabase", + "MssqlAvailabilityGroup", + "MssqlInstance", + "NasHost", + "NasHostAuthzRoot", + "NasSystem", + "NfsHostShare", + "NutanixAuthzRoot", + "NutanixCluster", + "NutanixVirtualMachine", + "OracleAuthzRoot", + "OracleDatabase", + "OracleHost", + "OracleRac", + "OracleRoot", + "SapHanaAuthzRoot", + "SapHanaDatabase", + "SapHanaSystem", + "ShareFileset", + "SlaDomain", + "SmbHostShare", + "StorageArray", + "StorageArrayVolume", + "StorageArrayVolumeGroup", + "Storm", + "User", + "vCenter", + "Vcd", + "VcdAuthzRoot", + "VcdCatalog", + "VcdOrg", + "VcdOrgVdc", + "VcdVapp", + "VcdVimServer", + "VirtualMachine", + "VmwareAuthzRoot", + "VmwareHost", + "VmwareResourcePool", + "VmwareStoragePolicy", + "VmwareTag", + "VmwareTagCategory", + "WindowsCluster", + "WindowsFileset", + "WindowsHost", + "WindowsHostAuthzRoot", + "WindowsVolumeGroup" + ] + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter by primary cluster ID, or **local**.", + "required": false, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Limit the number of matches returned.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Ignore these many matches in the beginning.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "sla_assignment", + "in": "query", + "description": "Filter by SLA assignment type.", + "required": false, + "type": "string", + "enum": [ + "Derived", + "Direct", + 
"Unassigned" + ] + }, + { + "name": "status", + "in": "query", + "description": "Filter by status.", + "required": false, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Attribute to sort the results on.", + "required": false, + "type": "string", + "enum": [ + "effectiveSlaDomainName", + "name", + "descendentCount.cluster", + "descendentCount.host", + "descendentCount.db" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending or descending.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + }, + { + "name": "snappable_status", + "in": "query", + "description": "Determines whether to fetch Oracle hiererchy objects with additional privilege checks.", + "required": false, + "type": "string", + "enum": [ + "Protectable" + ] + } + ], + "responses": { + "200": { + "description": "Summary list of descendant objects.", + "schema": { + "$ref": "#/definitions/OracleHierarchyObjectSummaryListResponse" + } + } + }, + "x-group": "oracle_hierarchy" + } + }, + "/report/{id}/refresh": { + "post": { + "tags": [ + "/report" + ], + "summary": "Begins to refresh the data associated with a report", + "description": "Begins to refresh the data associated with the report specified by id. This should be used following calls to the various report patch endpoints in order to trigger the data update.", + "operationId": "refreshReportData", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Id of the report to refresh.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Returns the status of the refresh data request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "internal_report" + } + }, + "/archive/location/job/reconnect/{id}": { + "get": { + "tags": [ + "/archive" + ], + "summary": "Get details about an archival location reconnect job", + "description": "Retrieve the following information about job: ID of job, job status, error details, start time of job, end time of job, job type, ID of the node, job progress and location ID.", + "operationId": "getReconnectArchivalLocationJobInstance", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of Job.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Return job details.", + "schema": { + "$ref": "#/definitions/JobInstanceDetail" + } + } + }, + "x-group": "archival" + } + }, + "/hyperv/vm/snapshot/{id}/browse": { + "get": { + "tags": [ + "/hyperv/vm" + ], + "summary": "Lists all files in VM snapshot", + "description": "Lists all files and directories in a given path.", + "operationId": "browseHypervVirtualMachineSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + }, + { + "name": "path", + "in": "query", + "description": "The absolute path to start the directory listing from.", + "required": true, + "type": "string" + }, + { + "name": "offset", + "in": "query", + "description": "Starting position in the list of path entries contained in the query results, sorted by lexicographical order. 
The response includes the specified numbered entry and all higher numbered entries.", + "required": false, + "type": "integer", + "format": "int32" + }, + { + "name": "limit", + "in": "query", + "description": "Maximum number of entries in the response.", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "responses": { + "200": { + "description": "List of files and directories at the specified path.", + "schema": { + "$ref": "#/definitions/BrowseResponseListResponse" + } + } + }, + "x-group": "hyperv_vm" + } + }, + "/polaris/app_blueprint/snapshot/{id}/instant_recover": { + "post": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Instant Recovery of Blueprint virtual machines", + "description": "Use Instant Recovery to recover specified Blueprint virtual machines.", + "operationId": "createAppBlueprintSnapshotInstantRecovery", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to the Blueprint snapshot object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for a request to recover specified virtual machines from a Blueprint snapshot.", + "required": true, + "schema": { + "$ref": "#/definitions/AppBlueprintInstantRecoveryJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Request status for async Instant Recovery job for virtual machines in a Blueprint snapshot.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "app_blueprint" + } + }, + "/nutanix/vm/snapshot/{id}/download_files": { + "post": { + "tags": [ + "/nutanix/vm" + ], + "summary": "Download files from a Nutanix VM backup", + "description": "Start an asynchronous job to download multiple files and folders from a specified Nutanix VM backup. The response returns an asynchronous request ID. 
Get the URL for downloading the zip file including the specific files/folders by sending a GET request to 'nutanix/vm/request/{id}'.", + "operationId": "createNutanixDownloadFilesJob", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a Nutanix VM backup object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration information for a job to download files and folders from a Nutanix VM backup.", + "required": true, + "schema": { + "$ref": "#/definitions/NutanixDownloadFilesJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Status of an async job to download files and folders from a Nutanix VM backup.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "nutanix_vm" + } + }, + "/hyperv/hierarchy/{id}/descendants": { + "get": { + "tags": [ + "/hyperv/hierarchy" + ], + "summary": "Get list of descendant objects", + "description": "Retrieve the list of descendant objects for the specified parent.", + "operationId": "getHypervHierarchyDescendants", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the parent Hyper-V hierarchy object.", + "required": true, + "type": "string" + }, + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter by ID of effective SLA domain.", + "required": false, + "type": "string" + }, + { + "name": "object_type", + "in": "query", + "description": "Filter by node object type.", + "required": false, + "type": "string", + "enum": [ + "scvmm", + "hostOrCluster", + "cluster", + "host", + "vm" + ] + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter by primary cluster ID, or **local**.", + "required": false, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Limit the number of matches returned.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Ignore these many matches in the beginning.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "name", + "in": "query", + "description": "Search vm by vm name.", + "required": false, + "type": "string" + }, + { + "name": "sla_assignment", + "in": "query", + "description": "Filter by SLA assignment type.", + "required": false, + "type": "string", + "enum": [ + "Derived", + "Direct", + "Unassigned" + ] + }, + { + "name": "status", + "in": "query", + "description": "Filter by status.", + "required": false, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Attribute to sort the results on.", + "required": false, + "type": "string", + "enum": [ + "effectiveSlaDomainName", + "name", + "descendentCount.cluster", + "descendentCount.host", + "descendentCount.vm" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending or descending.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + }, + { + "name": "snappable_status", + "in": "query", + "description": "Filters Hyper-V hierarchy objects based on the specified query value.", + "required": false, + "type": "string", + "enum": [ + "Protectable" + ] + } + ], + "responses": { + "200": { + "description": "Summary list of descendant objects.", + "schema": { + "$ref": "#/definitions/HypervHierarchyObjectSummaryListResponse" + } + } + }, + "x-group": "hyperv_hierarchy" + } + }, + 
"/report/email_subscription/{subscription_id}": { + "delete": { + "tags": [ + "/report" + ], + "summary": "Deletes a specific email subscription for a report", + "description": "Deletes the email subscription specified by subscription_id for the report it is a subscription of.", + "operationId": "deleteEmailSubscription", + "parameters": [ + { + "name": "subscription_id", + "in": "path", + "description": "Id of the specific email seubscription to retrieve.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Email subscription was successfully deleted." + } + }, + "x-group": "internal_report" + }, + "get": { + "tags": [ + "/report" + ], + "summary": "Returns specific email subscription for a report", + "description": "Returns the email subscription specified by subscription_id for the report it is a subscription of.", + "operationId": "getSingleEmailSubscription", + "parameters": [ + { + "name": "subscription_id", + "in": "path", + "description": "Id of the specific email seubscription to retrieve.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Specific email subscription summary.", + "schema": { + "$ref": "#/definitions/EmailSubscriptionSummary" + } + } + }, + "x-group": "internal_report" + }, + "patch": { + "tags": [ + "/report" + ], + "summary": "Updates a specific email subscription for a report", + "description": "Updates the email subscription specified by subscription_id for the report it is a subscription of.", + "operationId": "updateEmailSubscription", + "parameters": [ + { + "name": "subscription_id", + "in": "path", + "description": "Id of the specific email seubscription to retrieve.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "subscribe_request", + "description": "Definition of the updated email subscription.", + "required": true, + "schema": { + "$ref": "#/definitions/EmailSubscriptionUpdate" + } + } + ], + "responses": { + "200": { + "description": "The updated email subscription summary.", + "schema": { + "$ref": "#/definitions/EmailSubscriptionSummary" + } + } + }, + "x-group": "internal_report" + } + }, + "/config/usersettable_hyperv/reset": { + "patch": { + "tags": [ + "/config" + ], + "summary": "Reset the global Hyperv configuration", + "description": "Reset the global Hyperv configuration.", + "operationId": "resetUserSettableHypervConfig", + "parameters": [ + { + "in": "body", + "name": "new_values", + "description": "Configuration keys to reset.", + "required": true, + "schema": { + "$ref": "#/definitions/UserSettableGlobalHypervConfig" + } + } + ], + "responses": { + "200": { + "description": "global configuration.", + "schema": { + "$ref": "#/definitions/UserSettableGlobalHypervConfig" + } + } + }, + "x-group": "internal_config_reset" + } + }, + "/polaris/app_blueprint/cloud_on/aws/app_image": { + "post": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Create an AppBlueprint image on AWS", + "description": "Create an AppBlueprint image at a specified AWS location using a specified snapshot.", + "operationId": "createAwsAppCloudMachineImage", + "parameters": [ + { + "name": "snapshot_id", + "in": "query", + "description": "ID of an AppBlueprint snapshot.", + "required": true, + "type": "string" + }, + { + "name": "location_id", + "in": "query", + "description": "ID of data location to create the image on.", + "required": true, + "type": "string" + }, + { + "name": "should_create_when_archived", + "in": "query", + "description": "A 
Boolean value that determines whether to create image when the snappable specified by the snapshot_id is archived. When the value is 'true', the image will be created when the snappable is archived. When the value is 'false', the image will not be created when then snappable is archived.\n", + "required": false, + "type": "boolean", + "default": false + } + ], + "responses": { + "202": { + "description": "Status for the AWS image creation request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "cloud_instance_app_blueprint" + }, + "get": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Query for AppBlueprint cloud images", + "description": "Retrieve a list of the available AppBlueprint cloud images.", + "operationId": "queryAwsAppCloudMachineImage", + "parameters": [ + { + "name": "snappable_id", + "in": "query", + "description": "Filters AWS AppBlueprint cloud images by snappable ID.", + "required": false, + "type": "string" + }, + { + "name": "location_id", + "in": "query", + "description": "Filters AWS AppBlueprint cloud images by location ID.", + "required": false, + "type": "string" + }, + { + "name": "offset", + "in": "query", + "description": "Ignore the specified number of matches from the beginning of the list.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "limit", + "in": "query", + "description": "Retrieve only the specified number of records. Default returns all available records.\n", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "snappable_name", + "in": "query", + "description": "Limit results to records for those sources with a name that matches the specified query string.\n", + "required": false, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Sort the result by given attribute.", + "required": false, + "type": "string", + "default": "AppBlueprintName", + "enum": [ + "AppBlueprintName", + "LocationName", + "SnapshotTime", + "CreationTime" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "The sort order. 
Defaults to asc if not specified.", + "required": false, + "type": "string", + "default": "asc", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "Returns summary information for all AWS AppBlueprint cloud images.", + "schema": { + "$ref": "#/definitions/AwsAppImageSummaryListResponse" + } + } + }, + "x-group": "cloud_instance_app_blueprint" + } + }, + "/stats/streams/count": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get the number of concurent streams", + "description": "Get the number of concurent streams.", + "operationId": "streams", + "parameters": [], + "responses": { + "200": { + "description": "Returns a total number of concurent streams.", + "schema": { + "$ref": "#/definitions/CountResponse" + } + } + }, + "x-group": "stats" + } + }, + "/job/{id}/runtime": { + "get": { + "tags": [ + "/job" + ], + "summary": "REQUIRES SUPPORT TOKEN - Get estimated time remaining of a job", + "description": "REQUIRES SUPPORT TOKEN - Get estimated time remaining of a job.", + "operationId": "getJobRuntime", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the job to get estimated runtime on.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Runtime of the running job.", + "schema": { + "$ref": "#/definitions/JobRuntime" + } + } + }, + "x-group": "internal_job" + } + }, + "/vmware/vm/snapshot/mount/{id}/rollback": { + "post": { + "tags": [ + "/vmware/vm" + ], + "summary": "Rollback an Instant Recovery", + "description": "After an Instant Recovery that used the preserve MOID setting, rollback the datastore used by the recovered virtual machine from the Rubrik cluster to the original datastore.", + "operationId": "rollbackMount", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a Live Mount object.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status of an async request to rollback the datastore used by a recovered virtual machine.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "vm" + } + }, + "/volume_group/snapshot/{id}": { + "delete": { + "tags": [ + "/volume_group" + ], + "summary": "Delete Volume Group snapshot", + "description": "Delete a snapshot by expiring it. A snapshot can only be expired if it is an on-demand snapshot or a snapshot of an unprotected Volume Group.", + "operationId": "deleteVolumeGroupSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + }, + { + "name": "location", + "in": "query", + "description": "Snapshot location to delete. Use **_local_** to delete all local snapshots and **_all_** to delete the snapshot in all locations.", + "required": true, + "type": "string", + "enum": [ + "all", + "local" + ] + } + ], + "responses": { + "204": { + "description": "Snapshot successfully deleted." + } + }, + "x-group": "volume_group" + } + }, + "/organization/{id}": { + "delete": { + "tags": [ + "/organization" + ], + "summary": "Delete an Organization", + "description": "Delete a specified Organization object.", + "operationId": "deleteOrganization", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of an Organization object.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Delete the specified Organization object." 
+ }, + "404": { + "description": "Failed to delete a specified Organization object." + } + }, + "x-group": "organization" + }, + "get": { + "tags": [ + "/organization" + ], + "summary": "Get Organization Details", + "description": "Retrieve the details of a specified Organization object.", + "operationId": "getOrganization", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of an Organization object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Details of a specified Organization object.", + "schema": { + "$ref": "#/definitions/OrganizationDetail" + } + } + }, + "x-group": "organization" + }, + "patch": { + "tags": [ + "/organization" + ], + "summary": "Update an Organization", + "description": "Make changes to the parameters of a specified Organization object.", + "operationId": "updateOrganization", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of an Organization object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "organization_update", + "description": "Updated parameters for the specified Organization object.", + "required": true, + "schema": { + "$ref": "#/definitions/OrganizationUpdate" + } + } + ], + "responses": { + "200": { + "description": "Detailed information for the updated organization.", + "schema": { + "$ref": "#/definitions/OrganizationSummary" + } + } + }, + "x-group": "organization" + } + }, + "/organization/{id}/linux": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get information for authorized Linux hosts in an organization", + "description": "Retrieve summary information for the explicitly authorized Linux hosts of an organization. Information for a Linux host is only included when the organization has an explicit authorization for the host. This endpoint returns an empty list for the default global organization.", + "operationId": "getExplicitlyAuthorizedLinuxHosts", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Get Linux hosts.", + "schema": { + "$ref": "#/definitions/ManagedObjectSummaryListResponse" + } + } + }, + "x-group": "organization_host" + } + }, + "/mssql/db/{id}/restore_estimate": { + "get": { + "tags": [ + "/mssql" + ], + "summary": "(DEPRECATED) Returns size estimates for a restore or export operation", + "description": "Provides an estimate of resources needed for the specified restore or export operation. This endpoint will be removed in CDM v6.0 in favor of `GET v1/mssql/db/{id}/restore_estimate`.", + "operationId": "mssqlRestoreEstimate", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Microsoft SQL database.", + "required": true, + "type": "string" + }, + { + "name": "time", + "in": "query", + "description": "Time to recover to, in ISO8601 date-time format, such as \"2016-01-01T01:23:45.678\". Either this or the LSN must be specified.", + "required": false, + "type": "string", + "format": "date-time" + }, + { + "name": "lsn", + "in": "query", + "description": "LSN to recover to. Either this or time must be specified.", + "required": false, + "type": "string" + }, + { + "name": "recovery_fork_guid", + "in": "query", + "description": "Recovery fork GUID of LSN to recover to. 
Has meaning only when lsn is specified.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns the esimate for a restore or export to the specified recovery point.", + "schema": { + "$ref": "#/definitions/MssqlRestoreEstimateResult" + } + } + }, + "deprecated": true, + "x-group": "mssql" + } + }, + "/vmware/vm/snapshot/{id}/standalone_esx_host_export": { + "post": { + "tags": [ + "/vmware/vm" + ], + "summary": "Create an export request to standalone esx server", + "description": "Export snapshot of a vm to stanadlone esx server.", + "operationId": "createStandaloneExport", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for the export request to standalone esx host.", + "required": true, + "schema": { + "$ref": "#/definitions/ExportSnapshotToStandaloneHostRequest" + } + } + ], + "responses": { + "202": { + "description": "Status for the export request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "vm" + } + }, + "/hyperv/vm/snapshot/{id}": { + "delete": { + "tags": [ + "/hyperv/vm" + ], + "summary": "Delete VM snapshot", + "description": "Delete a snapshot by expiring it. Snapshot is expired only if it is a manual snapshot or a snapshot of an unprotected vm.", + "operationId": "deleteHypervVirtualMachineSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + }, + { + "name": "location", + "in": "query", + "description": "Snapshot location to delete. Use **_local_** to delete all local snapshots and **_all_** to delete the snapshot in all locations.", + "required": true, + "type": "string", + "enum": [ + "all", + "local" + ] + } + ], + "responses": { + "204": { + "description": "Snapshot successfully deleted." 
+ } + }, + "x-group": "hyperv_vm" + }, + "get": { + "tags": [ + "/hyperv/vm" + ], + "summary": "Get VM snapshot details", + "description": "Retrieve detailed information about a snapshot.", + "operationId": "getHypervVirtualMachineSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns details about a snapshot.", + "schema": { + "$ref": "#/definitions/HypervVirtualMachineSnapshotDetail" + } + } + }, + "x-group": "hyperv_vm" + } + }, + "/storage/array": { + "post": { + "tags": [ + "/storage/array" + ], + "summary": "Add a storage array", + "description": "Adds a storage array object and initiates an asynchronous job to obtain the metadata of the storage array for the object.", + "operationId": "addStorageArray", + "parameters": [ + { + "in": "body", + "name": "definition", + "description": "JSON object for storage array definition.", + "required": true, + "schema": { + "$ref": "#/definitions/StorageArrayDefinition" + } + } + ], + "responses": { + "202": { + "description": "Status of an asynchronous request to update storage array metadata.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "storage_array" + }, + "get": { + "tags": [ + "/storage/array" + ], + "summary": "Summary of all storage arrays", + "description": "Retrieve the host IP and username for all storage arrays.", + "operationId": "queryStorageArrays", + "parameters": [], + "responses": { + "200": { + "description": "Storage arrays.", + "schema": { + "$ref": "#/definitions/StorageArrayDetailListResponse" + } + } + }, + "x-group": "storage_array" + } + }, + "/oracle/db/snapshot/{id}": { + "delete": { + "tags": [ + "/oracle" + ], + "summary": "Delete a particular Oracle database snapshot", + "description": "Given a particular snapshot id, delete the corresponding Oracle database snapshot.", + "operationId": "deleteOracleDbSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an Oracle database snapshot.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Successfully deleted the specified Oracle database snapshot and there is nothing to return." + } + }, + "x-group": "oracle_db" + }, + "get": { + "tags": [ + "/oracle" + ], + "summary": "Get information about a Oracle database snapshot", + "description": "Retrieve detailed information about a specified Oracle database snapshot.", + "operationId": "getOracleDbSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an Oracle database snapshot.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns details about a specified Oracle database snapshot.", + "schema": { + "$ref": "#/definitions/OracleDbSnapshotDetail" + } + } + }, + "x-group": "oracle_db" + } + }, + "/organization/{id}/storage/array": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get information for authorized storage array resources in an organization", + "description": "Retrieve summary information for the explicitly authorized storage array resources of an organization. Information for a storage array resource is only included when the organization has an explicit authorization for the resource. 
This endpoint returns an empty list for the default global organization.", + "operationId": "getExplicitlyAuthorizedStorageArrayResources", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Get storage array resources.", + "schema": { + "$ref": "#/definitions/StorageArrayHierarchyObjectSummaryListResponse" + } + } + }, + "x-group": "organization_storage_array_volume_group" + } + }, + "/host/{id}/flag": { + "get": { + "tags": [ + "/host" + ], + "summary": "REQUIRES SUPPORT TOKEN - Get the value for a physical host flag", + "description": "REQUIRES SUPPORT TOKEN - Retrieve physical host agent flag value. Physical host agent flags are defined during compile-time. A support token is required for this operation.", + "operationId": "getHostFlag", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the registered host.", + "required": true, + "type": "string" + }, + { + "name": "flag", + "in": "query", + "description": "Flag name.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Physical host flag and value.", + "schema": { + "$ref": "#/definitions/HostFlag" + } + } + }, + "x-group": "hosts" + }, + "patch": { + "tags": [ + "/host" + ], + "summary": "REQUIRES SUPPORT TOKEN - Modify physical host agent flags", + "description": "REQUIRES SUPPORT TOKEN - Modify physical host agent flags to new values. Physical host agent flags are defined during compile-time. They can be passed in during agent startup or can be modified dynamically using this endpoint. A support token is required for this operation.", + "operationId": "setHostFlag", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the registered host.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "flag", + "description": "Flag name and new value.", + "required": true, + "schema": { + "$ref": "#/definitions/HostFlag" + } + } + ], + "responses": { + "204": { + "description": "Physical host agent flags successfully modified." 
+ } + }, + "x-group": "hosts" + } + }, + "/polaris/app_blueprint/{id}/snapshot": { + "post": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Create an on-demand snapshot for a Blueprint", + "description": "Start an asynchronous job to create an on-demand snapshot for a specified Blueprint object.", + "operationId": "createAppBlueprintSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a Blueprint object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for the on-demand backup of a specified Blueprint object.", + "required": false, + "schema": { + "$ref": "#/definitions/BaseOnDemandSnapshotConfig" + } + } + ], + "responses": { + "202": { + "description": "Status of async job for an on-demand snapshot of a Blueprint.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "app_blueprint" + }, + "delete": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Delete all snapshots of Blueprint", + "description": "Delete all snapshots for a specified Blueprint object.", + "operationId": "deleteAppBlueprintSnapshots", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a Blueprint object.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Snapshots successfully deleted." + } + }, + "x-group": "app_blueprint" + } + }, + "/vmware/vm/snapshot/mount/{id}": { + "get": { + "tags": [ + "/vmware/vm" + ], + "summary": "Get summary information for a live mount", + "description": "Retrieve the following summary information for a specified live mount: ID, snapshot date, ID of source VM, name of source VM, ID of source host, status of the mount, mount event ID, and unmount event ID.", + "operationId": "getMount", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the live mount.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns detail information for a specified live mount.", + "schema": { + "$ref": "#/definitions/MountDetail" + } + } + }, + "deprecated": true, + "x-group": "vm" + } + }, + "/stats/archival/bandwidth/time_series": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get the bandwidth of the archival location", + "description": "Gets the bandwidth of the archival location. If data location ID is not provided, returns the sum over all archival locations.", + "operationId": "archivalBandwidth", + "parameters": [ + { + "name": "data_location_id", + "in": "query", + "description": "Data location ID.", + "required": false, + "type": "string" + }, + { + "name": "range", + "in": "query", + "description": "Range for timeseries. eg: -1h, -1min, etc. 
Default value is -1h.", + "required": false, + "type": "string" + }, + { + "name": "bandwidth_type", + "in": "query", + "description": "Bandwidth type for the archival location.", + "required": false, + "type": "string", + "enum": [ + "Incoming", + "Outgoing", + "All" + ] + } + ], + "responses": { + "200": { + "description": "Returns a timeSeries depicting bytes per second.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/TimeStat" + } + } + } + }, + "x-group": "stats" + } + }, + "/vmware/config/set_datastore_freespace_threshold": { + "patch": { + "tags": [ + "/vmware/config" + ], + "summary": "Set the configuration value for the datastore freespace threshold", + "description": "Set the configuration value for the datastore freespace threshold for VMware virtual machine backups.", + "operationId": "setDatastoreFreespaceThreshold", + "parameters": [ + { + "in": "body", + "name": "vm_datastore_freespace_threshold", + "description": "Datastore freespace threshold and VMware virtual machine ID (optional).", + "required": true, + "schema": { + "$ref": "#/definitions/VmwareDatastoreFreespaceThreshold" + } + } + ], + "responses": { + "200": { + "description": "Updated value of the freespace threshold on the datastore that allows VMware virtual machine backups.", + "schema": { + "$ref": "#/definitions/VmwareDatastoreFreespaceThreshold" + } + } + }, + "x-group": "vmware_config" + } + }, + "/notification_setting": { + "post": { + "tags": [ + "/notification_setting" + ], + "summary": "Create a new Notification Setting", + "description": "Create a new Notification Setting with the specified parameters.", + "operationId": "createNotificationSetting", + "parameters": [ + { + "in": "body", + "name": "notification_setting_create", + "description": "Parameters for creating a Notification Setting.", + "required": true, + "schema": { + "$ref": "#/definitions/NotificationSettingCreate" + } + } + ], + "responses": { + "201": { + "description": "Summary of the new Notification Setting.", + "schema": { + "$ref": "#/definitions/NotificationSettingSummary" + } + } + }, + "x-group": "events" + }, + "get": { + "tags": [ + "/notification_setting" + ], + "summary": "Get Notification Settings", + "description": "Get list of all Notification Settings.", + "operationId": "queryNotificationSettings", + "parameters": [], + "responses": { + "200": { + "description": "Get list of all Notification Settings.", + "schema": { + "$ref": "#/definitions/NotificationSettingSummaryListResponse" + } + } + }, + "x-group": "events" + } + }, + "/managed_volume/snapshot/{id}/export": { + "post": { + "tags": [ + "/managed_volume" + ], + "summary": "Create a request to export a snapshot", + "description": "Export a managed volume snapshot as a share.", + "operationId": "exportSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "params", + "description": "Export parameters.", + "required": true, + "schema": { + "$ref": "#/definitions/ManagedVolumeExportConfig" + } + } + ], + "responses": { + "202": { + "description": "Status for the export request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "managed_volume" + } + }, + "/aws/ec2_instance/indexing_state": { + "patch": { + "tags": [ + "/aws/ec2_instance" + ], + "summary": "Indexing update of specified EC2 instances", + "description": "Update whether to enable or disable indexing in the specified list of EC2 
instances.", + "operationId": "updateAwsEc2InstancesIndexingState", + "parameters": [ + { + "in": "body", + "name": "config", + "description": "Configuration for the updates of the indexing state of a given list of EC2 instances.", + "required": true, + "schema": { + "$ref": "#/definitions/AwsEc2InstancesIndexingStateUpdateConfig" + } + } + ], + "responses": { + "204": { + "description": "Successfully updated indexing state of all EC2 instances." + } + }, + "x-group": "aws_ec2_instance" + } + }, + "/cluster/{id}/is_hardware_encrypted": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get if it is hardware encrypted", + "description": "Check whether this Rubrik cluster is hardware encrypted.", + "operationId": "isHardwareEncrypted", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "True when the Rubrik cluster is hardware encrypted.", + "schema": { + "$ref": "#/definitions/BooleanResponse" + } + } + }, + "x-group": "cluster", + "x-unauthenticated": true + } + }, + "/vcd/cluster": { + "post": { + "tags": [ + "/vcd/cluster" + ], + "summary": "(DEPRECATED) Add a vCD Cluster", + "description": "Create a vCD Cluster object by providing the address of the vCD Cluster and the credentials for an account on the vCD Cluster that has administrator privileges. This request initiates an asynchronous job to connect with the vCD Cluster and retrieve the required metadata. This endpoint will be removed in CDM v6.1 in favor of `POST v1/vcd/cluster`.", + "operationId": "createVcdCluster", + "parameters": [ + { + "in": "body", + "name": "vcd_detail", + "description": "IP address and account credentials of the vCD Cluster, and ID of the managing Rubrik cluster.", + "required": true, + "schema": { + "$ref": "#/definitions/VcdClusterConfig" + } + } + ], + "responses": { + "202": { + "description": "Status of an async request to add a vCD Cluster.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "deprecated": true, + "x-group": "vcd_cluster" + }, + "get": { + "tags": [ + "/vcd/cluster" + ], + "summary": "(DEPRECATED) Get summary for all vCD Clusters", + "description": "Retrieve summary information for all vCD cluster objects. 
This endpoint will be removed in CDM v6.1 in favor of `GET v1/vcd/cluster`.", + "operationId": "queryVcdCluster", + "parameters": [ + { + "name": "name", + "in": "query", + "description": "Search for a vCD Cluster object by name.", + "required": false, + "type": "string" + }, + { + "name": "status", + "in": "query", + "description": "Filter the results using the status value of the vCD Cluster objects.", + "required": false, + "type": "string", + "enum": [ + "Disconnected", + "Refreshing", + "Connected", + "BadlyConfigured", + "Deleting", + "Remote" + ] + }, + { + "name": "sort_by", + "in": "query", + "description": "Attribute to sort the results on.", + "required": false, + "type": "string", + "enum": [ + "Name", + "Status" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Order for sorting the results, either ascending or descending.", + "required": false, + "type": "string", + "default": "asc", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "Summary information for vCD clusters.", + "schema": { + "$ref": "#/definitions/VcdClusterSummaryListResponse" + } + } + }, + "deprecated": true, + "x-group": "vcd_cluster" + } + }, + "/cluster/{id}/packages": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Rubrik CDM packages for installation", + "description": "List of Rubrik CDM packages available for installation.", + "operationId": "getPackages", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "List of Rubrik CDM packages available for installation.", + "schema": { + "$ref": "#/definitions/CdmPackageInfoListResponse" + } + } + }, + "x-group": "cluster", + "x-unauthenticated": true + } + }, + "/organization/{id}/oracle": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get information for authorized Oracle resources in an organization", + "description": "Retrieve summary information for the explicitly authorized Oracle resources of an organization. Information for a Oracle resource is only included when the organization has an explicit authorization for the resource. 
This endpoint returns an empty list for the default global organization.", + "operationId": "getExplicitlyAuthorizedOracleResources", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Get Oracle resources.", + "schema": { + "$ref": "#/definitions/OracleHierarchyObjectSummaryListResponse" + } + } + }, + "x-group": "organization_oracle" + } + }, + "/stats/snapshot_storage/physical": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get snapshot physical storage", + "description": "Retrieve the amount of physical Rubrik cluster storage used by snapshots.", + "operationId": "physicalSnapshotStorage", + "parameters": [], + "responses": { + "200": { + "description": "Returns an object with attribute: name(String), key(String), value(String), frequencyInMin(Integer), lastUpdateTime(Date).", + "schema": { + "$ref": "#/definitions/OfflineStatSummary" + } + } + }, + "x-group": "stats" + } + }, + "/nutanix/cluster/{id}/refresh": { + "post": { + "tags": [ + "/nutanix/cluster" + ], + "summary": "Refresh Nutanix cluster metadata", + "description": "Create a job to refresh the metadata for the specified Nutanix cluster.", + "operationId": "createNutanixClusterRefresh", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Nutanix cluster.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status for the async request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "nutanix_cluster" + } + }, + "/log": { + "post": { + "tags": [ + "/log" + ], + "summary": "INTERNAL ONLY: Log a message", + "description": "Used by UI to generate logs.", + "operationId": "log", + "parameters": [ + { + "in": "body", + "name": "log_message", + "description": "Details of message to log.", + "required": true, + "schema": { + "$ref": "#/definitions/LogMessage" + } + } + ], + "responses": { + "204": { + "description": "TODO." 
+ } + }, + "x-group": "internal_log", + "x-unauthenticated": true + } + }, + "/aws/ec2_instance/snapshot/{id}/export": { + "post": { + "tags": [ + "/aws/ec2_instance" + ], + "summary": "Export EC2 instance snapshot", + "description": "Export a specified snapshot of an EC2 instance.", + "operationId": "exportAwsEc2InstanceSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of a snapshot object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for exporting an EC2 instance snapshot.", + "required": true, + "schema": { + "$ref": "#/definitions/AwsEc2InstanceSnapshotExportConfig" + } + } + ], + "responses": { + "202": { + "description": "Status for the export request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "aws_ec2_instance" + } + }, + "/polaris/failover/target/{id}/start": { + "put": { + "tags": [ + "/polaris/failover" + ], + "summary": "Start a failover to this location", + "description": "Provide the details required to fail over a Blueprint to the current location.", + "operationId": "startFailoverOnTarget", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of the failover operation.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "definition", + "description": "Definition of the failover.", + "required": true, + "schema": { + "$ref": "#/definitions/TriggerFailoverOnTargetDefinition" + } + } + ], + "responses": { + "202": { + "description": "Failover job succefully scheduled.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + }, + "422": { + "description": "Returned if the request fails.", + "schema": { + "$ref": "#/definitions/RequestFailedException" + } + } + }, + "x-group": "failover" + } + }, + "/hyperv/vm/snapshot/{id}/mount": { + "post": { + "tags": [ + "/hyperv/vm" + ], + "summary": "Create a live mount request", + "description": "Create a live mount request with given configuration.", + "operationId": "createHypervVirtualMachineSnapshotMount", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of Snapshot.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for the mount request.", + "required": false, + "schema": { + "$ref": "#/definitions/HypervMountSnapshotJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Status for the mount request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "hyperv_vm" + } + }, + "/principal_search": { + "post": { + "tags": [ + "/principal_search" + ], + "summary": "Searches for security principals", + "description": "Searches for security principals using one or more independent queries. 
The result set is the union of the principals returned by each query.", + "operationId": "searchPrincipals", + "parameters": [ + { + "in": "body", + "name": "search_request", + "description": "Search request to perform.", + "required": true, + "schema": { + "$ref": "#/definitions/PrincipalSearchRequest" + } + } + ], + "responses": { + "200": { + "description": "Returns the list of matching principals.", + "schema": { + "$ref": "#/definitions/PrincipalSummaryListResponse" + } + } + }, + "x-group": "principal_search" + } + }, + "/cloud_on/azure/region": { + "get": { + "tags": [ + "/cloud_on" + ], + "summary": "Get all the regions in Azure", + "description": "Get all the regions in Azure.\n", + "operationId": "getAzureRegions", + "parameters": [], + "responses": { + "200": { + "description": "Returns list of id name pair with each element being azure region. Id is the unique identifier and name is displayable name.\n", + "schema": { + "$ref": "#/definitions/IdNamePairListResponse" + } + } + }, + "x-group": "cloud_instance" + } + }, + "/authorization/effective/for_resources": { + "post": { + "tags": [ + "/authorization" + ], + "summary": "Queries the current effective authorizations on the specified resources\n", + "description": "Queries the current principal's desired authorizations on the specified resources.\n", + "operationId": "effectiveAuthorizationsForResources", + "parameters": [ + { + "in": "body", + "name": "effective_authorizations_query", + "description": "Query listing all resources to get effective authorizations for.\n", + "required": true, + "schema": { + "$ref": "#/definitions/EffectiveAuthorizationsQuery" + } + } + ], + "responses": { + "200": { + "description": "The list of effective authorizations for the desired principal on the specified resources.\n", + "schema": { + "$ref": "#/definitions/EffectiveAuthorizationSummary" + } + }, + "400": { + "description": "Returned if an invalid ManagedId is given.", + "schema": { + "type": "string" + } + } + }, + "x-group": "authorization", + "x-rubrik-comment": "POST is used since the list of resources may exceed the GET url size limit" + } + }, + "/report/{id}/email_subscription": { + "post": { + "tags": [ + "/report" + ], + "summary": "Creates email subscription for a report", + "description": "Creates an email subscription for the report specified by id.", + "operationId": "createEmailSubscription", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Id of the report to create an email subscription for.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "subscribe_request", + "description": "Definition of the email subscription to create.", + "required": true, + "schema": { + "$ref": "#/definitions/EmailSubscriptionCreate" + } + } + ], + "responses": { + "200": { + "description": "Summary of email subscriptions for the report specified by id.", + "schema": { + "$ref": "#/definitions/EmailSubscriptionSummary" + } + } + }, + "x-group": "internal_report" + }, + "get": { + "tags": [ + "/report" + ], + "summary": "Returns email subscriptions for a report", + "description": "Returns all email subscriptions for the report specified by id.", + "operationId": "getEmailSubscriptions", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Id of the report to get email subscriptions for.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Summary of email subscriptions for the report specified by id.", + "schema": { + "type": "array", + 
"items": { + "$ref": "#/definitions/EmailSubscriptionSummary" + } + } + } + }, + "x-group": "internal_report" + } + }, + "/session/bulk_delete": { + "post": { + "tags": [ + "/session" + ], + "summary": "Delete session tokens", + "description": "Deletes session tokens.", + "operationId": "bulkDeleteSessions", + "parameters": [ + { + "in": "body", + "name": "bulk_delete_sessions_request", + "description": "Request to delete sessions.", + "required": true, + "schema": { + "$ref": "#/definitions/BulkDeleteSessionsRequest" + } + } + ], + "responses": { + "204": { + "description": "Successfully deleted the specified session token(s)." + } + }, + "x-group": "session" + } + }, + "/oracle/db/{id}": { + "get": { + "tags": [ + "/oracle" + ], + "summary": "(DEPRECATED) Get Oracle database information", + "description": "Retrieve detailed information for a specified Oracle database object. This endpoint will be removed in Rubrik CDM 7.0 in favor of `GET v1/oracle/db/{id}`.", + "operationId": "getOracleDb", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an Oracle database object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Successful query results.", + "schema": { + "$ref": "#/definitions/OracleDbDetail" + } + } + }, + "deprecated": true, + "x-group": "oracle_db" + }, + "patch": { + "tags": [ + "/oracle" + ], + "summary": "(DEPRECATED) Update an Oracle database", + "description": "Update properties of an Oracle database object. This endpoint will be removed in Rubrik CDM 7.0 in favor of `PATCH v1/oracle/db/{id}`.", + "operationId": "updateOracleDb", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an Oracle database object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "update_properties", + "description": "Properties to use for the update of an Oracle database object.", + "required": true, + "schema": { + "$ref": "#/definitions/OracleUpdate" + } + } + ], + "responses": { + "200": { + "description": "Successfully updated an Oracle database object.", + "schema": { + "$ref": "#/definitions/OracleDbDetail" + } + } + }, + "deprecated": true, + "x-group": "oracle_db" + } + }, + "/volume_group/snapshot/{id}/mount": { + "post": { + "tags": [ + "/volume_group" + ], + "summary": "Initiate a live mount for a given Volume Group snapshot", + "description": "Create a live mount request for a Volume Group snapshot.", + "operationId": "createVolumeGroupMount", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for the mount request. The mount will expose an SMB address per recovered volume. In addition, if a target host is specified, each volume must specify a mount path. If a target host is specified but no mount paths are, they will be generated for every volume. In all cases, a single SMB share will be created for this mount. 
If a target host is specified, the share will only be accessible by that host.", + "required": true, + "schema": { + "$ref": "#/definitions/VolumeGroupMountSnapshotJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Status for the mount request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "volume_group" + } + }, + "/node_management/cluster_ip/{ip}": { + "get": { + "tags": [ + "/node_management" + ], + "summary": "Get the node which owns the cluster IP address", + "description": "Returns the node which owns provided cluster IP address. Returns 'NOT OWNED' if the IP is not owned by any node.", + "operationId": "getIpOwner", + "parameters": [ + { + "name": "ip", + "in": "path", + "description": "cluster IP for which owner needs to be fetched.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns the node which owns provided cluster IP address.", + "schema": { + "type": "string" + } + }, + "422": { + "description": "Returned if IP address is invalid.", + "schema": { + "$ref": "#/definitions/RequestFailedException" + } + } + }, + "x-group": "internal_node_management" + } + }, + "/cluster/{id}/system_status": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get system status", + "description": "Retrieves information about the status of the Rubrik cluster.", + "operationId": "getSystemStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "Returned if the query was successful.", + "schema": { + "$ref": "#/definitions/SystemStatus" + } + } + }, + "x-group": "cluster" + } + }, + "/mssql/availability_group": { + "get": { + "tags": [ + "/mssql" + ], + "summary": "(DEPRECATED) Get summary information for Microsoft SQL availability groups", + "description": "Returns a list of summary information for Microsoft SQL availability groups. This endpoint will be removed in CDM v6.0 in favor of `GET v1/mssql/availability_group`.", + "operationId": "queryMssqlAvailabilityGroup", + "parameters": [ + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter by primary cluster.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returned if the query was successful.", + "schema": { + "$ref": "#/definitions/MssqlAvailabilityGroupSummaryListResponse" + } + } + }, + "deprecated": true, + "x-group": "mssql_availability_group" + } + }, + "/storage/array_volume_group/snapshot/{id}/browse": { + "get": { + "tags": [ + "/storage/array" + ], + "summary": "List files in volume group snapshot", + "description": "Lists all files and directories beneath a specified path in a volume group snapshot.", + "operationId": "browseStorageArrayVolumeGroupSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + }, + { + "name": "path", + "in": "query", + "description": "The absolute path of the starting point for the directory listing.", + "required": true, + "type": "string" + }, + { + "name": "offset", + "in": "query", + "description": "Starting position in the list of path entries contained in the query results, sorted by lexicographical order. 
The response includes the specified numbered entry and all higher numbered entries.", + "required": false, + "type": "integer", + "format": "int32" + }, + { + "name": "limit", + "in": "query", + "description": "Maximum number of entries in the response.", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "responses": { + "200": { + "description": "List of files and directories at the specified path.", + "schema": { + "$ref": "#/definitions/BrowseResponseListResponse" + } + } + }, + "x-group": "storage_array_volume_group" + } + }, + "/archive/object_store/{id}": { + "get": { + "tags": [ + "/archive" + ], + "summary": "Get information about an object storage archival location", + "description": "Get the archival location object for a specific object storage location.\n", + "operationId": "getObjectStoreLocation", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of an archival location that uses the S3 protocol.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Object containing information about the archival location.", + "schema": { + "$ref": "#/definitions/ObjectStoreLocationDetail" + } + } + }, + "x-group": "archival" + }, + "patch": { + "tags": [ + "/archive" + ], + "summary": "Update an object storage archival location", + "description": "Update the properties of an object storage location. To update the bucket count, specify a value equal to or greater than the existing bucket count.\n", + "operationId": "updateObjectStoreLocation", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the archival location.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "updated_definition", + "description": "Object containing information about the archival location.", + "required": true, + "schema": { + "$ref": "#/definitions/ObjectStoreUpdateDefinition" + } + } + ], + "responses": { + "200": { + "description": "Returns the successfully updated archival location object.", + "schema": { + "$ref": "#/definitions/ObjectStoreLocationDetail" + } + } + }, + "x-group": "archival" + } + }, + "/host_fileset/{id}": { + "get": { + "tags": [ + "/host_fileset" + ], + "summary": "Get detail information for a host", + "description": "Retrieve detail information for a registered host. For each host, we also retrieve the detail information for the filesets applied to the host.", + "operationId": "getHostFileset", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the registered host.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Summary information for the specified host.", + "schema": { + "$ref": "#/definitions/HostFilesetDetail" + } + } + }, + "x-group": "host_fileset" + } + }, + "/host/share/{id}/search": { + "get": { + "tags": [ + "/host/share" + ], + "summary": "Search for a file within a network share", + "description": "Search for a file within a network share. Search via full path prefix or filename prefix.", + "operationId": "searchHostShare", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the network share to search.", + "required": true, + "type": "string" + }, + { + "name": "path", + "in": "query", + "description": "The path query. 
Either path prefix or filename prefix.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Search results.", + "schema": { + "$ref": "#/definitions/SearchResponseListResponse" + } + } + }, + "x-group": "hosts" + } + }, + "/browse": { + "get": { + "tags": [ + "/browse" + ], + "summary": "Lists all files and directories in a given path", + "description": "Lists all files and directories in a given path.", + "operationId": "browseQuery", + "parameters": [ + { + "name": "snapshot_id", + "in": "query", + "description": "ID of the Snapshot that is browsed.", + "required": true, + "type": "string" + }, + { + "name": "path", + "in": "query", + "description": "The absolute path of the starting point for the directory listing.", + "required": true, + "type": "string" + }, + { + "name": "offset", + "in": "query", + "description": "Starting position in the list of path entries contained in the query results, sorted by lexicographical order. The response includes the specified numbered entry and all higher numbered entries.", + "required": false, + "type": "integer", + "format": "int32" + }, + { + "name": "limit", + "in": "query", + "description": "Maximum number of entries in the response.", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "responses": { + "200": { + "description": "List of files and directories at the specified path.", + "schema": { + "$ref": "#/definitions/BrowseResponseListResponse" + } + } + }, + "x-group": "search" + } + }, + "/cloud_on/aws/recommended_instance_type": { + "get": { + "tags": [ + "/cloud_on" + ], + "summary": "Get the recommended instance type for a given snapshot", + "description": "Get the recommended instance type for a given snapshot.", + "operationId": "getAwsRecommendedInstanceType", + "parameters": [ + { + "name": "snapshot_id", + "in": "query", + "description": "snapshot ID.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns the recommended instance type.", + "schema": { + "$ref": "#/definitions/RecommendedInstanceType" + } + } + }, + "x-group": "cloud_instance" + } + }, + "/cluster/{id}/brik_count": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get cluster brik count", + "description": "Retrieve the number of briks in this Rubrik cluster.", + "operationId": "getClusterBrikCount", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "Brik count.", + "schema": { + "$ref": "#/definitions/CountResponse" + } + } + }, + "x-group": "cluster" + } + }, + "/cloud_on/aws/instance": { + "post": { + "tags": [ + "/cloud_on" + ], + "summary": "Create an instance on AWS\n", + "description": "Create an instance at a specified location on AWS using a specified snapshot or cloud image. When a cloud image does not exist, one is created to use for the instantiation. For a cloud image, the imageId member is required. 
For a snapshot, the snapshotId and snappableId members are both required.\n", + "operationId": "createAwsPublicCloudMachineInstance", + "parameters": [ + { + "in": "body", + "name": "request", + "description": "Request to create a new Aws public cloud instance.", + "required": true, + "schema": { + "$ref": "#/definitions/CreateCloudInstanceRequest" + } + } + ], + "responses": { + "202": { + "description": "Status for the Aws instantiation request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "cloud_instance" + }, + "get": { + "tags": [ + "/cloud_on" + ], + "summary": "Query for a list of a AWS cloud instances", + "description": "Query for a list of a AWS cloud instances.", + "operationId": "queryAwsPublicCloudMachineInstance", + "parameters": [ + { + "name": "offset", + "in": "query", + "description": "Ignore these many matches in the beginning.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "limit", + "in": "query", + "description": "Limit the number of matches returned. Default is to return all available records.\n", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "snappable_name", + "in": "query", + "description": "Filters results to instances with source snappable names containing the given query.\n", + "required": false, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Sort the result by given attribute.", + "required": false, + "type": "string", + "default": "SourceVmName", + "enum": [ + "SourceVmName", + "LocationName", + "InstanceType", + "CreatedBy", + "Status", + "SnapshotTime", + "CreationTime" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "The sort order. 
Defaults to asc if not specified.", + "required": false, + "type": "string", + "default": "asc", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "Returns summary information for Aws cloud instances.", + "schema": { + "$ref": "#/definitions/AwsInstanceSummaryListResponse" + } + } + }, + "x-group": "cloud_instance" + } + }, + "/config/usersettable_crystal/reset": { + "patch": { + "tags": [ + "/config" + ], + "summary": "Reset the global Crystal configuration", + "description": "Reset the global Crystal configuration.", + "operationId": "resetUserSettableCrystalConfig", + "parameters": [ + { + "in": "body", + "name": "new_values", + "description": "Configuration keys to reset.", + "required": true, + "schema": { + "$ref": "#/definitions/UserSettableGlobalCrystalConfig" + } + } + ], + "responses": { + "200": { + "description": "global configuration.", + "schema": { + "$ref": "#/definitions/UserSettableGlobalCrystalConfig" + } + } + }, + "x-group": "internal_config_reset" + } + }, + "/report/data_source/table": { + "post": { + "tags": [ + "/report" + ], + "summary": "Get table data from data soruce based on ReportTableRequest", + "description": "Returns the raw table data without constructing a report.", + "operationId": "getTableFromDataSource", + "parameters": [ + { + "in": "body", + "name": "data_source_table_request", + "description": "Definition of items to get from the report table.", + "required": true, + "schema": { + "$ref": "#/definitions/DataSourceTableRequest" + } + } + ], + "responses": { + "200": { + "description": "Data from the data source.", + "schema": { + "$ref": "#/definitions/DataSourceTableData" + } + } + }, + "x-group": "internal_report" + } + }, + "/app_blueprint": { + "get": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Get summary for Blueprints", + "description": "Retrieve summary information for all Blueprint objects.", + "operationId": "queryAppBlueprints", + "parameters": [ + { + "name": "sort_by", + "in": "query", + "description": "Attribute to sort the Blueprints list on.", + "required": false, + "type": "string", + "enum": [ + "Name", + "EffectiveSlaDomainName", + "SlaAssignment", + "ConnectionStatus" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Order for sorting the results, either ascending or descending.", + "required": false, + "type": "string", + "default": "asc", + "enum": [ + "asc", + "desc" + ] + }, + { + "name": "limit", + "in": "query", + "description": "Limit the number of matches returned.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Number of matches to ignore from the beginning of the results.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "name", + "in": "query", + "description": "Search for Blueprint objects by name.", + "required": false, + "type": "string" + }, + { + "name": "is_relic", + "in": "query", + "description": "A Boolean value that specifies whether to filter results by the isRelic field of the Blueprint. When 'true,' the results show only relic Blueprints. When 'false,' the results show only non-relic Blueprints. 
By default, shows both relic and non-relic Blueprints.", + "required": false, + "type": "boolean" + }, + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter by ID of effective SLA domain.", + "required": false, + "type": "string" + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter by primary cluster ID, or **local**.", + "required": false, + "type": "string" + }, + { + "name": "sla_assignment", + "in": "query", + "description": "Filter by SLA assignment type.", + "required": false, + "type": "string", + "enum": [ + "Derived", + "Direct", + "Unassigned" + ] + } + ], + "responses": { + "200": { + "description": "Summary information for Blueprints.", + "schema": { + "$ref": "#/definitions/AppBlueprintSummaryListResponse" + } + } + }, + "x-group": "app_blueprint" + } + }, + "/nutanix/vm": { + "get": { + "tags": [ + "/nutanix/vm" + ], + "summary": "Get list of Nutanix VMs", + "description": "Get summary of all the VMs.", + "operationId": "queryNutanixVm", + "parameters": [ + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter by ID of effective SLA domain.", + "required": false, + "type": "string" + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter by primary cluster ID, or **local**.", + "required": false, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Limit the number of matches returned.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Ignore these many matches in the beginning.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "is_relic", + "in": "query", + "description": "Filter by isRelic field of VM. 
Return both relic and non-relic VM if this query is not set.", + "required": false, + "type": "boolean" + }, + { + "name": "name", + "in": "query", + "description": "Search vm by vm name.", + "required": false, + "type": "string" + }, + { + "name": "sla_assignment", + "in": "query", + "description": "Filter by SLA assignment type.", + "required": false, + "type": "string", + "enum": [ + "Derived", + "Direct", + "Unassigned" + ] + }, + { + "name": "sort_by", + "in": "query", + "description": "Attribute to sort the results on.", + "required": false, + "type": "string", + "enum": [ + "effectiveSlaDomainName", + "name", + "folderPath", + "infraPath" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending or descending.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "Get page summary about Nutanix virtual machine.", + "schema": { + "$ref": "#/definitions/NutanixVmSummaryListResponse" + } + } + }, + "x-group": "nutanix_vm" + } + }, + "/archive/nfs/{id}": { + "patch": { + "tags": [ + "/archive" + ], + "summary": "Update an NFS archival location", + "description": "Update the properties of an NFS archival location.", + "operationId": "updateNfsLocation", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the archival location.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "updated_definition", + "description": "Object containing information about the archival location.", + "required": true, + "schema": { + "$ref": "#/definitions/NfsLocationUpdate" + } + } + ], + "responses": { + "200": { + "description": "Returns the updated NFS location object.", + "schema": { + "$ref": "#/definitions/NfsLocationDetail" + } + } + }, + "x-group": "archival" + } + }, + "/unmanaged_object/{id}/snapshot/bulk_delete": { + "post": { + "tags": [ + "/unmanaged_object" + ], + "summary": "(DEPRECATED)Bulk delete specified unmanaged snapshots for the given object. Must specify object type as well", + "description": "Bulk delete unmanaged snapshots specified by snapshotIds for the given object. API returning success does not gurantee that the snapshot will be expired.", + "operationId": "bulkDeleteUnmanagedSnapshotsForObject", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "delete_snapshots_config", + "description": "List of snapshotIds to delete and the type of object the snapshots are of.", + "required": true, + "schema": { + "$ref": "#/definitions/BulkDeleteUnmanagedSnapshotsConfig" + } + } + ], + "responses": { + "204": { + "description": "OK on success, success doesn't imply all snapshots will be deleted." 
+ }, + "422": { + "description": "Returned if delete API fails.", + "schema": { + "$ref": "#/definitions/RequestFailedException" + } + } + }, + "x-group": "unmanaged_object" + } + }, + "/organization/{id}/nas/share/metric": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get nas share metrics", + "description": "Retrieve the total object count, total protected object and no sla object count.", + "operationId": "getNasShareMetric", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specify the organization id.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns an object with metrics.", + "schema": { + "$ref": "#/definitions/OrganizationResourceMetric" + } + } + }, + "x-group": "organization_host" + } + }, + "/smb/domain": { + "post": { + "tags": [ + "/smb" + ], + "summary": "Add a new domain", + "description": "Add a new domain manually and join Active Directory.", + "operationId": "addAndJoinSmbDomain", + "parameters": [ + { + "in": "body", + "name": "config", + "description": "Configuration for joining Active Directory.", + "required": true, + "schema": { + "$ref": "#/definitions/SmbDomainAddRequest" + } + } + ], + "responses": { + "200": { + "description": "Added SMB Active Directory domain.", + "schema": { + "$ref": "#/definitions/SmbDomainDetail" + } + } + }, + "x-group": "smb" + }, + "get": { + "tags": [ + "/smb" + ], + "summary": "Get discovered SMB domains", + "description": "Get discovered SMB domains.", + "operationId": "listSmbDomains", + "parameters": [], + "responses": { + "200": { + "description": "Discovered SMB Active Directory domains.", + "schema": { + "$ref": "#/definitions/SmbDomainDetailListResponse" + } + } + }, + "x-group": "smb" + } + }, + "/oracle/hierarchy/{id}/children": { + "get": { + "tags": [ + "/oracle/hierarchy" + ], + "summary": "Get list of immediate descendant objects", + "description": "Retrieve the list of immediate descendant objects for the specified parent.", + "operationId": "getOracleHierarchyChildren", + "parameters": [ + { + "name": "name", + "in": "query", + "description": "Filter a response by making an infix comparison of the Oracle RAC name or standalone host name, names of databases on the RAC or the host, database SIDs and database tablespaces in the response with the specified value.", + "required": false, + "type": "string" + }, + { + "name": "id", + "in": "path", + "description": "ID of the parent Oracle hierarchy object. 
To get top-level nodes, use **root** as the ID.", + "required": true, + "type": "string" + }, + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter by ID of effective SLA domain.", + "required": false, + "type": "string" + }, + { + "name": "object_type", + "in": "query", + "description": "Filter by node object type.", + "required": false, + "type": "string", + "enum": [ + "AppBlueprint", + "AwsAccount", + "CloudCompute", + "CloudComputeRegion", + "CloudNativeAuthzRoot", + "ComputeCluster", + "DataCenter", + "DataStore", + "Ec2Instance", + "ExclusionPattern", + "ExclusionPatternAuthzRoot", + "Folder", + "Hdfs", + "HostFailoverCluster", + "HostRoot", + "HypervAuthzRoot", + "HypervCluster", + "HypervScvmm", + "HypervServer", + "HypervVirtualMachine", + "FailoverClusterApp", + "KuprHost", + "KuprHostAuthzRoot", + "LinuxFileset", + "LinuxHost", + "LinuxHostAuthzRoot", + "ManagedVolume", + "ManagedVolumeAuthzRoot", + "ManagedVolumeRoot", + "MssqlAuthzRoot", + "MssqlDatabase", + "MssqlAvailabilityGroup", + "MssqlInstance", + "NasHost", + "NasHostAuthzRoot", + "NasSystem", + "NfsHostShare", + "NutanixAuthzRoot", + "NutanixCluster", + "NutanixVirtualMachine", + "OracleAuthzRoot", + "OracleDatabase", + "OracleHost", + "OracleRac", + "OracleRoot", + "SapHanaAuthzRoot", + "SapHanaDatabase", + "SapHanaSystem", + "ShareFileset", + "SlaDomain", + "SmbHostShare", + "StorageArray", + "StorageArrayVolume", + "StorageArrayVolumeGroup", + "Storm", + "User", + "vCenter", + "Vcd", + "VcdAuthzRoot", + "VcdCatalog", + "VcdOrg", + "VcdOrgVdc", + "VcdVapp", + "VcdVimServer", + "VirtualMachine", + "VmwareAuthzRoot", + "VmwareHost", + "VmwareResourcePool", + "VmwareStoragePolicy", + "VmwareTag", + "VmwareTagCategory", + "WindowsCluster", + "WindowsFileset", + "WindowsHost", + "WindowsHostAuthzRoot", + "WindowsVolumeGroup" + ] + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter by primary cluster ID, or **local**.", + "required": false, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Limit the number of matches returned.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Ignore these many matches in the beginning.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "sla_assignment", + "in": "query", + "description": "Filter by SLA assignment type.", + "required": false, + "type": "string", + "enum": [ + "Derived", + "Direct", + "Unassigned" + ] + }, + { + "name": "status", + "in": "query", + "description": "Filter by status.", + "required": false, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Attribute to sort the results on.", + "required": false, + "type": "string", + "enum": [ + "effectiveSlaDomainName", + "name", + "descendentCount.rac", + "descendentCount.oracleHost", + "descendentCount.db" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending or descending.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + }, + { + "name": "snappable_status", + "in": "query", + "description": "Determines whether to fetch Oracle hierarchy objects with additional privilege checks.", + "required": false, + "type": "string", + "enum": [ + "Protectable" + ] + } + ], + "responses": { + "200": { + "description": "Summary list of descendant objects.", + "schema": { + "$ref": 
"#/definitions/OracleHierarchyObjectSummaryListResponse" + } + } + }, + "x-group": "oracle_hierarchy" + } + }, + "/polaris/app_blueprint/{id}": { + "delete": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Delete a Blueprint object", + "description": "Delete a Blueprint object.", + "operationId": "deleteAppBlueprint", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a Blueprint object.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Blueprint successfully deleted." + } + }, + "x-group": "app_blueprint" + }, + "patch": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Update a Blueprint object", + "description": "Make changes to the parameters of a specified Blueprint object.", + "operationId": "updateAppBlueprint", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a Blueprint object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "patch_properties", + "description": "Parameters to use to update the specified Blueprint object.", + "required": true, + "schema": { + "$ref": "#/definitions/AppBlueprintPatch" + } + } + ], + "responses": { + "200": { + "description": "Details of an updated Blueprint object.", + "schema": { + "$ref": "#/definitions/AppBlueprintDetail" + } + } + }, + "x-group": "app_blueprint" + } + }, + "/job/type/vmJobMaintainer": { + "post": { + "tags": [ + "/job" + ], + "summary": "REQUIRES SUPPORT TOKEN - Creates a job that maintains other VM jobs", + "description": "REQUIRES SUPPORT TOKEN - Creates a job that maintains other VM jobs. A support token is required for this operation.", + "operationId": "vmJobMaintainer", + "parameters": [], + "responses": { + "200": { + "description": "TODO.", + "schema": { + "type": "string" + } + } + }, + "x-group": "internal_job" + } + }, + "/hyperv/hierarchy/{id}": { + "get": { + "tags": [ + "/hyperv/hierarchy" + ], + "summary": "Get summary of a hierarchy object", + "description": "Retrieve details for the specified hierarchy object.", + "operationId": "getHypervHierarchyObject", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the hierarchy object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Details of the hierarchy object.", + "schema": { + "$ref": "#/definitions/HypervHierarchyObjectSummary" + } + } + }, + "x-group": "hyperv_hierarchy" + } + }, + "/snapshot/garbage_collect_status": { + "post": { + "tags": [ + "/snapshot" + ], + "summary": "REQUIRES SUPPORT TOKEN - Check whether snapshots have been GC''d", + "description": "REQUIRES SUPPORT TOKEN - Check whether snapshots have been GC''d. 
A support token is required for this operation.", + "operationId": "snapshotGarbageCollectionStatus", + "parameters": [ + { + "in": "body", + "name": "config", + "description": "List of snapshot IDs to check if GC'd.", + "required": true, + "schema": { + "$ref": "#/definitions/InternalSnapshotGarbageCollectStatusConfig" + } + } + ], + "responses": { + "200": { + "description": "TODO.", + "schema": { + "$ref": "#/definitions/InternalGarbageCollectStatusResult" + } + } + }, + "x-group": "internal_snapshot" + } + }, + "/job/{job_id}/instances": { + "get": { + "tags": [ + "/job" + ], + "summary": "REQUIRES SUPPORT TOKEN - Get list of job instances for job ID", + "description": "REQUIRES SUPPORT TOKEN - Get instances by job ID and filter by job instance status.", + "operationId": "getJobInstances", + "parameters": [ + { + "name": "job_id", + "in": "path", + "description": "ID of the job to fetch instances.", + "required": true, + "type": "string" + }, + { + "name": "status", + "in": "query", + "description": "Filter by status of the job instance.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "List of JobInstances corresponding to job_id which are filtered by their running status.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/InternalJobInstanceDetail" + } + } + } + }, + "x-group": "internal_job" + } + }, + "/organization/{id}/vcd": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get information for authorized vCD resources in an organization", + "description": "Retrieve summary information for the explicitly authorized vCD resources of an organization. Information for a vCD resource is only included when the organization has an explicit authorization for the resource. 
This endpoint returns an empty list for the default global organization.", + "operationId": "getExplicitlyAuthorizedVcdResources", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Get vCD resources.", + "schema": { + "$ref": "#/definitions/VcdHierarchyObjectSummaryListResponse" + } + } + }, + "x-group": "organization_vcd" + } + }, + "/polaris/report/report_job_instance": { + "get": { + "tags": [ + "/polaris/report" + ], + "summary": "Get entries from report_job_instance table", + "description": "Get report job instance entries from the report job instance table.", + "operationId": "getReportJobInstanceEntries", + "parameters": [ + { + "name": "start_date", + "in": "query", + "description": "The start date of the report job instance.", + "required": true, + "type": "string", + "format": "date-time" + }, + { + "name": "end_date", + "in": "query", + "description": "The end date of the report job instance.", + "required": false, + "type": "string", + "format": "date-time" + }, + { + "name": "limit", + "in": "query", + "description": "Maximum number of entries in the response.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "day_limit", + "in": "query", + "description": "The maximum number of days to query.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "grace_period", + "in": "query", + "description": "The grace period allowed (in seconds) for the time difference between nodes.", + "required": false, + "type": "integer", + "format": "int32" + }, + { + "name": "after_id", + "in": "query", + "description": "Filter the report task entries after this ID.", + "required": false, + "type": "string" + }, + { + "name": "should_include_extra_data", + "in": "query", + "description": "A Boolean value that determines whether or not the response should include the object instance, SLA domain, and data location information.", + "required": false, + "type": "boolean" + } + ], + "responses": { + "200": { + "description": "Report. 
job instance entries.", + "schema": { + "$ref": "#/definitions/ReportJobInstanceResponse" + } + } + }, + "x-group": "polaris_report" + } + }, + "/cloud_on/aws/image/{id}": { + "delete": { + "tags": [ + "/cloud_on" + ], + "summary": "Delete a given AWS cloud image", + "description": "Delete a given AWS cloud image.", + "operationId": "deleteAwsPublicCloudMachineImage", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the AWS image.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status for the AWS image deletion request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "cloud_instance" + }, + "get": { + "tags": [ + "/cloud_on" + ], + "summary": "Get details about a given AWS cloud image", + "description": "Get details about a given AWS cloud image.", + "operationId": "getAwsPublicCloudMachineImage", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the AWS cloud image.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns details about the AWS cloud image.", + "schema": { + "$ref": "#/definitions/AwsImageDetail" + } + } + }, + "x-group": "cloud_instance" + } + }, + "/vmware/vm/virtual_disk": { + "get": { + "tags": [ + "/vmware/vm" + ], + "summary": "Get a list of all the Virtual Disks", + "description": "Get a list of all the Virtual Disks.", + "operationId": "queryVirtualDisk", + "parameters": [], + "responses": { + "200": { + "description": "Return details about all virtual disks.", + "schema": { + "$ref": "#/definitions/VirtualDiskSummaryListResponse" + } + } + }, + "x-group": "virtual_disk" + } + }, + "/storage/array_volume_group/snapshot/{id}/download_files": { + "post": { + "tags": [ + "/storage/array" + ], + "summary": "Download files", + "description": "Initiate an asynchronous job to download files from a backup of a storage array volume group. Returns a job instance ID. An email notification will be sent out when the download is ready. When the download is ready, the file can be downloaded from the corresponding event which includes the job instance ID as the value of **jobInstanceId**.", + "operationId": "createDownloadStorageArrayVolumeGroupSnapshotFiles", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a snapshot of a storage array volume group.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for a download request of files from a specified volume group snapshot.", + "required": true, + "schema": { + "$ref": "#/definitions/StorageArrayDownloadFilesJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Status of an asynchronous job for file download from specified volume group snapshot.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "storage_array_volume_group" + } + }, + "/oracle/db/{id}/snapshot": { + "post": { + "tags": [ + "/oracle" + ], + "summary": "On-demand backup of an Oracle database", + "description": "Create an asynchronous job for an on-demand snapshot of an Oracle database. The response includes an ID for the asynchronous job request. 
To see the status of the request, poll /oracle/request/{id}.", + "operationId": "createOnDemandOracleBackup", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an Oracle database object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for the on-demand snapshot of an Oracle database.", + "required": true, + "schema": { + "$ref": "#/definitions/OracleBackupJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Request status for an async job to create an on-demand snapshot of an Oracle database.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "oracle_db" + }, + "delete": { + "tags": [ + "/oracle" + ], + "summary": "Delete Oracle database snapshots", + "description": "Delete all snapshots for a specified Oracle database object. For the operation to succeed the referenced database must not be assigned to an SLA Domain.", + "operationId": "deleteAllOracleDbSnapshots", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an Oracle database object.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Successfully removed all snapshots for the specified Oracle database." + } + }, + "x-group": "oracle_db" + }, + "get": { + "tags": [ + "/oracle" + ], + "summary": "Get information about Oracle database snapshots", + "description": "Retrieve summary information about the snapshots of a specified Oracle database object.", + "operationId": "queryOracleDbSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an Oracle database object.", + "required": true, + "type": "string" + }, + { + "name": "after_time", + "in": "query", + "description": "Filter the matches in the response to include only the snapshots taken on or after the time specified by a date-time string. The date-time string should be in ISO8601 format, such as \"2016-01-01T01:23:45.678\".", + "required": false, + "type": "string", + "format": "date-time" + }, + { + "name": "before_time", + "in": "query", + "description": "Filter the matches in the response to include only the snapshots taken on or after the time specified by a date-time string. 
The date-time string should be in ISO8601 format, such as \"2016-01-01T01:23:45.678\".", + "required": false, + "type": "string", + "format": "date-time" + } + ], + "responses": { + "200": { + "description": "Results from a database snapshots query.", + "schema": { + "$ref": "#/definitions/OracleDbSnapshotSummaryListResponse" + } + } + }, + "x-group": "oracle_db" + } + }, + "/aws/ec2_instance/{id}/inplace_restore": { + "post": { + "tags": [ + "/aws/ec2_instance" + ], + "summary": "Restore in-place an EC2 instance", + "description": "Initiate the in-place restore of an EC2 instance from a given snapshot.", + "operationId": "inplaceRestoreAwsEc2Instance", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an EC2 instance object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for the in-place restore of an EC2 instance.", + "required": true, + "schema": { + "$ref": "#/definitions/AwsEc2InstanceInplaceRestoreConfig" + } + } + ], + "responses": { + "202": { + "description": "Status of an asynchronous request for an EC2 object.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "aws_ec2_instance" + } + }, + "/mssql/db/count": { + "get": { + "tags": [ + "/mssql" + ], + "summary": "(DEPRECATED) Returns a count of Microsoft SQL databases", + "description": "Returns a count of Microsoft SQL databases. This endpoint will be removed CDM v6.0 in favor of `GET v1/mssql/db/count`.", + "operationId": "countMssqlDb", + "parameters": [ + { + "name": "root_id", + "in": "query", + "description": "Include only instances that belong to this root.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returned if the query was successful.", + "schema": { + "$ref": "#/definitions/ProtectedObjectsCount" + } + } + }, + "deprecated": true, + "x-group": "mssql" + } + }, + "/organization": { + "post": { + "tags": [ + "/organization" + ], + "summary": "Create new Organization", + "description": "Create a new Organization object with the specified resource parameters.", + "operationId": "createOrganization", + "parameters": [ + { + "in": "body", + "name": "organization_create", + "description": "Parameters for creating an Organization object.", + "required": true, + "schema": { + "$ref": "#/definitions/OrganizationCreate" + } + } + ], + "responses": { + "201": { + "description": "Details of new Organization object.", + "schema": { + "$ref": "#/definitions/OrganizationSummary" + } + } + }, + "x-group": "organization" + }, + "get": { + "tags": [ + "/organization" + ], + "summary": "Get list of all Organizations", + "operationId": "queryOrganizations", + "parameters": [ + { + "name": "name", + "in": "query", + "description": "Search by Organization Name.", + "required": false, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Attribute to sort the list of Organizations on.", + "required": false, + "type": "string", + "enum": [ + "name", + "resourceCount", + "userCount", + "organizationAdmin" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending or descending.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + }, + { + "name": "is_global", + "in": "query", + "description": "Filter the query results based on whether the organizations are the Global Organization.", + "required": false, + "type": "boolean" + }, + { + "name": "offset", + 
"in": "query", + "description": "Starting offset of the returned results.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "limit", + "in": "query", + "description": "Maximum number of results to return.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + } + ], + "responses": { + "200": { + "description": "Get page of Organizations.", + "schema": { + "$ref": "#/definitions/OrganizationDetailListResponse" + } + } + }, + "x-group": "organization" + } + }, + "/storage/array/request/{id}": { + "get": { + "tags": [ + "/storage/array" + ], + "summary": "Get storage array request", + "description": "Retrieve the status and details of a specified asynchronous request for a storage array object.", + "operationId": "getStorageArrayAsyncRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of an asynchronous request for a storage array object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Status of an asynchronous request for a storage array object.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "storage_array" + } + }, + "/hyperv/vm/{id}/missed_snapshot": { + "get": { + "tags": [ + "/hyperv/vm" + ], + "summary": "Get details about missed snapshots for a VM", + "description": "Retrieve the time of the day when the snapshots were missed specific to a vm.", + "operationId": "queryHypervVirtualMachineMissedSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the vm.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns details about missed snapshots for a VM.", + "schema": { + "$ref": "#/definitions/MissedSnapshotListResponse" + } + } + }, + "x-group": "hyperv_vm" + } + }, + "/storage/array/hierarchy/{id}": { + "get": { + "tags": [ + "/storage/array" + ], + "summary": "Get information for hierarchy object", + "description": "Get information about a specific storage array hierarchy object.", + "operationId": "getStorageArrayHierarchyObject", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a storage array hierarchy object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Summary information for a storage array hierarchy object.", + "schema": { + "$ref": "#/definitions/StorageArrayHierarchyObjectSummary" + } + } + }, + "x-group": "storage_array_volume_group" + } + }, + "/polaris/replication/source/replicate/{snappable_id}": { + "post": { + "tags": [ + "/polaris/replication/source" + ], + "summary": "Replicate snapshots for the snappable of the specified ID", + "description": "Replicate snapshots for the snappable of the specified ID. 
The ID is the snappable ID and the snappable can be an EC2 instance.", + "operationId": "schedulePolarisSourcePullReplicate", + "parameters": [ + { + "name": "snappable_id", + "in": "path", + "description": "Snappable ID of which we are replicating snapshots.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "definition", + "description": "Polaris source pull replicate definition.", + "required": true, + "schema": { + "$ref": "#/definitions/PolarisPullReplicateDefinition" + } + } + ], + "responses": { + "202": { + "description": "Polaris replication pull replicate succefully scheduled.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "polaris_replication_source" + } + }, + "/cluster/{id}/memory_capacity": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get installed memory capacity", + "description": "Retrieve total installed memory on the Rubrik cluster.", + "operationId": "getMemoryCapacity", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "Memory capacity of the Rubrik cluster.", + "schema": { + "$ref": "#/definitions/Capacity" + } + } + }, + "x-group": "cluster" + } + }, + "/storage/array_volume_group/{id}/snapshot": { + "post": { + "tags": [ + "/storage/array" + ], + "summary": "Initiate an on-demand snapshot for a volume group", + "description": "Start an async job to create an on-demand snapshot for a specified storage array volume group.", + "operationId": "createOnDemandStorageArrayBackup", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a storage array volume group object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for a job to create an on-demand snapshot of a storage array volume group.", + "required": false, + "schema": { + "$ref": "#/definitions/BaseOnDemandSnapshotConfig" + } + } + ], + "responses": { + "202": { + "description": "Task object for an async snapshot job.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "storage_array_volume_group" + }, + "delete": { + "tags": [ + "/storage/array" + ], + "summary": "Delete volume group snapshots", + "description": "Delete all snapshots for a specified volume group.", + "operationId": "deleteStorageArrayVolumeGroupSnapshots", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a storage array volume group object.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Snapshots successfully deleted." 
+ } + }, + "x-group": "storage_array_volume_group" + }, + "get": { + "tags": [ + "/storage/array" + ], + "summary": "Get list of snapshots of Volume Group", + "description": "Retrieve the following information for all snapshots for a Volume Group: ID, snapshot date, expiration date, type of source object, name of Volume Group, type of snapshot, state of the cloud, level of consistency, name of snapshot Volume Group, index state, total number of files, IDs of all replication location, IDs of all archival locations.", + "operationId": "queryStorageArrayVolumeGroupSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a storage array volume group object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns summary information for all snapshots.", + "schema": { + "$ref": "#/definitions/StorageArrayVolumeGroupSnapshotSummaryListResponse" + } + } + }, + "x-group": "storage_array_volume_group" + } + }, + "/organization/{id}/replication/target": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get replication targets associated with this organization", + "description": "Retrieve the total list of replication targets that have been granted to this organization.", + "operationId": "getOrganizationReplicationTargets", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns a list of Replication Targets that have been assigned to this organization.", + "schema": { + "$ref": "#/definitions/ReplicationTargetSummaryListResponse" + } + } + }, + "x-group": "organization_replication" + } + }, + "/polaris/failover/target/{id}/resume": { + "put": { + "tags": [ + "/polaris/failover" + ], + "summary": "Resume a paused failover to this location", + "description": "Provide the details required to resume a paused failover of a Blueprint to the current location.", + "operationId": "resumeFailoverOnTarget", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of the failover operation.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "definition", + "description": "Details required to resume the failover.", + "required": true, + "schema": { + "$ref": "#/definitions/TriggerFailoverOnTargetDefinition" + } + } + ], + "responses": { + "202": { + "description": "Failover job succefully scheduled.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + }, + "422": { + "description": "Returned if the request fails.", + "schema": { + "$ref": "#/definitions/RequestFailedException" + } + } + }, + "x-group": "failover" + } + }, + "/organization/{id}/oracle/db/metric": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get Oracle database metrics", + "description": "Returns the total number of objects, the total number of protected objects, and the total number of unprotected objects.", + "operationId": "getOracleDbMetric", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns an object with metrics.", + "schema": { + "$ref": "#/definitions/OrganizationResourceMetric" + } + } + }, + "x-group": "organization_oracle" + } + }, + "/cloud_on/aws/instance/{id}": { + "get": { + "tags": [ + "/cloud_on" + ], + "summary": "Get details 
about a given AWS cloud instance", + "description": "Get details about a given AWS cloud instance.", + "operationId": "getAwsPublicCloudMachineInstance", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the AWS cloud instance.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns details about the AWS cloud instance.", + "schema": { + "$ref": "#/definitions/AwsInstanceDetail" + } + } + }, + "x-group": "cloud_instance" + }, + "delete": { + "tags": [ + "/cloud_on" + ], + "summary": "Remove entry of a given AWS cloud instance", + "description": "Remove entry of a given AWS cloud instance. This deletes the instance metadata from Rubrik but doesn't terminate the instance running on cloud. This is an irreversible operation.\n", + "operationId": "removeAwsPublicCloudMachineInstance", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the AWS cloud instance.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Successfully stopped managing AWS cloud instance." + } + }, + "x-group": "cloud_instance" + } + }, + "/host/bulk": { + "post": { + "tags": [ + "/host" + ], + "summary": "Register hosts", + "description": "Register hosts with Rubrik clusters.", + "operationId": "bulkRegisterHost", + "parameters": [ + { + "in": "body", + "name": "hosts", + "description": "Array containing a registration definition for each host.", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/HostRegister" + } + } + } + ], + "responses": { + "201": { + "description": "Summary information from registration of hosts.", + "schema": { + "$ref": "#/definitions/HostDetailListResponse" + } + } + }, + "x-group": "hosts" + }, + "patch": { + "tags": [ + "/host" + ], + "summary": "Update multiple hosts with properties", + "description": "Change the FQDN and IPv4 address that is assigned to a host object. Enable or disable pre-transfer data compression. Enable or disable change block tracking (CBT) for backups of SQL Server databases on Windows hosts. 
Enable or disable volume filter driver (VFD) for volume backups on Windows hosts.", + "operationId": "bulkUpdateHost", + "parameters": [ + { + "in": "body", + "name": "host_update_properties", + "description": "Properties to update for each host.", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/HostUpdateId" + } + } + } + ], + "responses": { + "200": { + "description": "Returns a detailed view of all updated hosts.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/HostDetail" + } + } + } + }, + "x-group": "hosts" + } + }, + "/report/{id}/chart": { + "get": { + "tags": [ + "/report" + ], + "summary": "Get chart data for a report", + "description": "Retrieve chart data associated with the report specified by id.", + "operationId": "getCharts", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the report.", + "required": true, + "type": "string" + }, + { + "name": "chart_id", + "in": "query", + "description": "ID of the chart.", + "required": false, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Maximum number of chart columns to return.", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "responses": { + "200": { + "description": "Chart data for the report.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/ChartData" + } + } + } + }, + "x-group": "internal_report" + } + }, + "/vmware/datastore/{id}": { + "get": { + "tags": [ + "/vmware/datastore" + ], + "summary": "Get details about the specific DataStore", + "description": "Get details about the specific DataStore.", + "operationId": "getDatastore", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the DataStore that needs to be fetched.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns details of the DataStore.", + "schema": { + "$ref": "#/definitions/DataStoreDetail" + } + } + }, + "x-group": "datastore" + } + }, + "/oracle/db/{id}/recoverable_range": { + "get": { + "tags": [ + "/oracle" + ], + "summary": "Get recoverable ranges of a Oracle database", + "description": "Retrieve the recoverable ranges for a specified Oracle database. A begin and/or end timestamp can be provided to retrieve only the ranges that fall within the window.", + "operationId": "getOracleDbRecoverableRanges", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Oracle database.", + "required": true, + "type": "string" + }, + { + "name": "after_time", + "in": "query", + "description": "Filter ranges to end after this time. The date-time string should be in ISO8601 format, such as \"2016-01-01T01:23:45.678Z\".", + "required": false, + "type": "string", + "format": "date-time" + }, + { + "name": "before_time", + "in": "query", + "description": "Filter ranges to start before this time. 
The date-time string should be in ISO8601 format, such as \"2016-01-01T01:23:45.678\".", + "required": false, + "type": "string", + "format": "date-time" + } + ], + "responses": { + "200": { + "description": "Returns the recoverable ranges for the Oracle database.", + "schema": { + "$ref": "#/definitions/OracleRecoverableRangeListResponse" + } + } + }, + "x-group": "oracle_db" + } + }, + "/nutanix/vm/request/{id}": { + "get": { + "tags": [ + "/nutanix/vm" + ], + "summary": "Get VM async request details", + "description": "Get details about a Nutanix VM-related async request.", + "operationId": "getNutanixVmAsyncRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the request.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Status for the async request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "nutanix_vm" + } + }, + "/node/{id}/stats": { + "get": { + "tags": [ + "/node" + ], + "summary": "Fetches time ranged statistics about the specified Node", + "description": "Statistics about the specific Node given a time range. One hour is default.", + "operationId": "getNodeStats", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Id of the node to fetch statistics for.", + "required": true, + "type": "string" + }, + { + "name": "range", + "in": "query", + "description": "Starting point for a time series. The starting point is expressed as -, where is an integer and is one of: s(seconds), m(minutes), h(hours), d(days). Default value is -1h.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Statistics about the node.", + "schema": { + "$ref": "#/definitions/NodeStats" + } + } + }, + "x-group": "node" + } + }, + "/archive/dca/csr": { + "post": { + "tags": [ + "/archive" + ], + "summary": "Generate a new private key and return a certificate signing request", + "description": "Generates a new private key and return base64 encoded PKCS#10 certificate signing request. 
If there is a pending DCA CSR, this will invalidate it and create a new one.", + "operationId": "generateDcaCsr", + "parameters": [ + { + "in": "body", + "name": "csr_request", + "description": "Information for client certificate request.", + "required": true, + "schema": { + "$ref": "#/definitions/CsrRequest" + } + } + ], + "responses": { + "200": { + "description": "Certificate signing request.", + "schema": { + "$ref": "#/definitions/Pkcs10CertificateSigningRequest" + } + } + }, + "x-group": "archival" + } + }, + "/organization/{id}/envoy": { + "post": { + "tags": [ + "/organization" + ], + "summary": "Create a Rubrik Envoy object", + "description": "Create a Rubrik Envoy object for a specified organization and specify the properties to assign to the Rubrik Envoy object.", + "operationId": "createEnvoy", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an organization object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "definition", + "description": "Properties to assign to a specified Rubrik Envoy object.", + "required": true, + "schema": { + "$ref": "#/definitions/EnvoyCreate" + } + } + ], + "responses": { + "200": { + "description": "Successfully created a Rubrik Envoy object.", + "schema": { + "$ref": "#/definitions/EnvoyDetail" + } + } + }, + "x-group": "organization_resource" + }, + "get": { + "tags": [ + "/organization" + ], + "summary": "Get Rubrik Envoy list for an organization", + "description": "Retrieve a list of all of the Rubrik Envoy objects that are assigned to a specified organization.", + "operationId": "getEnvoyList", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an organization object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "List of Rubrik Envoy objects that are assigned to a specified organization.", + "schema": { + "$ref": "#/definitions/EnvoySummaryListResponse" + } + } + }, + "x-group": "organization_resource" + } + }, + "/storage/array/volume/{id}": { + "get": { + "tags": [ + "/storage/array" + ], + "summary": "Get storage array volume details", + "description": "Retrieve the details for a storage array volume object by using the assigned ID.", + "operationId": "getStorageArrayVolume", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a storage array volume object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Details for a specified storage array volume object.", + "schema": { + "$ref": "#/definitions/StorageArrayVolumeDetail" + } + } + }, + "x-group": "storage_array" + } + }, + "/aws/ec2_instance/request/{id}": { + "get": { + "tags": [ + "/aws/ec2_instance" + ], + "summary": "Get request status for EC2 instance", + "description": "Retrieve the details of an asynchronous request for an EC2 instance object.", + "operationId": "getAwsEc2InstanceAsyncRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the request.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Status of an asynchronous request for an EC2 object.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "aws_ec2_instance" + } + }, + "/cloud_on/azure/instance": { + "post": { + "tags": [ + "/cloud_on" + ], + "summary": "Create an instance on Azure\n", + "description": "Create an instance at a specified location on Azure using a 
specified snapshot or cloud image. When a cloud image does not exist, one is created to use for the instantiation. For a cloud image, the imageId member is required. For a snapshot, the snapshotId and snappableId members are both required.\n", + "operationId": "createAzurePublicCloudMachineInstance", + "parameters": [ + { + "in": "body", + "name": "request", + "description": "Request to create a new Azure public cloud instance.", + "required": true, + "schema": { + "$ref": "#/definitions/CreateCloudInstanceRequest" + } + } + ], + "responses": { + "202": { + "description": "Status for the instantiation request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "cloud_instance" + }, + "get": { + "tags": [ + "/cloud_on" + ], + "summary": "Query for a list of a Azure cloud instances", + "description": "Query for a list of a Azure cloud instances.", + "operationId": "queryAzurePublicCloudMachineInstance", + "parameters": [ + { + "name": "offset", + "in": "query", + "description": "Ignore these many matches in the beginning.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "limit", + "in": "query", + "description": "Limit the number of matches returned. Default is to return all available records.\n", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "snappable_name", + "in": "query", + "description": "Filters results to instances with source snappable names containing the given query.\n", + "required": false, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Sort the result by given attribute.", + "required": false, + "type": "string", + "default": "SourceVmName", + "enum": [ + "SourceVmName", + "LocationName", + "InstanceType", + "CreatedBy", + "Status", + "SnapshotTime", + "CreationTime" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "The sort order. Defaults to asc if not specified.", + "required": false, + "type": "string", + "default": "asc", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "Returns summary information for all Azure cloud instances.", + "schema": { + "$ref": "#/definitions/AzureInstanceSummaryListResponse" + } + } + }, + "x-group": "cloud_instance" + } + }, + "/hyperv/vm/{id}/snapshot": { + "post": { + "tags": [ + "/hyperv/vm" + ], + "summary": "Create on-demand VM snapshot", + "description": "Create an on-demand snapshot for the given VM ID.", + "operationId": "createHypervVirtualMachineSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the VM.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for the on-demand backup.", + "required": false, + "schema": { + "$ref": "#/definitions/BaseOnDemandSnapshotConfig" + } + } + ], + "responses": { + "202": { + "description": "Status for the backup request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "hyperv_vm" + }, + "delete": { + "tags": [ + "/hyperv/vm" + ], + "summary": "Delete all snapshots of VM", + "description": "Delete all snapshots of a virtual machine.", + "operationId": "deleteHypervVirtualMachineSnapshots", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Virtual machine ID.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Snapshots successfully deleted." 
+ } + }, + "x-group": "hyperv_vm" + }, + "get": { + "tags": [ + "/hyperv/vm" + ], + "summary": "Get list of snapshots of VM", + "description": "Retrieve the following information for all snapshots for a VM: ID, snapshot date, expiration date, type of source object, Name of VM, Name of fileset, type of snapshot, state of the cloud, level of consistency, name of snapshot VM, Index of state, total number of file, IDs of all replication location, IDs of all archival location.", + "operationId": "queryHypervVirtualMachineSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the vm.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns summary information for all snapshots.", + "schema": { + "$ref": "#/definitions/HypervVirtualMachineSnapshotSummaryListResponse" + } + } + }, + "x-group": "hyperv_vm" + } + }, + "/managed_volume/snapshot/{id}/download_files": { + "post": { + "tags": [ + "/managed_volume" + ], + "summary": "Download files from a managed volume backup", + "description": "Start an asynchronous job to download multiple files and folders from a specified managed volume backup. The response returns an asynchronous request ID. Get the URL for downloading the ZIP file including the specific files/folders by sending a GET request to 'managed-volume/request/{id}'.", + "operationId": "createManagedVolumeDownloadFilesJob", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a managed volume backup object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration information for a job to download files and folders from a managed volume backup.", + "required": true, + "schema": { + "$ref": "#/definitions/ManagedVolumeDownloadFilesJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Status of an asynchronous job to download files and folders from a managed volume backup.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "managed_volume" + } + }, + "/folder/host/{datacenter_id}": { + "get": { + "tags": [ + "/folder" + ], + "summary": "Get the top level host folder", + "description": "The top level host folder.", + "operationId": "queryHostFolder", + "parameters": [ + { + "name": "datacenter_id", + "in": "path", + "description": "ID of the datacenter.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns details about a host folder.", + "schema": { + "$ref": "#/definitions/FolderDetail" + } + } + }, + "x-group": "folder" + } + }, + "/storage/array_volume_group/snapshot/{id}": { + "delete": { + "tags": [ + "/storage/array" + ], + "summary": "Delete a volume group snapshot", + "description": "Delete a snapshot of a specified volume group by expiring it. The snapshot cannot be expired unless it is a on-demand snapshot or a snapshot of an unprotected volume group.", + "operationId": "deleteStorageArrayVolumeGroupSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a snapshot of a storage array volume group.", + "required": true, + "type": "string" + }, + { + "name": "location", + "in": "query", + "description": "Specifies the location of a snapshot.", + "required": true, + "type": "string", + "enum": [ + "all", + "local" + ] + } + ], + "responses": { + "204": { + "description": "Snapshot successfully deleted." 
+ } + }, + "x-group": "storage_array_volume_group" + }, + "get": { + "tags": [ + "/storage/array" + ], + "summary": "Get volume group snapshot details", + "description": "Get details of a specified volume group snapshot.", + "operationId": "getStorageArrayVolumeGroupSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a snapshot of a storage array volume group.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns details about a snapshot.", + "schema": { + "$ref": "#/definitions/StorageArrayVolumeGroupSnapshotDetail" + } + } + }, + "x-group": "storage_array_volume_group" + } + }, + "/data_location/nfs": { + "delete": { + "tags": [ + "/data_location" + ], + "summary": "REQUIRES SUPPORT TOKEN - Tries to clean up the data in the bucket in the NFS spec", + "description": "REQUIRES SUPPORT TOKEN - To be used by internal tests to clean the buckets. A support token is required for this operation.", + "operationId": "cleanNfsBucket", + "parameters": [ + { + "in": "body", + "name": "definition", + "description": "Data Location definition.", + "required": true, + "schema": { + "$ref": "#/definitions/NfsLocationDefinition" + } + } + ], + "responses": { + "204": { + "description": "Returned if bucket was successfully emptied." + } + }, + "x-group": "archival" + } + }, + "/config/usersettable_forge": { + "get": { + "tags": [ + "/config" + ], + "summary": "Fetch the global Forge configuration", + "description": "Fetch the global Forge configuration.", + "operationId": "getUserSettableForgeConfig", + "parameters": [], + "responses": { + "200": { + "description": "global configuration.", + "schema": { + "$ref": "#/definitions/UserSettableGlobalForgeConfig" + } + } + }, + "x-group": "internal_config" + }, + "patch": { + "tags": [ + "/config" + ], + "summary": "Update the global Forge configuration", + "description": "Update the global Forge configuration.", + "operationId": "updateUserSettableForgeConfig", + "parameters": [ + { + "in": "body", + "name": "new_values", + "description": "New configuration values.", + "required": true, + "schema": { + "$ref": "#/definitions/UserSettableGlobalForgeConfig" + } + } + ], + "responses": { + "200": { + "description": "global configuration.", + "schema": { + "$ref": "#/definitions/UserSettableGlobalForgeConfig" + } + } + }, + "x-group": "internal_config" + } + }, + "/vmware/vcenter/{id}/refresh_vm": { + "post": { + "tags": [ + "/vmware/vcenter" + ], + "summary": "(DEPRECATED) Refresh single virtual machine metadata in a vcenter", + "description": "Refresh a single virtual machine metadata in a vcenter. This endpoint will be removed after CDM v5.3 in favor of `POST /v1/vmware/vcenter/{id}/refresh_vm`.", + "operationId": "createRefreshVmInternal", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the vCenter Server.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration of the vm request to refresh.", + "required": true, + "schema": { + "$ref": "#/definitions/RefreshVmRequest" + } + } + ], + "responses": { + "204": { + "description": "The vm metadata is refreshed successfully." 
+ } + }, + "deprecated": true, + "x-group": "vcenter" + } + }, + "/polaris/failover/target/{id}/cancel": { + "put": { + "tags": [ + "/polaris/failover" + ], + "summary": "Cancel a paused failover to this location", + "description": "Provide the details required to cancel a paused failover of a Blueprint to the current location.", + "operationId": "cancelFailoverOnTarget", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of the failover operation.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Cancel failover job succefully scheduled.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + }, + "422": { + "description": "Returned if the request fails.", + "schema": { + "$ref": "#/definitions/RequestFailedException" + } + } + }, + "x-group": "failover" + } + }, + "/vmware/vm/update/bulk": { + "patch": { + "tags": [ + "/vmware/vm" + ], + "summary": "Update VMs in bulk", + "description": "Update VMs with specified properties.", + "operationId": "vmBulkUpdate", + "parameters": [ + { + "in": "body", + "name": "update", + "description": "Properties to update.", + "required": true, + "schema": { + "$ref": "#/definitions/VirtualMachineBulkUpdate" + } + } + ], + "responses": { + "200": { + "description": "TODO.", + "schema": { + "$ref": "#/definitions/Map_VmEndPointStatus" + } + } + }, + "x-group": "vm" + } + }, + "/managed_object/bulk/summary": { + "post": { + "tags": [ + "/managed_object" + ], + "summary": "Gets the summaries of a set of managed objects", + "description": "Gets the summaries of a set of managed objects.", + "operationId": "getBulkSummary", + "parameters": [ + { + "in": "body", + "name": "objects", + "description": "List of managed IDs whose summaries to get.", + "required": true, + "schema": { + "$ref": "#/definitions/ManagedIdList" + } + }, + { + "name": "sort_attr", + "in": "query", + "description": "Comma-separated list of attributes by which to sort: name|objectType|hostname.", + "required": false, + "type": "array", + "items": { + "type": "string" + } + }, + { + "name": "sort_order", + "in": "query", + "description": "Comma-separated list of sort orders (one for each sort_attr): asc|desc.", + "required": false, + "type": "array", + "items": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Summaries of each managed object.", + "schema": { + "$ref": "#/definitions/ManagedObjectSummaryListResponse" + } + } + }, + "x-group": "managed_object" + } + }, + "/stats/physical_ingest_per_day/time_series": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get a timeseries on physical data ingestion per day", + "description": "Get a timeseries on physical data ingestion per day.", + "operationId": "physicalIngestPerDay", + "parameters": [ + { + "name": "range", + "in": "query", + "description": "Range for timeseries. eg: -1h, -1min, etc. 
Default value is -1h.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns a timeSeries depicting bytes per second.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/TimeStat" + } + } + } + }, + "x-group": "stats" + } + }, + "/report/template": { + "get": { + "tags": [ + "/report" + ], + "summary": "Retrieve information about a specific report template", + "description": "Get report template details for the template.", + "operationId": "getReportTemplate", + "parameters": [ + { + "name": "report_template", + "in": "query", + "description": "Name of the report template.", + "required": true, + "type": "string", + "enum": [ + "CapacityOverTime", + "ObjectProtectionSummary", + "ObjectTaskSummary", + "ObjectIndexingSummary", + "ProtectionTasksDetails", + "ProtectionTasksSummary", + "RecoveryTasksDetails", + "SlaComplianceSummary", + "SystemCapacity" + ] + } + ], + "responses": { + "200": { + "description": "Report template detail with table, charts, and filters.", + "schema": { + "$ref": "#/definitions/ReportTemplateDetail" + } + } + }, + "x-group": "internal_report" + } + }, + "/vmware/vm/credential_failure/count": { + "get": { + "tags": [ + "/vmware/vm" + ], + "summary": "Fetch count of VMs with authentication failures", + "description": "Fetch count of VMs with authentication failures.", + "operationId": "credentialFailureCount", + "parameters": [], + "responses": { + "200": { + "description": "Return a count of all credential failure.", + "schema": { + "$ref": "#/definitions/CountResponse" + } + } + }, + "x-group": "vm" + } + }, + "/aws/account/{id}/subnet": { + "get": { + "tags": [ + "/aws/account" + ], + "summary": "Get subnets by AWS account", + "description": "Retrieve a list of the subnets available for a specified AWS account ID.", + "operationId": "queryAwsAccountSubnet", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an AWS account object.", + "required": true, + "type": "string" + }, + { + "name": "region", + "in": "query", + "description": "Name of an AWS region.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Subnets available for a specified AWS account.", + "schema": { + "$ref": "#/definitions/SubnetListResponse" + } + } + }, + "x-group": "aws_account" + } + }, + "/stats/system_storage": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get storage information for Rubrik cluster", + "description": "For a specified Rubrik cluster, retrieve all storage information in the following categories: total storage, used storage, available storage, snapshot storage, Live Mount storage, CDP storage, pending snapshot storage, and miscellaneous storage. Miscellaneous storage indicates the amount of storage reserved by the system and used by background tasks. 
The information also provides the time that it was last updated.", + "operationId": "systemStorage", + "parameters": [], + "responses": { + "200": { + "description": "Returns an object with attributes: total(Long), used(Long), available(Long), snapshot(Long), liveMount(Long), pendingSnapshot(Long), cdp(Long), miscellaneous(Long), lastUpdateTime(Date).", + "schema": { + "$ref": "#/definitions/SystemStorageStats" + } + } + }, + "x-group": "stats" + } + }, + "/cluster/{id}/bootstrap": { + "post": { + "tags": [ + "/cluster" + ], + "summary": "Bootstrap the cluster", + "description": "Issues a bootstrap request to a specified Rubrik cluster.", + "operationId": "bootstrapCluster", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "cluster_config", + "description": "Configuration for the Rubrik cluster.", + "required": true, + "schema": { + "$ref": "#/definitions/ClusterConfig" + } + } + ], + "responses": { + "202": { + "description": "Request handle to check bootstrap status.", + "schema": { + "$ref": "#/definitions/NodeOperation" + } + } + }, + "x-group": "cluster", + "x-unauthenticated": true + }, + "get": { + "tags": [ + "/cluster" + ], + "summary": "Cluster bootstrap request status", + "description": "Retrieves status of in progress bootstrap requests.", + "operationId": "getClusterConfigStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "name": "request_id", + "in": "query", + "description": "Id of the bootstrap request.", + "required": false, + "type": "integer", + "format": "int64" + } + ], + "responses": { + "200": { + "description": "Status of the bootstrap request.", + "schema": { + "$ref": "#/definitions/ClusterConfigStatus" + } + } + }, + "x-group": "cluster", + "x-unauthenticated": true + } + }, + "/hyperv/vm/snapshot/mount/{id}": { + "delete": { + "tags": [ + "/hyperv/vm" + ], + "summary": "Requst to delete a live mount", + "description": "Create a request to delete a live mount.", + "operationId": "deleteHypervVirtualMachineSnapshotMount", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the mount to remove.", + "required": true, + "type": "string" + }, + { + "name": "force", + "in": "query", + "description": "Force unmount to deal with situations where host has been moved.", + "required": false, + "type": "boolean" + } + ], + "responses": { + "202": { + "description": "Status for the unmount request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "hyperv_vm" + }, + "get": { + "tags": [ + "/hyperv/vm" + ], + "summary": "Get summary information for a live mount", + "description": "Retrieve the following summary information for a specified live mount: ID, snapshot date, ID of source VM, name of source VM, ID of source host, status of the mount, mount event ID, and unmount event ID.", + "operationId": "getHypervVirtualMachineSnapshotMount", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the live mount.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns detail information for a specified live mount.", + "schema": { + "$ref": "#/definitions/HypervVirtualMachineMountDetail" + } + } + }, + "x-group": "hyperv_vm" + }, + "patch": { + "tags": 
[ + "/hyperv/vm" + ], + "summary": "Power a Live Mount on and off", + "description": "Power a specified Live Mount virtual machine on or off. Pass **_true_** to power the virtual machine on and pass **_false_** to power the virtual machine off.", + "operationId": "updateHypervVirtualMachineSnapshotMount", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of a Live Mount.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Power state configuration.", + "required": true, + "schema": { + "$ref": "#/definitions/HypervUpdateMountConfig" + } + } + ], + "responses": { + "200": { + "description": "Details of the Live Mount.", + "schema": { + "$ref": "#/definitions/HypervVirtualMachineMountDetail" + } + } + }, + "x-group": "hyperv_vm" + } + }, + "/cluster/{id}/io_stats": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get IO throughput information", + "description": "Retrieves IO statistics of the Rubrik cluster.", + "operationId": "getClusterIo", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "name": "range", + "in": "query", + "description": "Optional time range parameter for the timeseries, eg: -1h, -1min, etc. Default value is -6h.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "IO statistics for the cluster.", + "schema": { + "$ref": "#/definitions/IoStat" + } + } + }, + "x-group": "cluster" + } + }, + "/aws/ec2_instance/{id}/storage_volume": { + "get": { + "tags": [ + "/aws/ec2_instance" + ], + "summary": "Get EC2 storage volumes", + "description": "Retrieve summary information for all storage volume objects of an EC2 instance object.", + "operationId": "queryAwsEc2InstanceStorageVolumes", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an EC2 instance object.", + "required": true, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Return only the specified number of objects from the query results.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Return a subset of the query results, starting with the specified number in the sequence of results.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "volume_path", + "in": "query", + "description": "Search for AWS storage volume objects by matching a string to a part of the device path of the storage volume object.", + "required": false, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Specify an attribute to use to sort the query results.", + "required": false, + "type": "string", + "default": "VolumePath", + "enum": [ + "VolumePath", + "VolumeId", + "VolumeSize" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Specify the sort order to use when sorting query results.", + "required": false, + "type": "string", + "default": "asc", + "enum": [ + "asc", + "desc" + ] + }, + { + "name": "root_volume", + "in": "query", + "description": "Filter by whether volume is root.", + "required": false, + "type": "boolean" + } + ], + "responses": { + "200": { + "description": "Storage volumes for an EC2 instance.", + "schema": { + "$ref": "#/definitions/AwsEc2InstanceStorageVolumeDetailListResponse" + } + } + }, + "x-group": 
"aws_ec2_instance" + } + }, + "/polaris/app_blueprint/{id}/search": { + "get": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Search for a file from a Blueprint", + "description": "Aggregated search for a file through snapshots of all workloads that are presently part of the Blueprint. Specify the file by full path prefix or filename prefix.", + "operationId": "searchAppBlueprint", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Blueprint.", + "required": true, + "type": "string" + }, + { + "name": "path", + "in": "query", + "description": "The path query. Use either a path prefix or a filename prefix.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "File search results.", + "schema": { + "$ref": "#/definitions/AppBlueprintSearchResponseListResponse" + } + } + }, + "x-group": "app_blueprint" + } + }, + "/folder/vm/{datacenter_id}": { + "get": { + "tags": [ + "/folder" + ], + "summary": "Get the top level vm folder", + "description": "The top level vm folder.", + "operationId": "queryVmFolder", + "parameters": [ + { + "name": "datacenter_id", + "in": "path", + "description": "ID of the datacenter.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns details about a vm folder.", + "schema": { + "$ref": "#/definitions/FolderDetail" + } + } + }, + "x-group": "folder" + } + }, + "/polaris/app_blueprint/shutdown/{id}": { + "post": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Shut down a Blueprint before starting a production failover on Polaris", + "description": "Polaris uses these APIs to power down the virtual machines in the Blueprint in reverse boot order. Polaris uses this endpoint during a production failover, before instantiating the virtual machine images in the cloud. This endpoint is reserved for Polaris.", + "operationId": "shutdownVmsInBlueprint", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to the Blueprint object.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Blueprint virtual machines successfully powered off." 
+ } + }, + "x-group": "app_blueprint" + } + }, + "/report/{id}/send_email": { + "post": { + "tags": [ + "/report" + ], + "summary": "Send email for a report", + "description": "Schedule a job to send an email for the report.", + "operationId": "sendEmail", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the report included in the email.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "email_summary", + "description": "Summary of the email being sent, including recipent email addresses and attachment type.", + "required": true, + "schema": { + "$ref": "#/definitions/EmailSummary" + } + } + ], + "responses": { + "202": { + "description": "Status of the asynchronous job sending the email.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "internal_report" + } + }, + "/hierarchy/{id}/sla_conflicts": { + "get": { + "tags": [ + "/hierarchy" + ], + "summary": "Get list of descendant objects with SLA conflicts", + "description": "Retrieve the list of descendant objects with an explicitly configured SLA, or inherit an SLA from a different parent.", + "operationId": "getHierarchySlaConflicts", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the hierarchy object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Summary list of descendant objects.", + "schema": { + "$ref": "#/definitions/ManagedHierarchyObjectSummaryListResponse" + } + } + }, + "x-group": "hierarchy" + } + }, + "/stats/snapshot_storage/live": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get live snapshot storage for all snapshots", + "description": "Get live snapshot storage for all snapshots.", + "operationId": "liveSnapshotStorage", + "parameters": [], + "responses": { + "200": { + "description": "Returns an object with attribute: name(String), key(String), value(String), frequencyInMin(Integer), lastUpdateTime(Date).", + "schema": { + "$ref": "#/definitions/OfflineStatSummary" + } + } + }, + "x-group": "stats" + } + }, + "/cluster/{id}/global_manager_connectivity": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Check connectivity for Polaris", + "description": "Uses ping to test connectivity with a list of URLs provided by Polaris.", + "operationId": "getGlobalManagerConnectivity", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "Provides a report of which URLs were reachable.", + "schema": { + "type": "string" + } + } + }, + "x-group": "cluster" + } + }, + "/replication/source": { + "get": { + "tags": [ + "/replication" + ], + "summary": "Get summaries for all replication sources", + "description": "Retrieve the ID, name, and address for each replication source.", + "operationId": "replicationSources", + "parameters": [ + { + "name": "name", + "in": "query", + "description": "Filters the retrieved list of replication targets by replication target name.", + "required": false, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Specifies the attribute to use when sorting the retrieved list of replication targets. 
Optionally, use **_sort_order_** to sort the list in ascending(asc) or descending(desc) order.", + "required": false, + "type": "string", + "enum": [ + "name" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending(asc) or descending(desc).", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "Returns a list of replication sources.", + "schema": { + "$ref": "#/definitions/ReplicationSourceSummaryListResponse" + } + } + }, + "x-group": "replication" + } + }, + "/hyperv/vm/{id}/search": { + "get": { + "tags": [ + "/hyperv/vm" + ], + "summary": "Search for file in VM", + "description": "Search for a file within the Virtual Machine. Search via full path prefix or filename prefix.", + "operationId": "searchHypervVirtualMachine", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the vm.", + "required": true, + "type": "string" + }, + { + "name": "path", + "in": "query", + "description": "The path query. Either path prefix or filename prefix.", + "required": true, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Maximum number of entries in the response.", + "required": false, + "type": "integer", + "format": "int32" + }, + { + "name": "cursor", + "in": "query", + "description": "Pagination cursor returned by the previous request.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Search results.", + "schema": { + "$ref": "#/definitions/SearchResponseListResponse" + } + } + }, + "x-group": "hyperv_vm" + } + }, + "/stats/snapshot_ingest/time_series": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get a timeseries of snapshots being ingested at a given time instance", + "description": "Get a timeseries of snapshots being ingested at a given time instance.", + "operationId": "snapshotIngest", + "parameters": [], + "responses": { + "200": { + "description": "Returns a timeSeries depicting bytes per second.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/TimeStat" + } + } + } + }, + "x-group": "stats" + } + }, + "/nutanix/vm/{id}/register_agent": { + "post": { + "tags": [ + "/nutanix/vm" + ], + "summary": "Register the agent installed on the Nutanix VM", + "description": "Register the agent installed on the Nutanix VM.", + "operationId": "nutanixVmRegisterAgent", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Virtual Machine.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "The VM agent has successfully registered." + } + }, + "x-group": "nutanix_vm" + } + }, + "/organization/stats/storage_growth_timeseries": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get storage growth of an Organization", + "description": "Retrieve the storage growth of an Organization object as a time series analysis that uses bytes as the unit of measurement.", + "operationId": "getStorageGrowthTimeseries", + "parameters": [ + { + "name": "id", + "in": "query", + "description": "ID of an Organization object.", + "required": true, + "type": "string" + }, + { + "name": "range", + "in": "query", + "description": "Range for timeseries. eg: -1h, -1min, -30d etc. 
Default value is -1h.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns a list of TimeStat.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/TimeStat" + } + } + } + }, + "x-group": "organization" + } + }, + "/cluster/{id}/security_classification": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Returns the cluster security classification", + "description": "Returns the cluster security classification parameters.", + "operationId": "getClassificationConfiguration", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "Returns this code on a successful query.", + "schema": { + "$ref": "#/definitions/ClassificationConfiguration" + } + } + }, + "x-group": "cluster", + "x-unauthenticated": true + }, + "put": { + "tags": [ + "/cluster" + ], + "summary": "Sets the cluster security classification parameters", + "description": "Sets the cluster security classification parameters.", + "operationId": "setClassificationConfiguration", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "classification_config", + "description": "The cluster security classification parameters.", + "required": true, + "schema": { + "$ref": "#/definitions/ClassificationConfiguration" + } + } + ], + "responses": { + "200": { + "description": "Returned if the operation was successful.", + "schema": { + "$ref": "#/definitions/ClassificationConfiguration" + } + } + }, + "x-group": "cluster" + } + }, + "/node/{id}/sessions": { + "get": { + "tags": [ + "/node" + ], + "summary": "Get list of active sessions on a node", + "description": "Returns the list of all active sessions on a node.", + "operationId": "getNodeSessions", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the node or *me* for self.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "List of active sessions.", + "schema": { + "$ref": "#/definitions/ActiveSessionListResponse" + } + } + }, + "x-group": "session" + } + }, + "/oracle/db/mount/{id}": { + "delete": { + "tags": [ + "/oracle" + ], + "summary": "Delete an Oracle database Live Mount", + "description": "Request an asynchronous job to delete a specified Live Mount of an Oracle database snapshot. Poll the job status by using /oracle/request/{id}.", + "operationId": "createOracleUnmount", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a Live Mount of an Oracle database snapshot.", + "required": true, + "type": "string" + }, + { + "name": "force", + "in": "query", + "description": "Force unmount to remove metadata for the Live Mount of an Oracle database snapshot, even when the database cannot be contacted. 
Default value is 'false'.", + "required": false, + "type": "boolean" + } + ], + "responses": { + "202": { + "description": "Request status for an async job to delete the Live Mount an Oracle database snapshot.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "oracle_db" + }, + "get": { + "tags": [ + "/oracle" + ], + "summary": "Get an Oracle database Live Mount", + "description": "Retrieve detailed information about a specified Live Mount of an Oracle database snapshot.", + "operationId": "getOracleMount", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a Live Mount of an Oracle database snapshot.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Information about a specified Live Mount of an Oracle database snapshot.", + "schema": { + "$ref": "#/definitions/OracleMountDetail" + } + } + }, + "x-group": "oracle_db" + } + }, + "/polaris/snapshot/request/{id}": { + "get": { + "tags": [ + "/polaris" + ], + "summary": "Get asynchronous request details for a get snapshots job", + "description": "Get the details of an asynchronous request that runs a get snapshots job.", + "operationId": "getSnapshotsRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of an asynchronous get snapshots request.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Status of an asynchronous get snapshots job.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "polaris" + } + }, + "/mssql/db/{id}/restore_files": { + "get": { + "tags": [ + "/mssql" + ], + "summary": "(DEPRECATED) Returns a list of all database files to be restored", + "description": "Provides a list of database files to be restored for the specified restore or export operation. This endpoint will be removed in CDM v6.0 in favor of `GET v1/mssql/db/{id}/restore_files`.", + "operationId": "mssqlGetRestoreFiles", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Microsoft SQL database.", + "required": true, + "type": "string" + }, + { + "name": "time", + "in": "query", + "description": "Time to recover to, in ISO8601 date-time format, such as \"2016-01-01T01:23:45.678\". Either this or the LSN must be specified.", + "required": false, + "type": "string", + "format": "date-time" + }, + { + "name": "lsn", + "in": "query", + "description": "LSN to recover to. Either this or time must be specified.", + "required": false, + "type": "string" + }, + { + "name": "recovery_fork_guid", + "in": "query", + "description": "Recovery fork GUID of LSN to recover to. 
Has meaning only when lsn is specified.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Database files to be restored.", + "schema": { + "type": "array", + "items": { + "description": "Database file to be restored.", + "$ref": "#/definitions/MssqlRestoreFile" + } + } + } + }, + "deprecated": true, + "x-group": "mssql" + } + }, + "/smb/domain/{domain_name}/join": { + "post": { + "tags": [ + "/smb" + ], + "summary": "Join Active Directory", + "description": "Join Active Directory.", + "operationId": "joinSmbDomain", + "parameters": [ + { + "name": "domain_name", + "in": "path", + "description": "SMB domain name.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for joining Active Directory.", + "required": true, + "schema": { + "$ref": "#/definitions/SmbDomainJoinRequest" + } + } + ], + "responses": { + "204": { + "description": "Successfully joined Active Directory domain." + } + }, + "x-group": "smb" + } + }, + "/aws/account": { + "post": { + "tags": [ + "/aws/account" + ], + "summary": "Add an AWS account", + "description": "Add an AWS account object using specified configuration.", + "operationId": "createAwsAccount", + "parameters": [ + { + "in": "body", + "name": "config", + "description": "Configuration to use to add an AWS account object.", + "required": true, + "schema": { + "$ref": "#/definitions/AwsAccountCreate" + } + } + ], + "responses": { + "202": { + "description": "Created request to add an AWS account.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "aws_account" + }, + "get": { + "tags": [ + "/aws/account" + ], + "summary": "Get information for AWS accounts", + "description": "Retrieve information for each AWS account object. 
Optionally, sort the set of returned objects.", + "operationId": "queryAwsAccounts", + "parameters": [ + { + "name": "limit", + "in": "query", + "description": "Return only the specified number of objects from the query results.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Return a subset of the query results, starting with the specified number in the sequence of results.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "name", + "in": "query", + "description": "Search for AWS account objects by matching a string to a part of the name of the AWS account object.", + "required": false, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Specify an attribute to use to sort the returned AWS account objects.", + "required": false, + "type": "string", + "default": "Name", + "enum": [ + "Name", + "Status" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Specify the sort order for the returned set of AWS account objects.", + "required": false, + "type": "string", + "default": "asc", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "Summary information for AWS accounts.", + "schema": { + "$ref": "#/definitions/AwsAccountSummaryListResponse" + } + } + }, + "x-group": "aws_account" + } + }, + "/host/share_credential": { + "post": { + "tags": [ + "/host/share_credential" + ], + "summary": "Add a share credential to a host", + "description": "Add a share credential object to a host.", + "operationId": "addShareCredentialToHost", + "parameters": [ + { + "in": "body", + "name": "share_credential", + "description": "Host share credential creation definition.", + "required": true, + "schema": { + "$ref": "#/definitions/HostShareCredentialDefinition" + } + } + ], + "responses": { + "201": { + "description": "Summary information for host credential.", + "schema": { + "$ref": "#/definitions/HostShareCredentialDetail" + } + } + }, + "x-group": "hosts" + }, + "get": { + "tags": [ + "/host/share_credential" + ], + "summary": "Get summary information for host credential", + "description": "Retrieve summary information for host credential. 
Apply filters to refine the returned information.", + "operationId": "queryHostShareCredential", + "parameters": [ + { + "name": "host_id", + "in": "query", + "description": "Filter the summary information based on the host id.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Summary information for host credential.", + "schema": { + "$ref": "#/definitions/HostShareCredentialDetailListResponse" + } + } + }, + "x-group": "hosts" + } + }, + "/organization/{id}/aws/ec2_instance/metric": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get AWS EC2 instance metrics", + "description": "Retrieve the total object count, total protected object and no sla object count.", + "operationId": "getAwsEc2InstanceMetric", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns an object with metrics.", + "schema": { + "$ref": "#/definitions/OrganizationResourceMetric" + } + } + }, + "x-group": "organization_cloud_native" + } + }, + "/cloud_on/azure/image": { + "get": { + "tags": [ + "/cloud_on" + ], + "summary": "Query for a list of a Azure cloud images", + "description": "Query for a list of a Azure cloud images.", + "operationId": "queryAzurePublicCloudMachineImage", + "parameters": [ + { + "name": "snappable_id", + "in": "query", + "description": "Filters Azure cloud images by snappable ID.", + "required": false, + "type": "string" + }, + { + "name": "location_id", + "in": "query", + "description": "Filters Azure cloud images by location ID.", + "required": false, + "type": "string" + }, + { + "name": "offset", + "in": "query", + "description": "Ignore these many matches in the beginning.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "limit", + "in": "query", + "description": "Limit the number of matches returned. Default is to return all available records.\n", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "snappable_name", + "in": "query", + "description": "Filters results to images with source snappable names containing the given query.\n", + "required": false, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Sort the result by given attribute.", + "required": false, + "type": "string", + "default": "SourceVmName", + "enum": [ + "SourceVmName", + "LocationName", + "InstanceType", + "CreatedBy", + "Status", + "SnapshotTime", + "CreationTime" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "The sort order. 
Defaults to asc if not specified.", + "required": false, + "type": "string", + "default": "asc", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "Returns summary information for all azure cloud images.", + "schema": { + "$ref": "#/definitions/AzureImageSummaryListResponse" + } + } + }, + "x-group": "cloud_instance" + } + }, + "/stats/data_location/usage": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get usage details of al data locations", + "description": "Get usage details from all data locations.", + "operationId": "dataLocationsUsage", + "parameters": [], + "responses": { + "200": { + "description": "Get a list with data location usage information.", + "schema": { + "$ref": "#/definitions/DataLocationUsageListResponse" + } + } + }, + "x-group": "stats" + } + }, + "/unmanaged_object/assign_retention_sla": { + "post": { + "tags": [ + "/unmanaged_object" + ], + "summary": "Assign relic/unmanaged entities to a SLA Domain for managing retention synchronously", + "description": "Assign relic/unmanaged entities to the specified SLA Domain for managing retention. The assignment event runs synchronously.", + "operationId": "assignToRetentionSlaAndWaitForJob", + "parameters": [ + { + "in": "body", + "name": "assignment_info", + "description": "Object with SLA Domain ID and a comma-separated list of the IDs of the relic/unmanaged entities being assigned to the SLA Domain.", + "required": true, + "schema": { + "$ref": "#/definitions/UnmanagedObjectSlaAssignmentInfo" + } + } + ], + "responses": { + "204": { + "description": "Assigned relic/unmanaged entities to the specified SLA Domain for retention." + } + }, + "x-group": "unmanaged_object" + } + }, + "/archive/location/connect_job": { + "get": { + "tags": [ + "/archive" + ], + "summary": "Get running connection jobs", + "description": "Retrieve a list of all running connection and reconnection jobs for archival locations. Returns summary information about the archival location and the job.", + "operationId": "getArchivalLocationConnectJobs", + "parameters": [], + "responses": { + "200": { + "description": "Returns running connection jobs for archival locations.", + "schema": { + "$ref": "#/definitions/ArchivalLocationConnectJobListResponse" + } + } + }, + "x-group": "archival" + } + }, + "/cluster/{id}/security/password/zxcvbn": { + "post": { + "tags": [ + "/cluster" + ], + "summary": "Toggle usage of ZXCVBN for local users", + "description": "Toggle usage of ZXCVBN for local users.", + "operationId": "configureZxcvbn", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a Rubrik CDM instance. Use *me* for the instance that is hosting the current REST API session.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "should_enable_zxcvbn", + "description": "Boolean value that enables and disables the ZXCVBN password check on a specified Rubrik cluster. Set to true to enable the ZXCVBN password check. 
Set to false to disable the check.", + "required": true, + "schema": { + "$ref": "#/definitions/ZxcvbnConfig" + } + } + ], + "responses": { + "200": { + "description": "Password evaluation result.", + "schema": { + "$ref": "#/definitions/ZxcvbnStatus" + } + } + }, + "x-group": "security" + }, + "get": { + "tags": [ + "/cluster" + ], + "summary": "Check ZXCVBN status", + "description": "Determine whether the ZXCVBN password check is currently in use on the specified Rubrik cluster.", + "operationId": "queryZxcvbnUsage", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "Current state of the ZXCVBN password check.", + "schema": { + "$ref": "#/definitions/ZxcvbnStatus" + } + } + }, + "x-group": "security" + } + }, + "/mssql/db/bulk/snapshot": { + "post": { + "tags": [ + "/mssql" + ], + "summary": "(DEPRECATED) Take an on-demand backup of multiple Microsoft SQL databases", + "description": "Take an on-demand backup of one or more Microsoft SQL databases. The forceFullSnapshot property can be set to true to force a full snapshot for every database that is specified. Only one snapshot will be taken for each database, even if a database is included multiple times in the fields of the request body. To check the result of the request, poll /mssql/request/{id}. This endpoint will be removed in CDM v6.0 in favor of `POST v1/mssql/db/bulk/snapshot`.", + "operationId": "createOnDemandMssqlBatchBackup", + "parameters": [ + { + "in": "body", + "name": "config", + "description": "Configuration for the on-demand backups.", + "required": true, + "schema": { + "$ref": "#/definitions/MssqlBatchBackupJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Returns the async request for the initiated on-demand backups.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "deprecated": true, + "x-group": "mssql" + } + }, + "/support/support_bundle": { + "post": { + "tags": [ + "/support" + ], + "summary": "Collect log files from the cluster", + "description": "To be used by Admin to collect necessary Rubrik's log files from all the nodes. Both event_id and reqeust_ids are optional. If nothing is specified, the whole support bundle is to be collected, if event_id is specified, the reqeuest_ids is ignored. If request id is specified, only collect logs related to the specific request, otherwise collect all the logs.", + "operationId": "generateSupportBundle", + "parameters": [ + { + "in": "body", + "name": "definitions", + "description": "Either event id or request id list of the request to be collected, if both are provided, request id list would be ignored.", + "required": false, + "schema": { + "$ref": "#/definitions/GenerateSupportBundleRequest" + } + } + ], + "responses": { + "202": { + "description": "Returns the status of the create support bundle request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "support" + }, + "get": { + "tags": [ + "/support" + ], + "summary": "Get the status of generating support bundle", + "description": "Given a request ID for generate support bundle request, provide the status of the request. 
If the request is successful, the download link for the support bundle would be included.", + "operationId": "querySupportBundle", + "parameters": [ + { + "name": "id", + "in": "query", + "description": "The support bundle generation request.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns the status of given request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "support" + } + }, + "/user": { + "post": { + "tags": [ + "/user" + ], + "summary": "ADMIN ONLY: Create new User", + "description": "To be used by Admin to create new User.", + "operationId": "createLocalUser", + "parameters": [ + { + "in": "body", + "name": "definition", + "description": "Detail info about the user to be created.", + "required": true, + "schema": { + "$ref": "#/definitions/UserDefinition" + } + } + ], + "responses": { + "200": { + "description": "TODO.", + "schema": { + "$ref": "#/definitions/UserDetail" + } + } + }, + "x-group": "user", + "x-rk-block-api-tokens": true + } + }, + "/replication/target": { + "post": { + "tags": [ + "/replication" + ], + "summary": "Configure a replication target", + "description": "Provide an address, network setup, username, and password to configure and enable a replication target. Network setup must be either 'NAT' or 'Private Network'. WARNING: This will fail if there is already an existing replication target.", + "operationId": "addReplicationTarget", + "parameters": [ + { + "in": "body", + "name": "definition", + "description": "Replication. target definition.", + "required": true, + "schema": { + "$ref": "#/definitions/ReplicationTargetDefinition" + } + } + ], + "responses": { + "201": { + "description": "Replication. target at successfully added.", + "schema": { + "$ref": "#/definitions/ReplicationTargetSummary" + } + }, + "422": { + "description": "Returned if the request fails.", + "schema": { + "$ref": "#/definitions/RequestFailedException" + } + } + }, + "x-group": "replication" + }, + "get": { + "tags": [ + "/replication" + ], + "summary": "Get summary of current replication target", + "description": "Retrieve the ID, name, and address for the current replication target.", + "operationId": "replicationTargets", + "parameters": [ + { + "name": "name", + "in": "query", + "description": "Filters the retrieved list of replication targets by replication target name.", + "required": false, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Specifies the attribute to use when sorting the retrieved list of replication targets. Optionally, use **_sort_order_** to sort the list in ascending(asc) or descending(desc) order.", + "required": false, + "type": "string", + "enum": [ + "name" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending(asc) or descending(desc).", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "Summary of current replication target.", + "schema": { + "$ref": "#/definitions/ReplicationTargetSummaryListResponse" + } + } + }, + "x-group": "replication" + } + }, + "/archive/nfs/reader/connect": { + "post": { + "tags": [ + "/archive" + ], + "summary": "Connect to an NFS archival location as a reader", + "description": "Connect the current cluster to an existing NFS archival location as a reader. 
Initiates an asynchronous job to connect to the archival location.\n", + "operationId": "connectNfsAsReader", + "parameters": [ + { + "in": "body", + "name": "request", + "description": "Access credentials for the archival location.", + "required": true, + "schema": { + "$ref": "#/definitions/NfsReaderConnectDefinition" + } + } + ], + "responses": { + "202": { + "description": "The request ID for an asynchronous request to connect to an NFS archival location as a reader cluster.\n", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "archival" + } + }, + "/hyperv/vm/snapshot/mount": { + "get": { + "tags": [ + "/hyperv/vm" + ], + "summary": "Get summary information for all live mounts", + "description": "Retrieve the following information for all live mounts: ID, snapshot date, ID of source VM, name of source VM, ID of source host, status of the mount, mount event ID, and unmount event ID.", + "operationId": "queryHypervVirtualMachineSnapshotMount", + "parameters": [ + { + "name": "vm_id", + "in": "query", + "description": "Filters live mounts by VM ID.", + "required": false, + "type": "string" + }, + { + "name": "vm_name", + "in": "query", + "description": "Filters live mounts by VM name.", + "required": false, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Attribute used to sort Hyper-V snapshot mounts.", + "required": false, + "type": "string", + "enum": [ + "SnapshotDate", + "VmName", + "MountedVmName", + "PowerStatus" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending or descending.", + "required": false, + "type": "string", + "default": "asc", + "enum": [ + "asc", + "desc" + ] + }, + { + "name": "offset", + "in": "query", + "description": "Ignore these many matches in the beginning.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "limit", + "in": "query", + "description": "Limit the number of matches returned. 
Default is 25.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + } + ], + "responses": { + "200": { + "description": "Returns summary information for all live mounts.", + "schema": { + "$ref": "#/definitions/HypervVirtualMachineMountSummaryListResponse" + } + } + }, + "x-group": "hyperv_vm" + } + }, + "/polaris/failover/target/{id}/test_failover_end": { + "put": { + "tags": [ + "/polaris/failover" + ], + "summary": "End a successful test failover to this location", + "description": "Provide the details required to terminate a successful test failover of a Blueprint to the current location.", + "operationId": "endTestFailoverOnTarget", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of the failover operation.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "End test failover job succefully scheduled.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + }, + "422": { + "description": "Returned if the request fails.", + "schema": { + "$ref": "#/definitions/RequestFailedException" + } + } + }, + "x-group": "failover" + } + }, + "/config/usersettable_volumeGroup/reset": { + "patch": { + "tags": [ + "/config" + ], + "summary": "Reset the global Volumegroup configuration", + "description": "Reset the global Volumegroup configuration.", + "operationId": "resetUserSettableVolumeGroupConfig", + "parameters": [ + { + "in": "body", + "name": "new_values", + "description": "Configuration keys to reset.", + "required": true, + "schema": { + "$ref": "#/definitions/UserSettableGlobalVolumeGroupConfig" + } + } + ], + "responses": { + "200": { + "description": "global configuration.", + "schema": { + "$ref": "#/definitions/UserSettableGlobalVolumeGroupConfig" + } + } + }, + "x-group": "internal_config_reset" + } + }, + "/stats/available_storage": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get available storage in cluster", + "description": "Get available storage in cluster.", + "operationId": "availableStorage", + "parameters": [], + "responses": { + "200": { + "description": "Returns an object with attribute: name(String), key(String), value(String), frequencyInMin(Integer), lastUpdateTime(Date).", + "schema": { + "$ref": "#/definitions/OfflineStatSummary" + } + } + }, + "x-group": "stats" + } + }, + "/storage/array_volume_group/{id}/missed_snapshot": { + "get": { + "tags": [ + "/storage/array" + ], + "summary": "Get missed snapshots for volume group", + "description": "Retrieve a list of missed SLA Domain scheduled snapshots for a specified storage array volume group. Each entry includes a timestamp.", + "operationId": "getStorageArrayVolumeGroupMissedSnapshots", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a storage array volume group object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "List of missed SLA Domain scheduled snapshots for a volume group.", + "schema": { + "$ref": "#/definitions/MissedSnapshotListResponse" + } + } + }, + "x-group": "storage_array_volume_group" + } + }, + "/polaris/nas/{id}/security_descriptor": { + "get": { + "tags": [ + "/polaris/nas" + ], + "summary": "Retrieves security descriptor of a specific file or folder", + "description": "Retrieves security descriptor of the specified file or folder. 
Permissions are retrieved in SDDL format.", + "operationId": "querySecurityDescriptor", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the network share to get security descriptor for.", + "required": true, + "type": "string" + }, + { + "name": "path", + "in": "query", + "description": "Path for which security descriptor needs to be retrieved.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "File or folder security descriptor.", + "schema": { + "$ref": "#/definitions/SecurityDescriptor" + } + } + }, + "x-group": "polaris" + }, + "patch": { + "tags": [ + "/polaris/nas" + ], + "summary": "Updates security descriptor for a file or folder under NAS share", + "description": "Updates security descriptor for a specific file or folder under NAS share. Security descriptor should be in SDDL format. The specified file or folder must exist under the network share.", + "operationId": "updateSecurityDescriptor", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the network share to set security descriptor for.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "security_descriptor", + "description": "Security descriptor to update for the specified file or folder.", + "required": true, + "schema": { + "$ref": "#/definitions/SecurityDescriptor" + } + } + ], + "responses": { + "200": { + "description": "Returns file or folder security descriptor, after it is updated.", + "schema": { + "$ref": "#/definitions/SecurityDescriptor" + } + } + }, + "x-group": "polaris" + } + }, + "/vmware/vm/snapshot/count": { + "get": { + "tags": [ + "/vmware/vm" + ], + "summary": "Get a count of snapshots", + "description": "Retrieve total number of snapshots.", + "operationId": "countSnapshot", + "parameters": [], + "responses": { + "200": { + "description": "Returns total number of snapshots.", + "schema": { + "$ref": "#/definitions/CountResponse" + } + } + }, + "x-group": "vm" + } + }, + "/vmware/vm/credential_failure": { + "get": { + "tags": [ + "/vmware/vm" + ], + "summary": "Fetch VMs with authentication failures", + "description": "Fetch VMs with authentication failures.", + "operationId": "getCredentialFailureVms", + "parameters": [], + "responses": { + "200": { + "description": "Return a list with brief description of virtual machine.", + "schema": { + "$ref": "#/definitions/VirtualMachineBriefListResponse" + } + } + }, + "x-group": "vm" + } + }, + "/node/{id}/cpu_cores_count": { + "get": { + "tags": [ + "/node" + ], + "summary": "Get CPU Cores count of a node", + "description": "CPU cores count of a node.", + "operationId": "getNodeCpuCoresCount", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Id of the node to get cpu cores count for.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Count of cpu cores for the given node.", + "schema": { + "$ref": "#/definitions/CountResponse" + } + } + }, + "x-group": "node" + } + }, + "/vmware/vm/live_snapshot/count/{id}": { + "get": { + "tags": [ + "/vmware/vm" + ], + "summary": "Count of all the live snapshots of a VM", + "description": "Count of all the live snapshots of a VM.", + "operationId": "liveSnapshotCount", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Virtual Machine.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Return a count of live snapshot for a vm.", + "schema": { + "$ref": 
"#/definitions/CountResponse" + } + } + }, + "x-group": "vm" + } + }, + "/report/compliance_summary_24_hours": { + "get": { + "tags": [ + "/report" + ], + "summary": "Get past 24 hour compliance summary information", + "description": "Returns the past 24 hour compliance summary information for all objects.", + "operationId": "get24HourComplianceSummary", + "parameters": [], + "responses": { + "200": { + "description": "Data from the report data source.", + "schema": { + "$ref": "#/definitions/ComplianceSummary" + } + } + }, + "x-group": "internal_report" + } + }, + "/hyperv/vm/{id}/register_agent": { + "post": { + "tags": [ + "/hyperv/vm" + ], + "summary": "Register the agent installed in VM", + "description": "Register the agent that installed in VM.", + "operationId": "hypervVmRegisterAgent", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Virtual Machine.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "The VM agent is registered successfully." + } + }, + "x-group": "hyperv_vm" + } + }, + "/data_location/teardown": { + "post": { + "tags": [ + "/data_location" + ], + "summary": "REQUIRES SUPPORT TOKEN - Disconnect from a specific data location Deletes the lock files, encryption files, and all the data", + "description": "REQUIRES SUPPORT TOKEN - Disconnect from a specific data location. Deletes the lock files, encryption files and all the data. A support token is required for this operation.", + "operationId": "teardown", + "parameters": [ + { + "in": "body", + "name": "request", + "description": "dataLocationId.", + "required": true, + "schema": { + "$ref": "#/definitions/DataLocationTeardownRequest" + } + } + ], + "responses": { + "200": { + "description": "TODO.", + "schema": { + "$ref": "#/definitions/DataLocationEndpointStatus" + } + } + }, + "x-group": "archival" + } + }, + "/vcd/hierarchy/{id}/descendants": { + "get": { + "tags": [ + "/vcd/hierarchy" + ], + "summary": "(DEPRECATED) Get list of descendant objects", + "description": "Retrieve the list of descendant objects for the specified parent. This endpoint will be removed in CDM v6.1 in favor of `GET v1/vcd/hierarchy/{id}/descendants`.", + "operationId": "getVcdHierarchyDescendants", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the parent vCD hierarchy object. 
To get top-level nodes, use **root** as the ID.", + "required": true, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Attribute to sort the results on.", + "required": false, + "type": "string", + "enum": [ + "Name", + "EffectiveSlaDomainName", + "SlaAssignment", + "ConnectionStatus", + "VappCount" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Order for sorting the results, either ascending or descending.", + "required": false, + "type": "string", + "default": "asc", + "enum": [ + "asc", + "desc" + ] + }, + { + "name": "limit", + "in": "query", + "description": "Limit the number of matches returned.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Ignore these many matches in the beginning.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "name", + "in": "query", + "description": "Search object by object name.", + "required": false, + "type": "string" + }, + { + "name": "is_relic", + "in": "query", + "description": "Filter by isRelic field of vCD vApp hierarchy object. Return both relic and non-relic descendants if this query is not set.", + "required": false, + "type": "boolean" + }, + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter by ID of effective SLA domain.", + "required": false, + "type": "string" + }, + { + "name": "object_type", + "in": "query", + "description": "Filter by node object type.", + "required": false, + "type": "string", + "enum": [ + "Cluster", + "VimServer", + "Org", + "OrgVdc", + "Catalog", + "vApp" + ] + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter by primary cluster ID, or **local**.", + "required": false, + "type": "string" + }, + { + "name": "sla_assignment", + "in": "query", + "description": "Filter by SLA assignment type.", + "required": false, + "type": "string", + "enum": [ + "Derived", + "Direct", + "Unassigned" + ] + }, + { + "name": "snappable_status", + "in": "query", + "description": "Filters vCD hierarchy objects based on the specified query value.", + "required": false, + "type": "string", + "enum": [ + "Protectable" + ] + } + ], + "responses": { + "200": { + "description": "Summary list of descendant objects.", + "schema": { + "$ref": "#/definitions/VcdHierarchyObjectSummaryListResponse" + } + } + }, + "deprecated": true, + "x-group": "vcd_hierarchy" + } + }, + "/ods_configuration": { + "get": { + "tags": [ + "/ods_configuration" + ], + "summary": "Get on-demand snapshot configuration", + "description": "Retrieve the current on-demand snapshot configuration.", + "operationId": "getOdsConfiguration", + "parameters": [], + "responses": { + "200": { + "description": "Current on-demand snapshot configuration.", + "schema": { + "$ref": "#/definitions/OdsConfigurationSummary" + } + } + }, + "x-group": "ods_configuration" + }, + "put": { + "tags": [ + "/ods_configuration" + ], + "summary": "Update on-demand snapshot configuration", + "description": "Updates the on-demand snapshot configuration.", + "operationId": "updateOdsConfiguration", + "parameters": [ + { + "in": "body", + "name": "ods_configuration", + "description": "Configuration to use to update on-demand snapshot. 
configuration.", + "required": true, + "schema": { + "$ref": "#/definitions/OdsConfigurationSummary" + } + } + ], + "responses": { + "200": { + "description": "Updated on-demand snapshot configuration.", + "schema": { + "$ref": "#/definitions/OdsConfigurationSummary" + } + } + }, + "x-group": "ods_configuration" + } + }, + "/oracle/db/mount": { + "get": { + "tags": [ + "/oracle" + ], + "summary": "Get all Oracle database Live Mounts", + "description": "Retrieve a list containing summary information about all Oracle database Live Mounts.", + "operationId": "queryOracleMount", + "parameters": [ + { + "name": "source_database_id", + "in": "query", + "description": "Filter the response to include only the Live Mounts for the source Oracle database with the specified ID.", + "required": false, + "type": "string" + }, + { + "name": "source_database_name", + "in": "query", + "description": "Filter the response by making an infix comparison of the source Oracle database name values in the response with the specified value.", + "required": false, + "type": "string" + }, + { + "name": "mounted_database_name", + "in": "query", + "description": "Filter the response by making an infix comparison of the Live Mount database name values in the response with the specified value. infix search.", + "required": false, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Specifies a Live Mount attribute to use in sorting the matches. Performs an ASCII sort of the values in the response using the specified attribute, in the order specified by sort_order.", + "required": false, + "type": "string", + "enum": [ + "SourceDatabaseName", + "MountedDatabaseName", + "CreationDate" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending or descending. Default is 'ascending'.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + }, + { + "name": "offset", + "in": "query", + "description": "Starting position in the list of matches. The response includes the specified numbered entry and all higher numbered entries. Use with limit to retrieve the response as smaller groups of entries, for example for paging of results.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "limit", + "in": "query", + "description": "Limit the summary information to a specified maximum number of matches. Optionally, use with offset to start the count at a specified point. Optionally, use with sort_by to perform sort on given attributes. Include sort_order to determine the ascending or descending direction of sort.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + } + ], + "responses": { + "200": { + "description": "Summary information for all Live Mounts of Oracle database snapshots.", + "schema": { + "$ref": "#/definitions/OracleMountSummaryListResponse" + } + } + }, + "x-group": "oracle_db" + } + }, + "/archive/nfs/reconnect": { + "post": { + "tags": [ + "/archive" + ], + "summary": "Reconnect to a specific NFS archival location", + "description": "Reconnect to a specific NFS archival location. Initiates an asynchronous job to connect to the archival location. This operation is deprecated. 
Use /archive/nfs/reader/connect instead.\n", + "operationId": "reconnectNfs", + "parameters": [ + { + "in": "body", + "name": "request", + "description": "Archival location credentials.", + "required": true, + "schema": { + "$ref": "#/definitions/NfsLocationReconnectSpec" + } + } + ], + "responses": { + "202": { + "description": "Returns the job instance id of the reconnect job.", + "schema": { + "$ref": "#/definitions/JobScheduledResponse" + } + } + }, + "deprecated": true, + "x-group": "archival" + } + }, + "/polaris/replication/source/replicate_app/{snappable_id}/cancel": { + "post": { + "tags": [ + "/polaris/replication/source" + ], + "summary": "Cancels the replication job which is currently running for the snappable specified by ID", + "description": "Cancels the replication job which is currently running for the snappable specified by ID. The ID is the snappable ID and the snappable should be an AppBlueprint with its child EC2 instances.", + "operationId": "cancelPolarisSourcePullReplicateApp", + "parameters": [ + { + "name": "snappable_id", + "in": "path", + "description": "Snappable ID of which we are replicating snapshots.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Polaris replication pull replicate succefully canceled.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "polaris_replication_source" + } + }, + "/node/{id}/support_tunnel": { + "get": { + "tags": [ + "/node" + ], + "summary": "Check support tunnel status for a particular node", + "description": "To be used by Admin to check status of the support tunnel.", + "operationId": "getTunnelStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the node to check the tunnel status for.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "To be used to obtain the status of support tunnels on a per node basis.", + "schema": { + "$ref": "#/definitions/SupportTunnelInfo" + } + } + }, + "x-group": "support" + }, + "patch": { + "tags": [ + "/node" + ], + "summary": "Enable or disable the SSH Tunnel for Support Access", + "description": "To be used by Admin to open or close a SSH tunnel for support. When enabling the support tunnel, the node 'id' must be *me* or the current node's 'id', because remote open is not supported. 
When disabling a support tunnel, the node 'id' can be that of any node in the cluster.", + "operationId": "updateTunnelStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the node add the tunnel to (this must be the current node id or *me*).", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "The support tunnel parameters.", + "required": true, + "schema": { + "$ref": "#/definitions/UpdateSupportTunnelConfig" + } + } + ], + "responses": { + "200": { + "description": "Returns the status of the support tunnel.", + "schema": { + "$ref": "#/definitions/SupportTunnelInfo" + } + } + }, + "x-group": "support" + } + }, + "/hyperv/vm/snapshot/{id}/download": { + "post": { + "tags": [ + "/hyperv/vm" + ], + "summary": "Creates a download from archival request", + "description": "Download a snapshot from archival.", + "operationId": "downloadHypervVirtualMachineSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status for the download request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "hyperv_vm" + } + }, + "/node_management/proxy_config": { + "delete": { + "tags": [ + "/node_management" + ], + "summary": "Delete existing proxy configuration", + "description": "Delete an existing proxy that was configured.", + "operationId": "deleteProxyConfig", + "parameters": [], + "responses": { + "204": { + "description": "Returned if proxy successfully deleted." + }, + "404": { + "description": "Returned if no proxy configured.", + "schema": { + "type": "string" + } + } + }, + "x-group": "internal_node_management" + }, + "get": { + "tags": [ + "/node_management" + ], + "summary": "Get existing proxy settings", + "description": "Get existing proxy settings.", + "operationId": "getProxyConfigs", + "parameters": [], + "responses": { + "200": { + "description": "Returns existing proxy config.", + "schema": { + "$ref": "#/definitions/ProxyConfigGet" + } + } + }, + "x-group": "internal_node_management" + }, + "patch": { + "tags": [ + "/node_management" + ], + "summary": "Update proxy config", + "description": "Update proxy config.", + "operationId": "updateProxyConfig", + "parameters": [ + { + "in": "body", + "name": "proxy", + "description": "Proxy url and port.", + "required": true, + "schema": { + "$ref": "#/definitions/ProxyConfig" + } + } + ], + "responses": { + "200": { + "description": "Returned if proxy successfully configured.", + "schema": { + "$ref": "#/definitions/ProxyConfigGet" + } + } + }, + "x-group": "internal_node_management" + } + }, + "/vmware/data_center": { + "get": { + "tags": [ + "/vmware/data_center" + ], + "summary": "Gets the list of all the data centers", + "description": "Returns the summary of all the data centers.", + "operationId": "queryDataCenter", + "parameters": [ + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter by Primary cluster ID, or local.", + "required": false, + "type": "string" + }, + { + "name": "vcenter_id", + "in": "query", + "description": "Filter by Vcenter ID.", + "required": false, + "type": "string" + }, + { + "name": "snappable_status", + "in": "query", + "description": "Determines whether to fetch data centers with additional privilege checks.", + "required": false, + "type": "string", + "enum": [ + "Protectable" + ] + } + ], + "responses": { + "200": { + 
"description": "Returns the list of all the data centers.", + "schema": { + "$ref": "#/definitions/DataCenterSummaryListResponse" + } + } + }, + "x-group": "data_center" + } + }, + "/volume_group/snapshot/mount/{id}": { + "delete": { + "tags": [ + "/volume_group" + ], + "summary": "Requst to delete a mount", + "description": "Create a request to delete a mount. If there are volumes mounted on a target host, this will use best-effort to unmount those volumes from the host, and proceed to unmount storage on Rubrik.", + "operationId": "deleteVolumeGroupSnapshotMount", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the mount to remove.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status for the unmount request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "volume_group" + } + }, + "/nutanix/vm/snapshot/{id}/download": { + "post": { + "tags": [ + "/nutanix/vm" + ], + "summary": "Creates a download from archival request", + "description": "Download a snapshot from archival.", + "operationId": "createDownloadSnapshotForNutanix", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status for the download request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "nutanix_vm" + } + }, + "/polaris/app_blueprint/snapshot/{id}/mount": { + "post": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Live mount Blueprint snapshot", + "description": "Live mount the specified Blueprint snapshot as a new Blueprint.", + "operationId": "createAppBlueprintSnapshotMount", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to the Blueprint snapshot object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for the request to live mount the specified Blueprint snapshot.", + "required": true, + "schema": { + "$ref": "#/definitions/AppBlueprintMountSnapshotJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Accepted request for asynchronous job to live mount a Blueprint snapshot.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "app_blueprint" + } + }, + "/config/usersettable_forge/reset": { + "patch": { + "tags": [ + "/config" + ], + "summary": "Reset the global Forge configuration", + "description": "Reset the global Forge configuration.", + "operationId": "resetUserSettableForgeConfig", + "parameters": [ + { + "in": "body", + "name": "new_values", + "description": "Configuration keys to reset.", + "required": true, + "schema": { + "$ref": "#/definitions/UserSettableGlobalForgeConfig" + } + } + ], + "responses": { + "200": { + "description": "global configuration.", + "schema": { + "$ref": "#/definitions/UserSettableGlobalForgeConfig" + } + } + }, + "x-group": "internal_config_reset" + } + }, + "/polaris/failover/request/{id}": { + "get": { + "tags": [ + "/polaris/failover" + ], + "summary": "Get failover job status", + "description": "Retrieve the details of a specified asynchronous job for a failover request.", + "operationId": "getFailoverAsyncRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an asynchronous job.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": 
"Status of a failover asynchronous job.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "failover" + } + }, + "/sla_domain/conflicts": { + "get": { + "tags": [ + "/sla_domain" + ], + "summary": "Finding managed entities with SLA Domain conflicts", + "description": "Find managed entities that have a conflict with the SLA Domain assignment that is derived from a specified entity. For managed entities that derive an assignment from the specified entity, this call finds conflicts caused by an individual assignment, and conflicts caused by deriving assignments from more than one source.", + "operationId": "getSlaConflicts", + "parameters": [ + { + "name": "managed_id", + "in": "query", + "description": "ID of the managed entity that is the source of the derived SLA Domain assignment. All entities that derive an SLA Domain assignment from the specified managed entity are checked for conflicts.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "List of conflicts found in managed entities that derive an assignment from the specified managed entity.", + "schema": { + "$ref": "#/definitions/ProtectionDetailListResponse" + } + } + }, + "deprecated": true, + "x-group": "sla_domain" + } + }, + "/host/{id}/log": { + "post": { + "tags": [ + "/host" + ], + "summary": "REQUIRES SUPPORT TOKEN - Fetch host logs from agent into cluster", + "description": "REQUIRES SUPPORT TOKEN - Fetch host logs from agent into cluster /sd/scratch directory. A support token is required for this operation.", + "operationId": "fetchHostLogs", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the registered host.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Host logs successfully fetched into cluster." + } + }, + "x-group": "hosts" + } + }, + "/archive/location/job/connect/{id}": { + "get": { + "tags": [ + "/archive" + ], + "summary": "Get details about a archival location connect job", + "description": "Retrieve the following information about job: ID of job, job status, error details, start time of job, end time of job, job type, ID of the node, job progress and location id.", + "operationId": "getAddArchivalLocationJobInstance", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of Job.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Return job details.", + "schema": { + "$ref": "#/definitions/JobInstanceDetail" + } + } + }, + "x-group": "archival" + } + }, + "/job/type/garbageCollection": { + "post": { + "tags": [ + "/job" + ], + "summary": "REQUIRES SUPPORT TOKEN - Create a garbage collection job instance for the current node", + "description": "REQUIRES SUPPORT TOKEN - Create a garbage collection job instance for the current node. 
To force garbage collection on all nodes use \"rkcl exec all 'rubrik_tool.py create_garbage_collection'\".", + "operationId": "createGarbageCollection", + "parameters": [], + "responses": { + "200": { + "description": "Returns the composite JobInstance ID of the scheduled GC job on success.", + "schema": { + "type": "string" + } + } + }, + "x-group": "internal_job" + } + }, + "/cloud_on/azure/resource_group": { + "post": { + "tags": [ + "/cloud_on" + ], + "summary": "Get list of all resource groups", + "description": "Get list of all resource groups associated with given Azure storage account credentials.\n", + "operationId": "getResourceGroups", + "parameters": [ + { + "in": "body", + "name": "azure_resource_group_request", + "description": "An Azure resource group request that contains the credentials or data location ID required to fetch a list of the Azure resource groups and compute proxy configuration.\n", + "required": true, + "schema": { + "$ref": "#/definitions/AzureResourceGroupRequest" + } + } + ], + "responses": { + "200": { + "description": "Returns the list of resource groups.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/ResourceGroup" + } + } + } + }, + "x-group": "cloud_instance" + } + }, + "/polaris/app_blueprint/cloud_on/aws/app_image/{id}": { + "delete": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Delete a given AWS app cloud image", + "description": "Delete a given AWS app cloud image.", + "operationId": "deleteAwsAppCloudMachineImage", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of an AWS app cloud image.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status for the AWS app cloud image deletion request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "cloud_instance_app_blueprint" + }, + "get": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Get AWS app cloud image details", + "description": "Retrieve the details for a specified AWS app cloud image.", + "operationId": "getAwsAppCloudMachineImage", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of an AWS app cloud image.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns details about the AWS app cloud image.", + "schema": { + "$ref": "#/definitions/AwsAppImageDetail" + } + } + }, + "x-group": "cloud_instance_app_blueprint" + } + }, + "/cluster/{id}/platforminfo": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get platform information", + "description": "Retrieve information about the computing platform that is running Rubrik CDM software.", + "operationId": "getPlatformInfo", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of a Rubrik cluster, or *me* for the Rubrik cluster that is hosting the API session.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "Information about the computing platform that is running Rubrik CDM software.", + "schema": { + "$ref": "#/definitions/PlatformInfo" + } + } + }, + "x-group": "cluster", + "x-unauthenticated": true + } + }, + "/managed_volume/snapshot/export/{id}": { + "delete": { + "tags": [ + "/managed_volume" + ], + "summary": "Delete a managed volume snapshot export", + "description": "Delete a managed volume snapshot export.", + "operationId": "deleteManagedVolumeSnapshotExport", + "parameters": [ + { + "name": "id", + "in": 
"path", + "description": "ID of snapshot export.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Snapshot export sucessfully deleted." + } + }, + "x-group": "managed_volume" + }, + "get": { + "tags": [ + "/managed_volume" + ], + "summary": "Get information for a managed volume snapshot export", + "description": "Retrieve detailed information about a managed volume snapshot export by specifying the snapshot export ID.", + "operationId": "getManagedVolumeSnapshotExport", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot export.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns details about the specified snapshot export.", + "schema": { + "$ref": "#/definitions/ManagedVolumeSnapshotExportSummary" + } + } + }, + "x-group": "managed_volume" + } + }, + "/organization/{id}/linux/metric": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get linux fileset metrics", + "description": "Retrieve the total object count and total protected object count.", + "operationId": "getLinuxMetric", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns an object with metrics.", + "schema": { + "$ref": "#/definitions/OrganizationResourceMetric" + } + } + }, + "x-group": "organization_host" + } + }, + "/managed_volume/snapshot/{id}": { + "delete": { + "tags": [ + "/managed_volume" + ], + "summary": "Delete a managed volume snapshot", + "description": "Delete a managed volume snapshot.", + "operationId": "deleteManagedVolumeSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + }, + { + "name": "location", + "in": "query", + "description": "Snapshot location to delete. Use **_local_** to delete all local snapshots and **_all_** to delete the snapshot in all locations. Defaults to **_all_** if not set.", + "required": false, + "type": "string", + "enum": [ + "all", + "local" + ] + } + ], + "responses": { + "204": { + "description": "Snapshot sucessfully deleted." 
+ } + }, + "x-group": "managed_volume" + }, + "get": { + "tags": [ + "/managed_volume" + ], + "summary": "Get managed volume snapshot details", + "description": "Retrieve detailed information about a snapshot.", + "operationId": "getManagedVolumeSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns details about a snapshot.", + "schema": { + "$ref": "#/definitions/ManagedVolumeSnapshotDetail" + } + } + }, + "x-group": "managed_volume" + } + }, + "/config/usersettable_cerebro/reset": { + "patch": { + "tags": [ + "/config" + ], + "summary": "Reset the global Cerebro configuration", + "description": "Reset the global Cerebro configuration.", + "operationId": "resetUserSettableCerebroConfig", + "parameters": [ + { + "in": "body", + "name": "new_values", + "description": "Configuration keys to reset.", + "required": true, + "schema": { + "$ref": "#/definitions/UserSettableGlobalCerebroConfig" + } + } + ], + "responses": { + "200": { + "description": "global configuration.", + "schema": { + "$ref": "#/definitions/UserSettableGlobalCerebroConfig" + } + } + }, + "x-group": "internal_config_reset" + } + }, + "/cluster/{id}/decommission_nodes": { + "post": { + "tags": [ + "/cluster" + ], + "summary": "Decommission nodes", + "description": "Start an asynchronous task to decommission the specified node or nodes.", + "operationId": "decommissionNodes", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "decommission_nodes_config", + "description": "Node decommission job properties.", + "required": true, + "schema": { + "$ref": "#/definitions/DecommissionNodesConfig" + } + } + ], + "responses": { + "202": { + "description": "Returns the request ID for the asynchronous node decommission job.", + "schema": { + "type": "string" + } + }, + "422": { + "description": "Could not decommission node (Invalid Input).", + "schema": { + "$ref": "#/definitions/RequestFailedException" + } + } + }, + "x-group": "cluster" + } + }, + "/cluster/{id}/register": { + "post": { + "tags": [ + "/cluster" + ], + "summary": "Submit community user credentials to register a cluster", + "description": "Submit community user credentials to register a cluster.", + "operationId": "registerWithRubrik", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of a Rubrik cluster, or use *me* for the Rubrik cluster that is hosting the current session.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "community_user_credentials", + "description": "Community User Credentials.", + "required": true, + "schema": { + "$ref": "#/definitions/CommunityUserCredentials" + } + } + ], + "responses": { + "204": { + "description": "Successfully registered with Rubrik." 
+ } + }, + "x-group": "cluster" + } + }, + "/hyperv/vm/snapshot/{id}/export": { + "post": { + "tags": [ + "/hyperv/vm" + ], + "summary": "Export VM snapshot", + "description": "Export snapshot of a vm.", + "operationId": "exportHypervVirtualMachineSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for the export request.", + "required": true, + "schema": { + "$ref": "#/definitions/HypervExportSnapshotJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Status for the export request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "hyperv_vm" + } + }, + "/oracle/db/{id}/missed_snapshot": { + "get": { + "tags": [ + "/oracle" + ], + "summary": "Get missed snapshots for an Oracle database", + "description": "Retrieve summary information about the missed snapshots of an Oracle database.", + "operationId": "getMissedOracleDbSnapshots", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an Oracle database object.", + "required": true, + "type": "string" + }, + { + "name": "after_time", + "in": "query", + "description": "Filter the matches in the response to include only the snapshots taken on or after the time specified by a date-time string. The date-time string should be in ISO8601 format, such as \"2016-01-01T01:23:45.678\".", + "required": false, + "type": "string", + "format": "date-time" + }, + { + "name": "before_time", + "in": "query", + "description": "Filter the matches in the response to include only the snapshots taken on or after the time specified by a date-time string. The date-time string should be in ISO8601 format, such as \"2016-01-01T01:23:45.678\".", + "required": false, + "type": "string", + "format": "date-time" + } + ], + "responses": { + "200": { + "description": "Information about missed snapshots for an Oracle database.", + "schema": { + "$ref": "#/definitions/MissedSnapshotListResponse" + } + } + }, + "x-group": "oracle_db" + } + }, + "/aws/ec2_instance/{id}": { + "get": { + "tags": [ + "/aws/ec2_instance" + ], + "summary": "Get EC2 instance details", + "description": "Retrieve details of an EC2 instance object.", + "operationId": "getAwsEc2Instance", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the EC2 Instance.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Return details about the AWS EC2 instance.", + "schema": { + "$ref": "#/definitions/AwsEc2InstanceDetail" + } + } + }, + "x-group": "aws_ec2_instance" + }, + "patch": { + "tags": [ + "/aws/ec2_instance" + ], + "summary": "Update EC2 instance", + "description": "Update the configuration of a specific EC2 instance object with specified properties.", + "operationId": "updateAwsEc2Instance", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an EC2 instance object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "ec2_update_properties", + "description": "Properties for an EC2 object update.", + "required": true, + "schema": { + "$ref": "#/definitions/AwsEc2InstanceUpdate" + } + } + ], + "responses": { + "200": { + "description": "Return details about the AWS EC2 instance.", + "schema": { + "$ref": "#/definitions/AwsEc2InstanceDetail" + } + } + }, + "x-group": "aws_ec2_instance" + } + }, + 
"/hyperv/vm/snapshot/{id}/instant_recover": { + "post": { + "tags": [ + "/hyperv/vm" + ], + "summary": "Creates an instant recover request that restores a target VM from the given Rubrik-hosted-snapshot", + "description": "The VM will be started with networking enabled. If the VM does not exist anymore, a new VM will be created.", + "operationId": "recoverHypervVirtualMachineSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of Snapshot.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for the instant recover request.", + "required": true, + "schema": { + "$ref": "#/definitions/HypervInstantRecoveryJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Status for the instant recover request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "hyperv_vm" + } + }, + "/cluster/{id}/security/ssh": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get whether SSH is enabled for nodes in the cluster", + "description": "Returns whether SSH is enabled on TCP port 22 for all nodes in the cluster.", + "operationId": "getSshEnabled", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "The current state of whether SSH is enabled.", + "schema": { + "$ref": "#/definitions/SshConfig" + } + } + }, + "x-group": "security" + }, + "put": { + "tags": [ + "/cluster" + ], + "summary": "Set whether SSH is enabled/disabled for nodes in the cluser", + "description": "Disables or enables SSH on TCP port 22 for all nodes in the cluster.", + "operationId": "setSshEnabled", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "config", + "description": "The new configuration parameters for SSH, including whether it is enabled.", + "required": true, + "schema": { + "$ref": "#/definitions/SshConfig" + } + } + ], + "responses": { + "200": { + "description": "The current state of whether SSH is enabled.", + "schema": { + "$ref": "#/definitions/SshConfig" + } + } + }, + "x-group": "security" + } + }, + "/host/{id}/diagnose": { + "get": { + "tags": [ + "/host" + ], + "summary": "Get host availability statuses", + "description": "Retrieve the availability status for each host registered with a specified Rubrik CDM instance.", + "operationId": "getHostDiagnosis", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to the host object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Availability information for registered hosts.", + "schema": { + "$ref": "#/definitions/HostDiagnosisSummary" + } + } + }, + "x-group": "hosts" + } + }, + "/cluster/{id}/disk_capacity": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get installed disk capacity", + "description": "Retrieves total installed disk space on the Rubrik cluster.", + "operationId": "getDiskCapacity", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "Disk capacity of the Rubrik cluster.", + "schema": { + "$ref": 
"#/definitions/DiskCapacity" + } + } + }, + "x-group": "cluster" + } + }, + "/vcd/vapp/{id}": { + "get": { + "tags": [ + "/vcd/vapp" + ], + "summary": "(DEPRECATED) Get vApp details", + "description": "Retrieve detailed information for a specified vApp. This endpoint will be removed in CDM v6.1 in favor of `GET v1/vcd/vapp/{id}`.", + "operationId": "getVcdVapp", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a vApp object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Detailed information about a vApp object.", + "schema": { + "$ref": "#/definitions/VcdVappDetail" + } + } + }, + "deprecated": true, + "x-group": "vcd_vapp" + }, + "patch": { + "tags": [ + "/vcd/vapp" + ], + "summary": "(DEPRECATED) Update vApp", + "description": "Make changes to the parameters of a specified vApp object. This endpoint will be removed in CDM v6.1 in favor of `PATCH v1/vcd/vapp/{id}`.", + "operationId": "updateVcdVapp", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a vApp object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "patch_properties", + "description": "Parameters to use to update the specified vApp object.", + "required": true, + "schema": { + "$ref": "#/definitions/VcdVappPatch" + } + } + ], + "responses": { + "200": { + "description": "Details of an updated vApp object.", + "schema": { + "$ref": "#/definitions/VcdVappDetail" + } + } + }, + "deprecated": true, + "x-group": "vcd_vapp" + } + }, + "/cluster/{id}/disk/{disk_id}": { + "delete": { + "tags": [ + "/cluster" + ], + "summary": "Instruct the cluster that a disk has been removed", + "description": "Marks the disk removed and updates cluster metadata.", + "operationId": "removeDisk", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "name": "disk_id", + "in": "path", + "description": "ID of a missing disk to mark removed.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Successfully removed the disk from the cluster." + } + }, + "x-group": "cluster" + }, + "patch": { + "tags": [ + "/cluster" + ], + "summary": "Set up an unformatted disk", + "description": "Formats, initializes and mounts an unformatted disk.", + "operationId": "setupDisk", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "name": "disk_id", + "in": "path", + "description": "ID of an unformatted disk to set up.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Information about the formatted disk.", + "schema": { + "$ref": "#/definitions/DiskInfo" + } + } + }, + "x-group": "cluster" + } + }, + "/vcd/hierarchy/{id}": { + "get": { + "tags": [ + "/vcd/hierarchy" + ], + "summary": "(DEPRECATED) Get summary of a vCD hierarchy object", + "description": "Retrieve details for the specified object in the vCD hierarchy. 
This endpoint will be removed in CDM v6.1 in favor of `GET v1/vcd/hierarchy/{id}`.", + "operationId": "getVcdHierarchyObject", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the vCD hierarchy object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Details of the hierarchy object.", + "schema": { + "$ref": "#/definitions/VcdHierarchyObjectSummary" + } + } + }, + "deprecated": true, + "x-group": "vcd_hierarchy" + } + }, + "/managed_volume/{id}/begin_snapshot": { + "post": { + "tags": [ + "/managed_volume" + ], + "summary": "Begin managed volume snapshot", + "description": "Opens the managed volume for writes. All writes to the managed volume until next end-snapshot call will be part of this snapshot.", + "operationId": "openWrites", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of managed volume.", + "required": true, + "type": "string" + }, + { + "name": "owner_id", + "in": "query", + "description": "A string representing the owner of a snapshot. This owner ID must be used while adding a reference to this snapshot.", + "required": false, + "type": "string" + }, + { + "in": "body", + "name": "reference", + "description": "Details about the reference to be added to the snapshot.", + "required": false, + "schema": { + "$ref": "#/definitions/ManagedVolumeSnapshotReferenceWrapper" + }, + "x-hidden": true + } + ], + "responses": { + "200": { + "description": "The managed volume is opened for writes.", + "schema": { + "$ref": "#/definitions/ManagedVolumeInflightSnapshotSummary" + } + } + }, + "x-group": "managed_volume" + } + }, + "/oracle/db/{id}/export": { + "post": { + "tags": [ + "/oracle" + ], + "summary": "Export an Oracle database", + "description": "Request an asynchronous job to export an Oracle database from a specified snapshot or timestamp.", + "operationId": "createExportOracleDb", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the database to be exported.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration parameters for a job to export an Oracle database from a specified snapshot or timestamp.", + "required": true, + "schema": { + "$ref": "#/definitions/ExportOracleDbConfig" + } + } + ], + "responses": { + "202": { + "description": "Request status for an async job to export an Oracle database from a specified snapshot or timestamp.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "oracle_db" + } + }, + "/cloud_on/azure/instance/{id}/cloud_vm": { + "delete": { + "tags": [ + "/cloud_on" + ], + "summary": "Terminate a given Azure cloud instance", + "description": "Terminates a given Azure instance on cloud. 
The instance status should be STOPPED for the termination to happen.\n", + "operationId": "deleteAzurePublicCloudMachineInstance", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Azure cloud instance.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status for the Azure instance deletion request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "cloud_instance" + }, + "patch": { + "tags": [ + "/cloud_on" + ], + "summary": "Turn on or off a given Azure cloud instance", + "description": "Turn on or off a given Azure cloud instance.", + "operationId": "switchAzurePublicCloudMachineInstancePower", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Azure cloud instance.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "update", + "description": "Properties to update.", + "required": true, + "schema": { + "$ref": "#/definitions/CloudInstanceUpdate" + } + } + ], + "responses": { + "200": { + "description": "Returns updated details about the Azure cloud instance.", + "schema": { + "$ref": "#/definitions/AzureInstanceDetail" + } + } + }, + "x-group": "cloud_instance" + } + }, + "/stats/snapshot_storage/ingested": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get snapshot ingested amount", + "description": "Retrieve the amount of data ingested by the Rubrik cluster for all snapshots.", + "operationId": "ingestedSnapshotStorage", + "parameters": [], + "responses": { + "200": { + "description": "Returns an object with attribute: name(String), key(String), value(String), frequencyInMin(Integer), lastUpdateTime(Date).", + "schema": { + "$ref": "#/definitions/OfflineStatSummary" + } + } + }, + "x-group": "stats" + } + }, + "/stats/physical_ingest/time_series": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get timeseries on physical data ingestion", + "description": "Get timeseries on physical data ingestion.", + "operationId": "physicalIngest", + "parameters": [ + { + "name": "range", + "in": "query", + "description": "Range for timeseries. eg: -1h, -1min, etc. 
Default value is -1h.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns a timeSeries depicting bytes per second.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/TimeStat" + } + } + } + }, + "x-group": "stats" + } + }, + "/job/type/expire": { + "post": { + "tags": [ + "/job" + ], + "summary": "REQUIRES SUPPORT TOKEN - Create an expire snapshot job instance", + "description": "TODO.", + "operationId": "createExpire", + "parameters": [ + { + "in": "body", + "name": "config", + "description": "Configuration for the expire snapshot job.", + "required": true, + "schema": { + "$ref": "#/definitions/ExpireSnapshotJobConfig" + } + } + ], + "responses": { + "200": { + "description": "Return the expire job instance id on success.", + "schema": { + "type": "string" + } + }, + "404": { + "description": "Returned if snappable does not exist.", + "schema": { + "type": "string" + } + }, + "422": { + "description": "Returned if the request to create the expire job fails.", + "schema": { + "$ref": "#/definitions/RequestFailedException" + } + } + }, + "x-group": "internal_job" + } + }, + "/cloud_on/azure/security_group": { + "get": { + "tags": [ + "/cloud_on" + ], + "summary": "Get a list of security group IDs queried by Azure location ID", + "operationId": "queryAzureSecurityGroup", + "parameters": [ + { + "name": "data_location_id", + "in": "query", + "description": "Azure data location ID.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns a list of security group IDs recognizable by Azure.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/SecurityGroup" + } + } + } + }, + "x-group": "cloud_instance" + } + }, + "/node_management/reset": { + "post": { + "tags": [ + "/node_management" + ], + "summary": "Issues a reset to the current node", + "description": "Issues a reset to the current node.", + "operationId": "resetNode", + "parameters": [], + "responses": { + "204": { + "description": "Successfully started reseting the node." 
+ } + }, + "x-group": "internal_node_management", + "x-unauthenticated": true + } + }, + "/config/usersettable_volumeGroup": { + "get": { + "tags": [ + "/config" + ], + "summary": "Fetch the global Volumegroup configuration", + "description": "Fetch the global Volumegroup configuration.", + "operationId": "getUserSettableVolumeGroupConfig", + "parameters": [], + "responses": { + "200": { + "description": "global configuration.", + "schema": { + "$ref": "#/definitions/UserSettableGlobalVolumeGroupConfig" + } + } + }, + "x-group": "internal_config" + }, + "patch": { + "tags": [ + "/config" + ], + "summary": "Update the global Volumegroup configuration", + "description": "Update the global Volumegroup configuration.", + "operationId": "updateUserSettableVolumeGroupConfig", + "parameters": [ + { + "in": "body", + "name": "new_values", + "description": "New configuration values.", + "required": true, + "schema": { + "$ref": "#/definitions/UserSettableGlobalVolumeGroupConfig" + } + } + ], + "responses": { + "200": { + "description": "global configuration.", + "schema": { + "$ref": "#/definitions/UserSettableGlobalVolumeGroupConfig" + } + } + }, + "x-group": "internal_config" + } + }, + "/node_management/is_bootstrapped": { + "get": { + "tags": [ + "/node_management" + ], + "summary": "Returns whether this machine is bootstrapped to a cluster", + "description": "Check whether this machine has been bootstrapped.", + "operationId": "isBootstrapped", + "parameters": [], + "responses": { + "200": { + "description": "TODO.", + "schema": { + "$ref": "#/definitions/BooleanResponse" + } + } + }, + "x-group": "internal_node_management", + "x-unauthenticated": true + } + }, + "/organization/{id}/nutanix/vm/metric": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get nutanix vm metrics", + "description": "Retrieve the total object count, total protected object and no sla object count.", + "operationId": "getNutanixVmMetric", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns an object with metrics.", + "schema": { + "$ref": "#/definitions/OrganizationResourceMetric" + } + } + }, + "x-group": "organization_nutanix" + } + }, + "/polaris/failover/image/{id}/unpin": { + "post": { + "tags": [ + "/polaris/failover" + ], + "summary": "Unpins Blueprint cloud machine image after failover", + "description": "Unpin the Blueprint cloud machine image after the failover has completed.", + "operationId": "unpinAppCloudMachineImageForFailover", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to the Blueprint cloud machine image.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "failover_id", + "description": "ID of the failover job triggering the pin.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "204": { + "description": "Image successfully unpinned." 
+ } + }, + "x-group": "failover" + } + }, + "/managed_volume/snapshot/export": { + "get": { + "tags": [ + "/managed_volume" + ], + "summary": "Get summary information for all managed volume snapshot exports", + "description": "Returns a list of summary information for all exports of all managed volume snapshots, sorted and filtered.", + "operationId": "queryManagedVolumeSnapshotExport", + "parameters": [ + { + "name": "source_managed_volume_id", + "in": "query", + "description": "Filter snapshot exports to those that belong to the managed volume with given id.", + "required": false, + "type": "string" + }, + { + "name": "source_managed_volume_name", + "in": "query", + "description": "Filter snapshot exports to those that belong to the managed volume with given name using infix search.", + "required": false, + "type": "string" + }, + { + "name": "managed_volume_type", + "in": "query", + "description": "Filter by the type of managed volume.", + "required": false, + "type": "string", + "enum": [ + "AlwaysMounted", + "SlaBased" + ] + }, + { + "name": "sort_by", + "in": "query", + "description": "Specifies the managed volume export attribute to use in sorting the managed volume summary information. Performs an ASCII sort of the summary information using the specified attribute, in the order specified.\nValid attributes are 'status', 'snapshotDate', 'sourceManagedVolumeName', 'exportedDate'.", + "required": false, + "type": "string", + "enum": [ + "status", + "snapshotDate", + "sourceManagedVolumeName", + "exportedDate" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending or descending.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "Returns summary information for snapshot exports.", + "schema": { + "$ref": "#/definitions/ManagedVolumeSnapshotExportSummaryListResponse" + } + } + }, + "x-group": "managed_volume" + } + }, + "/organization/{id}/nas": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get information for authorized NAS shares in an organization", + "description": "Retrieve summary information for the explicitly authorized NAS shares of an organization. Information for a NAS share is only included when the organization has an explicit authorization for the share. This endpoint returns an empty list for the default global organization.", + "operationId": "getExplicitlyAuthorizedNasShares", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Get NAS shares.", + "schema": { + "$ref": "#/definitions/ManagedObjectSummaryListResponse" + } + } + }, + "x-group": "organization_host" + } + }, + "/polaris/app_blueprint/deprecate/{id}": { + "post": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Deprecate a Blueprint at the end of successful production failover", + "description": "During production failover, after all of the virtual machine images have been successfully instantiated in the cloud, archive the Blueprint and prefix the virtual machine names with \"Deprecated \" on the vCenter. 
This endpoint is reserved for Polaris after a successful production failover of the Blueprint.", + "operationId": "deprecateAppBlueprint", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to the Blueprint object.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Blueprint successfully deprecated." + } + }, + "x-group": "app_blueprint" + } + }, + "/organization/{id}/storage/array/volume_group/metric": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get storage array volume group metrics", + "description": "Retrieve the total object count, total protected object and no sla object count.", + "operationId": "getStorageArrayVolumeGroupMetric", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns an object with metrics.", + "schema": { + "$ref": "#/definitions/OrganizationResourceMetric" + } + } + }, + "x-group": "organization_storage_array_volume_group" + } + }, + "/smb/config": { + "get": { + "tags": [ + "/smb" + ], + "summary": "Get SMB configuration", + "description": "Get SMB configuration.", + "operationId": "getSmbConfiguration", + "parameters": [], + "responses": { + "200": { + "description": "Successfully configured SMB.", + "schema": { + "$ref": "#/definitions/SmbConfig" + } + } + }, + "x-group": "smb" + }, + "put": { + "tags": [ + "/smb" + ], + "summary": "SMB configuration", + "description": "SMB configuration.", + "operationId": "putSmbConfiguration", + "parameters": [ + { + "in": "body", + "name": "config", + "description": "SMB configuration.", + "required": true, + "schema": { + "$ref": "#/definitions/SmbConfig" + } + } + ], + "responses": { + "200": { + "description": "Successfully configured SMB.", + "schema": { + "$ref": "#/definitions/SmbConfig" + } + } + }, + "x-group": "smb" + } + }, + "/nutanix/cluster": { + "post": { + "tags": [ + "/nutanix/cluster" + ], + "summary": "Add Nutanix cluster", + "description": "Create a Nutanix cluster object by providing an address and account credentials for Prism. Initiates an asynchronous job to establish a connection with the cluster and retrieve all metadata. Use GET /nutanix_cluster/{id}/status to check status.", + "operationId": "createNutanixCluster", + "parameters": [ + { + "in": "body", + "name": "nutanix_cluster_config", + "description": "IP address, natural ID of added cluster (since Prism central can manage multiple clusters), and credentials for Prism.", + "required": true, + "schema": { + "$ref": "#/definitions/NutanixClusterConfig" + } + } + ], + "responses": { + "202": { + "description": "Status for the refresh request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "nutanix_cluster" + }, + "get": { + "tags": [ + "/nutanix/cluster" + ], + "summary": "Get list of Nutanix clusters", + "description": "Retrieve information for each managed Nutanix cluster.", + "operationId": "queryNutanixCluster", + "parameters": [ + { + "name": "primary_cluster_id", + "in": "query", + "description": "Limits the information to the Rubrik cluster specified by the value of primary_cluster_id. Use 'local' for the Rubrik cluster that is hosting the current REST API session.", + "required": false, + "type": "string" + }, + { + "name": "should_get_status", + "in": "query", + "description": "Determines whether or not the status field is populated. 
Populating this field involves issuing a call to the Nutanix cluster itself, which may not be performant. Defaults to false.", + "required": false, + "type": "boolean" + } + ], + "responses": { + "200": { + "description": "Summary information for managed Nutanix clusters.", + "schema": { + "$ref": "#/definitions/NutanixClusterSummaryListResponse" + } + } + }, + "x-group": "nutanix_cluster" + } + }, + "/organization/{id}/stats/total_storage_usage": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get the Total Storage Usage for an Organization", + "description": "Retrieve the Total Storage Usage (bytes) for the Organization specified by id.", + "operationId": "getTotalStorage", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of an Organization object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns an object with attribute physical and logical, each representing the respective stat information.", + "schema": { + "$ref": "#/definitions/OrganizationStat" + } + } + }, + "x-group": "organization" + } + }, + "/smb/domain/{domain_name}": { + "delete": { + "tags": [ + "/smb" + ], + "summary": "Delete Active Directory from Rubrik", + "description": "Delete Active Directory from Rubrik.", + "operationId": "deleteSmbDomain", + "parameters": [ + { + "name": "domain_name", + "in": "path", + "description": "SMB domain name.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Successfully delete Active Directory from Rubrik." + } + }, + "x-group": "smb" + } + }, + "/mssql/db/defaults": { + "get": { + "tags": [ + "/mssql" + ], + "summary": "(DEPRECATED) Returns the current default properties for Microsoft SQL databases", + "description": "The default properties are Log Backup Frequency (in seconds) and CBT status. New databases added to the Rubrik system will be given the log backup frequency value by default. New hosts added to the Rubrik system will be given the CBT status by default. This endpoint will be removed in CDM v6.0 in favor of `GET v1/mssql/db/defaults`.", + "operationId": "getDefaultDbProperties", + "parameters": [], + "responses": { + "200": { + "description": "Returns the current default properties.", + "schema": { + "$ref": "#/definitions/MssqlDbDefaults" + } + } + }, + "deprecated": true, + "x-group": "mssql" + }, + "patch": { + "tags": [ + "/mssql" + ], + "summary": "(DEPRECATED) Update the default properties for Microsoft SQL databases", + "description": "The default properties are Log Backup Frequency (in seconds) and CBT status. New databases added to the Rubrik system will be given the log backup frequency value by default. New hosts added to the Rubrik system will be given the CBT status by default. 
This endpoint will be removed in CDM v6.0 in favor of `PATCH v1/mssql/db/defaults`.", + "operationId": "updateDefaultDbProperties", + "parameters": [ + { + "in": "body", + "name": "default_properties", + "description": "Updated default properties.", + "required": true, + "schema": { + "$ref": "#/definitions/MssqlDbDefaultsUpdate" + } + } + ], + "responses": { + "200": { + "description": "Returns the updated default properties.", + "schema": { + "$ref": "#/definitions/MssqlDbDefaults" + } + } + }, + "deprecated": true, + "x-group": "mssql" + } + }, + "/smtp_instance/{id}": { + "delete": { + "tags": [ + "/smtp_instance" + ], + "summary": "Delete a smtp instance", + "description": "Deletes a SMTP instance.", + "operationId": "deleteSmtpInstance", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the smtp instance to be deleted.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "SMTP instance deleted." + } + }, + "x-group": "smtp_instance" + }, + "get": { + "tags": [ + "/smtp_instance" + ], + "summary": "Details of a smtp instance", + "description": "Details of a SMTP instance.", + "operationId": "getSmtpInstance", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the smtp instance to be fetched.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "TODO.", + "schema": { + "$ref": "#/definitions/SmtpInstanceDetail" + } + } + }, + "x-group": "smtp_instance" + }, + "patch": { + "tags": [ + "/smtp_instance" + ], + "summary": "update specifications for sending email", + "description": "update specifications for sending email.", + "operationId": "updateSmtpInstance", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the smtp instance to be deleted.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "instance", + "description": "smtp instance.s.", + "required": true, + "schema": { + "$ref": "#/definitions/UpdateSmtpInstanceDefinition" + } + } + ], + "responses": { + "200": { + "description": "TODO.", + "schema": { + "$ref": "#/definitions/SmtpInstanceDetail" + } + } + }, + "x-group": "smtp_instance" + } + }, + "/oracle/rac/{id}": { + "get": { + "tags": [ + "/oracle" + ], + "summary": "Get Oracle RAC information", + "description": "Retrieve detailed information for a specified Oracle RAC object.", + "operationId": "getOracleRac", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an Oracle RAC object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Successful query results.", + "schema": { + "$ref": "#/definitions/OracleRacDetail" + } + } + }, + "x-group": "oracle_rac" + }, + "patch": { + "tags": [ + "/oracle" + ], + "summary": "Update an Oracle RAC", + "description": "Update properties of an Oracle RAC object.", + "operationId": "updateOracleRac", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an Oracle RAC object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "update_properties", + "description": "Properties to use for the update of an Oracle RAC object.", + "required": true, + "schema": { + "$ref": "#/definitions/OracleUpdate" + } + } + ], + "responses": { + "200": { + "description": "Successfully updated an Oracle RAC object.", + "schema": { + "$ref": "#/definitions/OracleRacDetail" + } + } + }, + "x-group": "oracle_rac" + } + }, + 
"/archive/location/request/{id}": { + "get": { + "tags": [ + "/archive" + ], + "summary": "Get the details of an asynchronous archival location request", + "description": "Returns the details of an asynchronous archival location request issued at an earlier time.", + "operationId": "getArchiveLocationRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the asynchronous request.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Status of the specified asynchronous archival location request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "archival" + } + }, + "/config/usersettable_shield/reset": { + "patch": { + "tags": [ + "/config" + ], + "summary": "Reset the global Shield configuration", + "description": "Reset the global Shield configuration.", + "operationId": "resetUserSettableShieldConfig", + "parameters": [ + { + "in": "body", + "name": "new_values", + "description": "Configuration keys to reset.", + "required": true, + "schema": { + "$ref": "#/definitions/UserSettableGlobalShieldConfig" + } + } + ], + "responses": { + "200": { + "description": "global configuration.", + "schema": { + "$ref": "#/definitions/UserSettableGlobalShieldConfig" + } + } + }, + "x-group": "internal_config_reset" + } + }, + "/vmware/vm/count": { + "get": { + "tags": [ + "/vmware/vm" + ], + "summary": "Count of all the local non-archived VMs", + "description": "Count of all the local non-archived VMs.", + "operationId": "countVm", + "parameters": [], + "responses": { + "200": { + "description": "Returns a count of all the non-archived VMs.", + "schema": { + "$ref": "#/definitions/CountResponse" + } + } + }, + "x-group": "vm" + } + }, + "/host/share": { + "post": { + "tags": [ + "/host/share" + ], + "summary": "Add a network share to a host", + "description": "Add a network share object to a host.", + "operationId": "addShareToHost", + "parameters": [ + { + "in": "body", + "name": "share", + "description": "Network share creation definition.", + "required": true, + "schema": { + "$ref": "#/definitions/HostShareCreate" + } + } + ], + "responses": { + "201": { + "description": "Detail information for added network share.", + "schema": { + "$ref": "#/definitions/HostShareDetail" + } + } + }, + "x-group": "hosts" + }, + "get": { + "tags": [ + "/host/share" + ], + "summary": "Get summary information for network shares", + "description": "Retrieve summary information for network shares. Apply filters to refine the returned information.", + "operationId": "queryHostShare", + "parameters": [ + { + "name": "host_id", + "in": "query", + "description": "Filter the summary information based on the host id.", + "required": false, + "type": "string" + }, + { + "name": "share_type", + "in": "query", + "description": "Filter the summary information based on the share type.", + "required": false, + "type": "string", + "enum": [ + "NFS", + "SMB" + ] + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filters the summary information based on the Rubrik cluster specified by the value of primary_cluster_id. 
Use 'local' for the Rubrik cluster that is hosting the current REST API session.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Summary information for network shares.", + "schema": { + "$ref": "#/definitions/HostShareDetailListResponse" + } + } + }, + "x-group": "hosts" + } + }, + "/polaris/export_info/request/{id}": { + "get": { + "tags": [ + "/polaris" + ], + "summary": "Get asynchronous request details for an export info job", + "description": "Get the details of an asynchronous request that runs an export info job.", + "operationId": "getExportObjectInfoRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of an asynchronous export info request.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Status of an asynchronous export info job.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "polaris" + } + }, + "/vmware/datastore": { + "get": { + "tags": [ + "/vmware/datastore" + ], + "summary": "Get summaries of all the DataStores", + "description": "Get summaries of all the DataStores.", + "operationId": "queryDatastore", + "parameters": [], + "responses": { + "200": { + "description": "Returns the summaries of DataStores.", + "schema": { + "$ref": "#/definitions/DataStoreSummaryListResponse" + } + } + }, + "x-group": "datastore" + } + }, + "/organization/{id}/replication/source": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get replication targets associated with this organization", + "description": "Retrieve the total list of replication targets that have been granted to this organization.", + "operationId": "getOrganizationReplicationSources", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns a list of Replication Sources that have been assigned to this organization.", + "schema": { + "$ref": "#/definitions/ReplicationSourceSummaryListResponse" + } + } + }, + "x-group": "organization_replication" + } + }, + "/node_management/cluster/{id}/failure_tolerance": { + "get": { + "tags": [ + "/node_management" + ], + "summary": "Get Rubrik cluster failure tolerance information", + "description": "Return various numbers of component failures allowed in the Rubrik cluster under which the cluster remains fully functional.", + "operationId": "failureToleranceStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Failures can be tolerated in the cluster.", + "schema": { + "$ref": "#/definitions/FailureToleranceStatus" + } + } + }, + "x-group": "internal_node_management" + } + }, + "/report/data_source/download": { + "post": { + "tags": [ + "/report" + ], + "summary": "Get a CSV file for the report data source specified by type", + "description": "Create a job to generate data source csv file in background. 
Returns a job id to poll the status of job.", + "operationId": "getDataSourceCsvAsync", + "parameters": [ + { + "in": "body", + "name": "data_source", + "description": "Type of the report data source.", + "required": true, + "schema": { + "$ref": "#/definitions/DataSourceDownloadConfig" + } + } + ], + "responses": { + "202": { + "description": "Status of the file download request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "internal_report" + } + }, + "/storage/array_volume_group/snapshot/{id}/export": { + "post": { + "tags": [ + "/storage/array" + ], + "summary": "Export Volume group snapshot", + "description": "Export snapshot of a volume group.", + "operationId": "createStorageArrayExport", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for the export request.", + "required": true, + "schema": { + "$ref": "#/definitions/StorageArrayVolumeGroupExportSnapshotJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Status for the export request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "storage_array_volume_group" + } + }, + "/job/type/backup": { + "post": { + "tags": [ + "/job" + ], + "summary": "REQUIRES SUPPORT TOKEN - Create a backup job instance", + "description": "TODO.", + "operationId": "createVmBackupJob", + "parameters": [ + { + "in": "body", + "name": "config", + "description": "Configuration for the backup job.", + "required": true, + "schema": { + "$ref": "#/definitions/BackupJobConfig" + } + } + ], + "responses": { + "200": { + "description": "Status of backup job creation.", + "schema": { + "$ref": "#/definitions/Status" + } + } + }, + "x-group": "vm" + } + }, + "/cloud_on/azure/recommended_instance_type": { + "get": { + "tags": [ + "/cloud_on" + ], + "summary": "Get the recommended instance type for a given snapshot", + "description": "Get the recommended instance type for a given snapshot.", + "operationId": "getAzureRecommendedInstanceType", + "parameters": [ + { + "name": "snapshot_id", + "in": "query", + "description": "snapshot ID.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns the recommended instance type.", + "schema": { + "$ref": "#/definitions/RecommendedInstanceType" + } + } + }, + "x-group": "cloud_instance" + } + }, + "/organization/{id}/vmware": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get information for authorized VMware resources in an organization", + "description": "Retrieve summary information for the explicitly authorized VMware resources of an organization. Information for a Vmware resource is only included when the organization has an explicit authorization for the resource. 
This endpoint returns an empty list for the default global organization.", + "operationId": "getExplicitlyAuthorizedVmwareResources", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Get VMware resources.", + "schema": { + "$ref": "#/definitions/ManagedObjectSummaryListResponse" + } + } + }, + "x-group": "organization_vmware" + } + }, + "/hyperv/host": { + "get": { + "tags": [ + "/hyperv/host" + ], + "summary": "Get summary of all the Hyper-V hosts", + "description": "Get summary of all the Hyper-V hosts.", + "operationId": "queryHypervHost", + "parameters": [ + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter by ID of effective SLA domain.", + "required": false, + "type": "string" + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter by primary cluster ID, or **local**.", + "required": false, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Limit the number of matches returned.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Ignore these many matches in the beginning.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "name", + "in": "query", + "description": "Search vm by vm name.", + "required": false, + "type": "string" + }, + { + "name": "sla_assignment", + "in": "query", + "description": "Filter by SLA assignment type.", + "required": false, + "type": "string", + "enum": [ + "Derived", + "Direct", + "Unassigned" + ] + }, + { + "name": "sort_by", + "in": "query", + "description": "Sort the result by the given attribute.", + "required": false, + "type": "string", + "enum": [ + "effectiveSlaDomainName", + "name" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending or descending.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "List of Hyper-V host summaries.", + "schema": { + "$ref": "#/definitions/HypervHostSummaryListResponse" + } + } + }, + "x-group": "hyperv_host" + } + }, + "/storage/array_volume_group/request/{id}": { + "get": { + "tags": [ + "/storage/array" + ], + "summary": "Get storage array volume gorup request", + "description": "Retrieve the status and details of a specified asynchronous request for a storage array volume gorup object.", + "operationId": "getStorageArrayVolumeGroupAsyncRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of an asynchronous request for a storage array volume group object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Status of an asynchronous request for a storage array volume group object.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "storage_array_volume_group" + } + }, + "/archive/object_store/{id}/glacier_vault_lock": { + "post": { + "tags": [ + "/archive" + ], + "summary": "Complete or abort an in-progress vault lock operation", + "description": "Update an existing in-progress vault lock operation on a specific Amazon Glacier archival location, by either completing or aborting the current vault lock.\n", + "operationId": "completeOrAbortVaultLock", + "parameters": [ + { + "name": "id", + "in": 
"path", + "description": "ID of the Glacier archival location.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "operation", + "description": "Object describing the operation to perform: Complete or Abort.\n", + "required": true, + "schema": { + "$ref": "#/definitions/GlacierVaultLockOperation" + } + } + ], + "responses": { + "204": { + "description": "Vault lock operation completed successfully." + } + }, + "x-group": "archival" + } + }, + "/managed_volume/snapshot/{id}/download_file": { + "post": { + "tags": [ + "/managed_volume" + ], + "summary": "Download file from Managed volume snapshot", + "description": "Create a download file request.", + "operationId": "createManagedVolumeDownloadFileJob", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of Snapshot.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for a download request.", + "required": true, + "schema": { + "$ref": "#/definitions/ManagedVolumeDownloadFileJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Status for the file download request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "managed_volume" + } + }, + "/stats/sla_domain_storage": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get total storage for all the SLA domains", + "description": "Get total storage for all the SLA domains.", + "operationId": "totalSlaDomainStorage", + "parameters": [], + "responses": { + "200": { + "description": "Returns an object with attribute: name(String), key(String), value(String), frequencyInMin(Integer), lastUpdateTime(Date).", + "schema": { + "$ref": "#/definitions/OfflineStatSummary" + } + } + }, + "x-group": "stats" + } + }, + "/aws/account/security_group": { + "get": { + "tags": [ + "/aws/account" + ], + "summary": "Get security groups by AWS account", + "description": "Retrieve a list of the security groups available for a specified AWS account.", + "operationId": "queryAwsAccountSecurityGroupWithSpec", + "parameters": [ + { + "name": "access_key", + "in": "query", + "description": "AWS Access key.", + "required": true, + "type": "string" + }, + { + "name": "secret_key", + "in": "query", + "description": "AWS Secret key.", + "required": true, + "type": "string", + "x-secret": true + }, + { + "name": "region", + "in": "query", + "description": "AWS region.", + "required": true, + "type": "string" + }, + { + "name": "vpc_id", + "in": "query", + "description": "AWS ID for a virtual private cloud (VPC).", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "List of security group IDs for a specified AWS account.", + "schema": { + "$ref": "#/definitions/SecurityGroupListResponse" + } + } + }, + "x-group": "aws_account" + } + }, + "/cluster/{id}/is_encrypted": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get if it is encrypted", + "description": "Check whether this Rubrik cluster is encrypted (the encryption could be either be software-based encryption or hardware-based encryption).\n", + "operationId": "isEncrypted", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "True when the Rubrik cluster is encrypted.", + "schema": { + "$ref": "#/definitions/BooleanResponse" + } + } + }, + "x-group": "cluster", + 
"x-unauthenticated": true + } + }, + "/cluster/{id}/flash_capacity": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get installed flash capacity", + "description": "Retrieves total installed flash on the Rubrik cluster.", + "operationId": "getFlashCapacity", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "Flash capacity for the Rubrik cluster.", + "schema": { + "$ref": "#/definitions/DiskCapacity" + } + } + }, + "x-group": "cluster" + } + }, + "/cluster/{id}/floating_ip": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get a list of a cluster's floating IP to node mapping", + "description": "Get a list of a cluster's floating IP to node mapping.", + "operationId": "queryFloatingIp", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "name": "floating_ip", + "in": "query", + "description": "Filter the result based on the floating_ip configured on the rubrik cluster.\nThe query returns the node that owns this cluster IP address.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "cluster IP to nodeId dictionary.", + "schema": { + "$ref": "#/definitions/FloatingIpListResponse" + } + } + }, + "x-group": "cluster" + } + }, + "/managed_volume/request/{id}": { + "get": { + "tags": [ + "/managed_volume" + ], + "summary": "Get managed volume async request status", + "description": "Get status of a managed volume async request.", + "operationId": "getManagedVolumeAsyncRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the request.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Status for the async request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "managed_volume" + } + }, + "/vcd/vapp/snapshot/{snapshot_id}/export": { + "post": { + "tags": [ + "/vcd/vapp" + ], + "summary": "(DEPRECATED) Export vApp snapshot", + "description": "Export the specified vApp snapshot into a new vApp or an existing vApp. 
This endpoint will be removed in CDM v6.1 in favor of `POST v1/vcd/vapp/snapshot/{snapshot_id}/export`.", + "operationId": "createVappExport", + "parameters": [ + { + "name": "snapshot_id", + "in": "path", + "description": "ID assigned to the vApp snapshot object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for the request to export the specified vApp snapshot.", + "required": true, + "schema": { + "$ref": "#/definitions/VappExportSnapshotJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Accepted request for asynchronous job to export a vApp snapshot.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "deprecated": true, + "x-group": "vcd_vapp" + } + }, + "/polaris/app_blueprint/cloud_on/request/{id}": { + "get": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Get Blueprint cloud on job status", + "description": "Retrieve the details of a specified asynchronous job for a Blueprint.", + "operationId": "getAppBlueprintCloudOnAsyncRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an asynchronous job.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Status of a Blueprint asynchronous job.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "cloud_instance_app_blueprint" + } + }, + "/polaris/replication/source/{id}": { + "delete": { + "tags": [ + "/polaris/replication/source" + ], + "summary": "Remove the Polaris account as a replication source", + "description": "Remove the Polaris account as a replication source for this cluster.", + "operationId": "deletePolarisReplicationSource", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Managed ID of the Polaris replication source.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Successfully removed the Polaris replication source." 
+ } + }, + "x-group": "polaris_replication_source" + } + }, + "/cluster/{id}/login_banner": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get login banner of the cluster", + "description": "Returns the banner that displays after a successful login.", + "operationId": "getLoginBanner", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "Returns this code on a successful query.", + "schema": { + "$ref": "#/definitions/LoginBannerConfiguration" + } + } + }, + "x-group": "cluster", + "x-unauthenticated": true + }, + "put": { + "tags": [ + "/cluster" + ], + "summary": "Set login banner of the cluster", + "description": "Sets the banner that displays after a successful login.", + "operationId": "setLoginBanner", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "login_banner_config", + "description": "Login banner configuration for the cluster.", + "required": true, + "schema": { + "$ref": "#/definitions/LoginBannerConfiguration" + } + } + ], + "responses": { + "200": { + "description": "Returned if the operation was successful.", + "schema": { + "$ref": "#/definitions/LoginBannerConfiguration" + } + } + }, + "x-group": "cluster" + } + }, + "/nutanix/hierarchy/{id}/children": { + "get": { + "tags": [ + "/nutanix/hierarchy" + ], + "summary": "Get list of immediate descendant objects", + "description": "Retrieve the list of immediate descendant objects for the specified parent.", + "operationId": "getNutanixHierarchyChildren", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the parent Nutanix hierarchy object. To get top-level nodes, use **root** as the ID.", + "required": true, + "type": "string" + }, + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter by ID of effective SLA domain.", + "required": false, + "type": "string" + }, + { + "name": "object_type", + "in": "query", + "description": "Filter by node object type.", + "required": false, + "type": "string" + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter by primary cluster ID, or **local**.", + "required": false, + "type": "string" + }, + { + "name": "is_relic", + "in": "query", + "description": "Filter by isRelic field of Nutanix VM hierarchy object. 
Return both relic and non-relic children if this query is not set.", + "required": false, + "type": "boolean" + }, + { + "name": "limit", + "in": "query", + "description": "Limit the number of matches returned.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Ignore these many matches in the beginning.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "name", + "in": "query", + "description": "Search vm by vm name.", + "required": false, + "type": "string" + }, + { + "name": "sla_assignment", + "in": "query", + "description": "Filter by SLA assignment type.", + "required": false, + "type": "string", + "enum": [ + "Derived", + "Direct", + "Unassigned" + ] + }, + { + "name": "sort_by", + "in": "query", + "description": "Attribute to sort the results on.", + "required": false, + "type": "string", + "enum": [ + "effectiveSlaDomainName", + "name", + "descendantCount.cluster", + "descendantCount.vm" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending or descending.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + }, + { + "name": "snappable_status", + "in": "query", + "description": "Filters Nutanix hierarchy objects based on the specified query value.", + "required": false, + "type": "string", + "enum": [ + "Protectable" + ] + } + ], + "responses": { + "200": { + "description": "Summary list of descendant objects.", + "schema": { + "$ref": "#/definitions/NutanixHierarchyObjectSummaryListResponse" + } + } + }, + "x-group": "nutanix_hierarchy" + } + }, + "/cloud_on/azure/instance_type_list": { + "get": { + "tags": [ + "/cloud_on" + ], + "summary": "Get list of all instance types", + "description": "Get list of all instance types.", + "operationId": "getAzureInstanceTypeList", + "parameters": [], + "responses": { + "200": { + "description": "Returns the list of instance types.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/RecommendedInstanceType" + } + } + } + }, + "x-group": "cloud_instance" + } + }, + "/polaris/app_blueprint/snapshot/{id}": { + "delete": { + "tags": [ + "/polaris/app_blueprint" + ], + "summary": "Delete Blueprint snapshot", + "description": "Designate a Blueprint snapshot as expired and available for garbage collection. The snapshot must be an on-demand snapshot or a snapshot from a Blueprint that is not assigned to an SLA Domain.", + "operationId": "deleteAppBlueprintSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a snapshot object.", + "required": true, + "type": "string" + }, + { + "name": "location", + "in": "query", + "description": "Location of the snapshot to delete. Use _local_ to delete only the local copy of the snapshot. Use _all_ to delete the snapshot locally, on a replication target, and at an archival location.", + "required": true, + "type": "string", + "enum": [ + "all", + "local" + ] + } + ], + "responses": { + "204": { + "description": "Snapshot successfully deleted." 
+ } + }, + "x-group": "app_blueprint" + } + }, + "/report/{id}": { + "delete": { + "tags": [ + "/report" + ], + "summary": "Delete a specific report", + "description": "Delete a specific report specified by reportId.", + "operationId": "deleteReport", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the report.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Returned if report was deleted successfully." + } + }, + "x-group": "internal_report" + }, + "get": { + "tags": [ + "/report" + ], + "summary": "Get information about a specific report", + "description": "Get report details and update status.", + "operationId": "getReport", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the report.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Return report details.", + "schema": { + "$ref": "#/definitions/ReportDetail" + } + } + }, + "x-group": "internal_report" + }, + "patch": { + "tags": [ + "/report" + ], + "summary": "Update a specific report", + "description": "Update a specific report. The report's name, chart parameters, filters and table can be updated. If successful, this will automatically trigger an async job to refresh the report content.", + "operationId": "updateReport", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the report.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "update_config", + "description": "Report. update config.", + "required": true, + "schema": { + "$ref": "#/definitions/ReportUpdate" + } + } + ], + "responses": { + "200": { + "description": "Returns the status of the job that was automatically triggered to refresh the report's content.", + "schema": { + "$ref": "#/definitions/ReportDetail" + } + } + }, + "x-group": "internal_report" + } + }, + "/stats/physical_storage": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get total physical storage being used in the system", + "description": "Get total Physical storage being used in the system.", + "operationId": "physicalStorage", + "parameters": [], + "responses": { + "200": { + "description": "Returns an object with attribute: name(String), key(String), value(String), frequencyInMin(Integer), lastUpdateTime(Date).", + "schema": { + "$ref": "#/definitions/OfflineStatSummary" + } + } + }, + "x-group": "stats" + } + }, + "/aws/account/subnet": { + "get": { + "tags": [ + "/aws/account" + ], + "summary": "Get subnets by AWS account", + "description": "Retrieve a list of the subnets available for a specified AWS account.", + "operationId": "queryAwsAccountSubnetWithSpec", + "parameters": [ + { + "name": "access_key", + "in": "query", + "description": "AWS Access key.", + "required": true, + "type": "string" + }, + { + "name": "secret_key", + "in": "query", + "description": "AWS Secret key.", + "required": true, + "type": "string", + "x-secret": true + }, + { + "name": "region", + "in": "query", + "description": "Name of an AWS region.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Subnets available for the specified AWS crednetials.", + "schema": { + "$ref": "#/definitions/SubnetListResponse" + } + } + }, + "x-group": "aws_account" + } + }, + "/vcd/cluster/{id}/vimserver": { + "get": { + "tags": [ + "/vcd/cluster" + ], + "summary": "(DEPRECATED) Get VimServers of a vCD Cluster", + "description": "Retrieves the VimServer representation of the vCenter 
Servers that are attached to a specified vCD Cluster object. This endpoint will be removed in CDM v6.1 in favor of `GET v1/vcd/cluster/{id}/vimserver`.", + "operationId": "queryVcdVimServer", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a vCD Cluster object.", + "required": true, + "type": "string" + }, + { + "name": "sort_by", + "in": "query", + "description": "Attribute to sort the results on.", + "required": false, + "type": "string", + "enum": [ + "Name", + "Status" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Order for sorting the results, either ascending or descending.", + "required": false, + "type": "string", + "default": "asc", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "Summary information for VimServer objects.", + "schema": { + "$ref": "#/definitions/VimserverSummaryListResponse" + } + } + }, + "deprecated": true, + "x-group": "vcd_cluster" + } + }, + "/vcd/vapp/snapshot/{snapshot_id}/instant_recover/options": { + "get": { + "tags": [ + "/vcd/vapp" + ], + "summary": "(DEPRECATED) Get Instant Recovery information", + "description": "Retrieve the available vApp network connections and the default vApp network connection for the virtual machines in a vApp snapshot. Use this information to configure an Instant Recovery of specified virtual machines in the vApp snapshot. This endpoint will be removed in CDM v6.1 in favor of `GET v1/vcd/vapp/snapshot/{snapshot_id}/instant_recover/options`.", + "operationId": "getVappSnapshotInstantRecoveryOptions", + "parameters": [ + { + "name": "snapshot_id", + "in": "path", + "description": "ID assigned to a vApp snapshot object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "vApp Instant Recovery options.", + "schema": { + "$ref": "#/definitions/VappInstantRecoveryOptions" + } + } + }, + "deprecated": true, + "x-group": "vcd_vapp" + } + }, + "/job/{id}/child_job_instance": { + "get": { + "tags": [ + "/job" + ], + "summary": "REQUIRES SUPPORT TOKEN - list of child job instances", + "description": "REQUIRES SUPPORT TOKEN - Returns an array of child job instances for the given parent job instance. Returns an empty array when the job has no child job instances.", + "operationId": "getChildJobInstances", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of parent job instance.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Job instance details for all children.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/InternalJobInstanceDetail" + } + } + } + }, + "x-group": "internal_job" + } + }, + "/fileset/snapshot/{id}/export_files": { + "post": { + "tags": [ + "/fileset" + ], + "summary": "Create an export job to export multiple files or directories", + "description": "Starts a job that exports one or more files or folders from a fileset backup to the destination host. Returns the job status as of the job creation time. 
This job status includes the job ID.", + "operationId": "createFilesetExportFilesJob", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "The configuration of a job that exports one or more files or folders from a fileset backup.", + "required": true, + "schema": { + "$ref": "#/definitions/FilesetExportFilesJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Status of the export request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "fileset" + } + }, + "/data_location/recover_archived_metadata": { + "post": { + "tags": [ + "/data_location" + ], + "summary": "REQUIRES SUPPORT TOKEN - DataLocationId from which to start the recovery process", + "description": "REQUIRES SUPPORT TOKEN - DataLocationId from which to start the recovery process. A support token is required for this operation.", + "operationId": "recoverArchivedMetadata", + "parameters": [ + { + "in": "body", + "name": "request", + "description": "ID of the data location to recover from.", + "required": true, + "schema": { + "$ref": "#/definitions/RecoverArchivedMetadataRequest" + } + } + ], + "responses": { + "200": { + "description": "TODO.", + "schema": { + "$ref": "#/definitions/RecoverArchivedMetadataResponse" + } + } + }, + "x-group": "archival" + } + }, + "/cluster/{id}/ipmi": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get IPMI details", + "description": "get IPMI details of availability and enabled access in the cluster.", + "operationId": "getIpmi", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "details of IPMI settings.", + "schema": { + "$ref": "#/definitions/IpmiDetails" + } + } + }, + "x-group": "cluster" + }, + "patch": { + "tags": [ + "/cluster" + ], + "summary": "Modify IPMI settings", + "description": "modify IPMI settings.", + "operationId": "modifyIpmi", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "update_properties", + "description": "password to set.", + "required": true, + "schema": { + "$ref": "#/definitions/IpmiUpdate" + } + } + ], + "responses": { + "200": { + "description": "Successfully set the IPMI password for the cluster.", + "schema": { + "$ref": "#/definitions/IpmiDetails" + } + } + }, + "x-group": "cluster" + } + }, + "/managed_volume/snapshot/{id}/browse": { + "get": { + "tags": [ + "/managed_volume" + ], + "summary": "Lists all files in Managed volume snapshot", + "description": "Lists all files and directories in a given path.", + "operationId": "browseManagedVolumeSnapshot", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of snapshot.", + "required": true, + "type": "string" + }, + { + "name": "path", + "in": "query", + "description": "The absolute path of the starting point for the directory listing.", + "required": true, + "type": "string" + }, + { + "name": "offset", + "in": "query", + "description": "Starting position in the list of path entries contained in the query results, sorted by lexicographical order. 
The response includes the specified numbered entry and all higher numbered entries.", + "required": false, + "type": "integer", + "format": "int32" + }, + { + "name": "limit", + "in": "query", + "description": "Maximum number of entries in the response.", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "responses": { + "200": { + "description": "List of files and directories at the specified path.", + "schema": { + "$ref": "#/definitions/BrowseResponseListResponse" + } + } + }, + "x-group": "managed_volume" + } + }, + "/volume_group/request/{id}": { + "get": { + "tags": [ + "/volume_group" + ], + "summary": "Get Volume Group async request details", + "description": "Get details about a Volume Group-related async request.", + "operationId": "getVolumeGroupAsyncRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the request.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Status for the async request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "volume_group" + } + }, + "/cluster/{id}/vlan": { + "post": { + "tags": [ + "/cluster" + ], + "summary": "Adds a VLAN interface on the cluster or re-ips a VLAN", + "description": "Edits tagged interfaces on the cluster by VLAN.", + "operationId": "configureVlan", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "vlan_info", + "description": "VLAN Configuration.", + "required": true, + "schema": { + "$ref": "#/definitions/VlanConfig" + } + } + ], + "responses": { + "204": { + "description": "Successfully added VLANs to the cluster." + }, + "422": { + "description": "Returned if IPs conflict.", + "schema": { + "$ref": "#/definitions/RequestFailedException" + } + } + }, + "x-group": "cluster" + }, + "delete": { + "tags": [ + "/cluster" + ], + "summary": "Drops a VLAN from the cluster", + "description": "Drops interfaces assigned to the specified VLAN on the cluster.", + "operationId": "deleteVlan", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "name": "vlan_id", + "in": "query", + "description": "VLAN.", + "required": true, + "type": "integer", + "format": "int32" + } + ], + "responses": { + "204": { + "description": "Successfully drops VLAN interface."
+ }, + "422": { + "description": "Returned if IPs conflict.", + "schema": { + "$ref": "#/definitions/RequestFailedException" + } + } + }, + "x-group": "cluster" + }, + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get configured VLAN interfaces for a Rubrik cluster", + "description": "Retrieves the VLANs configured on a Rubrik cluster and their IP configurations.", + "operationId": "getVlan", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "name": "vlan", + "in": "query", + "description": "Will retrieve info for a specific VLAN if passed in.", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "responses": { + "200": { + "description": "VLAN configurations for the Rubrik cluster.", + "schema": { + "$ref": "#/definitions/VlanConfigListResponse" + } + } + }, + "x-group": "cluster" + } + }, + "/stats/cloud_storage/ingested": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get snapshot ingested amount for cloud", + "description": "Retrieve the amount of data ingested for all snapshots on the cloud.", + "operationId": "ingestedCloudStorage", + "parameters": [], + "responses": { + "200": { + "description": "Returns an object with attribute: name(String), key(String), value(String), frequencyInMin(Integer), lastUpdateTime(Date).", + "schema": { + "$ref": "#/definitions/OfflineStatSummary" + } + } + }, + "x-group": "stats" + } + }, + "/archive/dca": { + "post": { + "tags": [ + "/archive" + ], + "summary": "Add a new DCA archival location", + "description": "Add a new DCA archival location. Initiates an asynchronous job to connect to the archival location.", + "operationId": "createDcaLocation", + "parameters": [ + { + "in": "body", + "name": "definition", + "description": "Object containing information about the archival location.", + "required": true, + "schema": { + "$ref": "#/definitions/DcaLocationDefinition" + } + } + ], + "responses": { + "202": { + "description": "Returns the job ID for connecting to a new DCA archival location.", + "schema": { + "$ref": "#/definitions/JobScheduledResponse" + } + } + }, + "x-group": "archival" + }, + "get": { + "tags": [ + "/archive" + ], + "summary": "Retrieve an array of DCA archival location objects", + "description": "Returns an array containing DCA archival locations, and information specific to them.
DCA is a subset of the normal object storage locations, so these DCA locations will also be returned by the /archive/object_store GET API.", + "operationId": "queryDcaLocations", + "parameters": [], + "responses": { + "200": { + "description": "Returns list of DCA archival locations.", + "schema": { + "$ref": "#/definitions/DcaLocationDetailListResponse" + } + } + }, + "x-group": "archival" + } + }, + "/archive/qstar/{id}": { + "get": { + "tags": [ + "/archive" + ], + "summary": "Get information for a QStar archival location", + "description": "Retrieve summary information for a specified QStar archival location.", + "operationId": "queryQstarLocationbyId", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a QStar archival location object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Information about a QStar archival location.", + "schema": { + "$ref": "#/definitions/QstarLocationDetail" + } + } + }, + "x-group": "archival" + }, + "patch": { + "tags": [ + "/archive" + ], + "summary": "Update a QStar archival location", + "description": "Update the properties of a QStar archival location object.", + "operationId": "updateQstarLocation", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a QStar archival location object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "updated_definition", + "description": "Object that contains information about the specified QStar archival location.", + "required": true, + "schema": { + "$ref": "#/definitions/QstarLocationUpdate" + } + } + ], + "responses": { + "200": { + "description": "Updated QStar archival location object.", + "schema": { + "$ref": "#/definitions/QstarLocationDetail" + } + } + }, + "x-group": "archival" + } + }, + "/storage/array/host/{id}": { + "get": { + "tags": [ + "/storage/array" + ], + "summary": "Get storage array information for a host", + "description": "Retrieve detailed information about the storage arrays and storage array volumes that are associated with a specified host object.", + "operationId": "getStorageArrayHostSummary", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the registered host.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Storage array information for a host.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/StorageArrayHostDetail" + } + } + } + }, + "x-group": "storage_array_host" + } + }, + "/role/{id}/authorization/bulk_revoke": { + "post": { + "tags": [ + "/authorization" + ], + "summary": "Revokes authorizations from a role", + "description": "Revoke the specified authorizations from the specified role.", + "operationId": "revokeAuthorizations", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "The role ID of the role whose authorizations are being revoked.\n", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "authorization_specifications", + "description": "Specification that describes the authorizations being revoked from the role.\n", + "required": true, + "schema": { + "$ref": "#/definitions/AuthorizationSpecifications" + } + } + ], + "responses": { + "200": { + "description": "Summary of authorizations granted to the role.", + "schema": { + "$ref": "#/definitions/RoleAuthorizationSummary" + } + } + }, + "x-group": "role_authorization" + } + }, + "/organization/{id}/windows/metric": { + "get": { + 
"tags": [ + "/organization" + ], + "summary": "Get Window fileset metrics", + "description": "Retrieve the total object count and total protected object count.", + "operationId": "getWindowsMetric", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specify the organization id.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns an object with metrics.", + "schema": { + "$ref": "#/definitions/OrganizationResourceMetric" + } + } + }, + "x-group": "organization_host" + } + }, + "/aws/ec2_instance": { + "get": { + "tags": [ + "/aws/ec2_instance" + ], + "summary": "Get EC2 instance objects", + "description": "Retrieve summary information for all EC2 instance objects from all AWS account objects.", + "operationId": "queryAwsEc2Instance", + "parameters": [ + { + "name": "limit", + "in": "query", + "description": "Return only the specified number of objects from the query results.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Return a subset of the query results, starting with the specified number in the sequence of results.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "name", + "in": "query", + "description": "Search for EC2 instance objects by matching a string to a part of the ID or name of the EC2 instance object.", + "required": false, + "type": "string" + }, + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter by effective SLA Domain ID.", + "required": false, + "type": "string" + }, + { + "name": "sla_assignment", + "in": "query", + "description": "Specifies the method used to apply an SLA Domain to an EC2 instance object. Possible values are Derived, Direct and Unassigned.", + "required": false, + "type": "string", + "enum": [ + "Derived", + "Direct", + "Unassigned" + ] + }, + { + "name": "region", + "in": "query", + "description": "Filter the query results using the region of the EC2 instance object.", + "required": false, + "type": "string" + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter the query results by using the ID of the primary Rubrik cluster. Use local to refer to the Rubrik cluster that is hosting the current API session.", + "required": false, + "type": "string" + }, + { + "name": "is_relic", + "in": "query", + "description": "Filter the summary information based on the relic status of the EC2 instance. 
Returns both relic and non relic if the parameter is not set.", + "required": false, + "type": "boolean" + }, + { + "name": "sort_by", + "in": "query", + "description": "Specify an attribute to use to sort the query results.", + "required": false, + "type": "string", + "default": "instanceId", + "enum": [ + "instanceId", + "instanceName", + "instanceType", + "accountName", + "region", + "effectiveSlaDomainName", + "slaAssignment" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Specify the sort order to use when sorting query results.", + "required": false, + "type": "string", + "default": "asc", + "enum": [ + "asc", + "desc" + ] + }, + { + "name": "include_backup_task_info", + "in": "query", + "description": "Include backup task information in response.", + "required": false, + "type": "boolean", + "default": false + } + ], + "responses": { + "200": { + "description": "Summary list of EC2 instance objects.", + "schema": { + "$ref": "#/definitions/AwsEc2InstanceSummaryListResponse" + } + } + }, + "x-group": "aws_ec2_instance" + } + }, + "/vmware/vm/snapshot/mount": { + "get": { + "tags": [ + "/vmware/vm" + ], + "summary": "Get summary information for all live mounts", + "description": "Retrieve the following information for all live mounts: ID, snapshot date, ID of source VM, name of source VM, ID of source host, status of the mount, mount event ID, and unmount event ID.", + "operationId": "queryMount", + "parameters": [ + { + "name": "vm_id", + "in": "query", + "description": "Filters live mounts by VM ID.", + "required": false, + "type": "string" + }, + { + "name": "offset", + "in": "query", + "description": "Ignore these many matches in the beginning.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "limit", + "in": "query", + "description": "Limit the number of matches returned. Default is 25.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + } + ], + "responses": { + "200": { + "description": "Returns summary information for all live mounts.", + "schema": { + "$ref": "#/definitions/VmwareVmMountSummaryListResponse" + } + } + }, + "deprecated": true, + "x-group": "vm" + } + }, + "/stats/sla_domain_storage/time_series": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get a timeseries of storage growth for the given SLA domain", + "description": "Get a timeseries of storage growth timeseries for the given SLA domain.", + "operationId": "slaDomainStorageTimeseries", + "parameters": [ + { + "name": "id", + "in": "query", + "description": "SLA Domain ID.", + "required": true, + "type": "string" + }, + { + "name": "range", + "in": "query", + "description": "Range for timeseries. eg: -1h, -1min, etc. 
Default value is -1h.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns a timeSeries depicting bytes per second.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/TimeStat" + } + } + } + }, + "x-group": "stats" + } + }, + "/aws/account/{id}/security_group": { + "get": { + "tags": [ + "/aws/account" + ], + "summary": "Get security groups by AWS account", + "description": "Retrieve a list of the security groups available for a specified AWS account ID.", + "operationId": "queryAwsAccountSecurityGroup", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to an AWS account object.", + "required": true, + "type": "string" + }, + { + "name": "vpc_id", + "in": "query", + "description": "AWS ID for a virtual private cloud (VPC).", + "required": true, + "type": "string" + }, + { + "name": "region", + "in": "query", + "description": "Name of an AWS region.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "List of security group IDs for a specified AWS account.", + "schema": { + "$ref": "#/definitions/SecurityGroupListResponse" + } + } + }, + "x-group": "aws_account" + } + }, + "/vmware/vm/snapshot/{id}/download_files": { + "post": { + "tags": [ + "/vmware/vm" + ], + "summary": "Download multiple files and folders", + "description": "Start an asynchronous job to download multiple files and folders from a specified VMware backup. The response returns an asynchronous request ID. Get the URL for downloading the ZIP file including the specific files/folders by sending a GET request to 'vmware/vm/request/{id}'.", + "operationId": "createDownloadFilesJob", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a virtual machine snapshot object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration object containing an array with the full paths of the files and folders to download. 
The array has to contain at least one full path.", + "required": true, + "schema": { + "$ref": "#/definitions/DownloadFilesJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Status of request for file and folder download.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "vm" + } + }, + "/cluster/{id}/dns_search_domain": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get DNS search domains", + "description": "Retrieve a list of the DNS search domains assigned to the Rubrik cluster.", + "operationId": "getClusterDnsSearchDomains", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "List of the DNS search domains assigned to the specified Rubrik cluster.", + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "x-group": "cluster" + }, + "post": { + "tags": [ + "/cluster" + ], + "summary": "Assign DNS search domains", + "description": "Assign DNS search domains to the Rubrik cluster.", + "operationId": "setClusterDnsSearchDomains", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster or *me* for self.", + "required": true, + "type": "string", + "default": "me" + }, + { + "in": "body", + "name": "domains", + "description": "List of the DNS search domains.", + "required": true, + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + } + ], + "responses": { + "204": { + "description": "Successfully assigned the specified DNS search domains to the Rubrik cluster." + } + }, + "x-group": "cluster" + } + }, + "/config/usersettable_hyperv": { + "get": { + "tags": [ + "/config" + ], + "summary": "Fetch the global Hyperv configuration", + "description": "Fetch the global Hyperv configuration.", + "operationId": "getUserSettableHypervConfig", + "parameters": [], + "responses": { + "200": { + "description": "global configuration.", + "schema": { + "$ref": "#/definitions/UserSettableGlobalHypervConfig" + } + } + }, + "x-group": "internal_config" + }, + "patch": { + "tags": [ + "/config" + ], + "summary": "Update the global Hyperv configuration", + "description": "Update the global Hyperv configuration.", + "operationId": "updateUserSettableHypervConfig", + "parameters": [ + { + "in": "body", + "name": "new_values", + "description": "New configuration values.", + "required": true, + "schema": { + "$ref": "#/definitions/UserSettableGlobalHypervConfig" + } + } + ], + "responses": { + "200": { + "description": "global configuration.", + "schema": { + "$ref": "#/definitions/UserSettableGlobalHypervConfig" + } + } + }, + "x-group": "internal_config" + } + }, + "/fileset/snapshot/{id}/download_files": { + "post": { + "tags": [ + "/fileset" + ], + "summary": "Download files from a fileset backup", + "description": "Start an asynchronous job to download multiple files and folders from a specified fileset backup. The response returns an asynchronous request ID.
Get the URL for downloading the ZIP file including the specific files/folders by sending a GET request to 'fileset/request/{id}'.", + "operationId": "createFilesetDownloadFilesJob", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a fileset backup object.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration information for a job to download files and folders from a fileset backup.", + "required": true, + "schema": { + "$ref": "#/definitions/FilesetDownloadFilesJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Status of an async job to download files and folders from a fileset backup.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "fileset" + } + }, + "/polaris/export_info": { + "post": { + "tags": [ + "/polaris" + ], + "summary": "Export information about a given object type", + "description": "Trigger an asynchronous job that uploads a file with metadata information about the given object type.", + "operationId": "exportObjectInfo", + "parameters": [ + { + "in": "body", + "name": "export_info_config", + "description": "Config for export info job.", + "required": true, + "schema": { + "$ref": "#/definitions/ExportInfoConfig" + } + } + ], + "responses": { + "200": { + "description": "Status of an asynchronous job to get object information.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "polaris" + } + }, + "/authorization/effective/roles": { + "get": { + "tags": [ + "/authorization" + ], + "summary": "Queries the current effective authorizations by role", + "description": "Queries the current effective authorizations by role.", + "operationId": "effectiveRoles", + "parameters": [ + { + "name": "principal", + "in": "query", + "description": "Optional principal whose authorizations to query; if not specified, the current authenticated user will be used.\n", + "required": false, + "type": "string" + }, + { + "name": "resource_types", + "in": "query", + "description": "Optional comma-separated list of resource types. If specified, only those roles and privileges are returned for which the principal has an authorization for at least one of the types in the list. 
Accepted values are: .\n", + "required": false, + "type": "array", + "items": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "The list of roles and privileges for which the desired principal has effective authorizations.\n", + "schema": { + "$ref": "#/definitions/EffectiveAuthorizationRoles" + } + }, + "400": { + "description": "Returned if an invalid ManagedId is given.", + "schema": { + "type": "string" + } + } + }, + "x-group": "authorization" + } + }, + "/vmware/vm/snapshot/{id}/mount_disks": { + "post": { + "tags": [ + "/vmware/vm" + ], + "summary": "Attaching disks from a snapshot to an existing virtual machine", + "description": "Requests a snapshot mount to attach disks to an existing virtual machine.", + "operationId": "createMountDiskJob", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of a snapshot.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration for the mount request.", + "required": true, + "schema": { + "$ref": "#/definitions/MountDiskJobConfig" + } + } + ], + "responses": { + "202": { + "description": "Status of the mount request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "vm" + } + }, + "/job/instance": { + "get": { + "tags": [ + "/job" + ], + "summary": "REQUIRES SUPPORT TOKEN - Query for job instances", + "description": "REQUIRES SUPPORT TOKEN - This is just for easier diagnosis to figure out what jobs there are in the system. A support token is required for this operation.", + "operationId": "queryInstances", + "parameters": [ + { + "name": "status", + "in": "query", + "description": "Status of job instance.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "TODO.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/InternalJobInstanceDetail" + } + } + } + }, + "x-group": "internal_job" + } + }, + "/vmware/guest_credential": { + "post": { + "tags": [ + "/vmware/guest_credential" + ], + "summary": "Create a new guest OS credential", + "description": "Create a new guest OS credential.", + "operationId": "createGuestCredential", + "parameters": [ + { + "in": "body", + "name": "definition", + "description": "Object for guest OS credential definition.", + "required": true, + "schema": { + "$ref": "#/definitions/GuestCredentialDefinition" + } + } + ], + "responses": { + "201": { + "description": "Returns the created guest OS credential object.", + "schema": { + "$ref": "#/definitions/GuestCredentialDetail" + } + } + }, + "x-group": "guest_credential" + }, + "get": { + "tags": [ + "/vmware/guest_credential" + ], + "summary": "Summary of all guest OS credentials", + "description": "Retrieve the ID, domain, username and password for all guest OS credentials.", + "operationId": "queryGuestCredential", + "parameters": [], + "responses": { + "200": { + "description": "Guest OS credentials.", + "schema": { + "$ref": "#/definitions/GuestCredentialDetailListResponse" + } + } + }, + "x-group": "guest_credential" + } + }, + "/stats/cloud_storage/logical": { + "get": { + "tags": [ + "/stats" + ], + "summary": "Get snapshot logical cloud storage", + "description": "Retrieve the amount of logical cloud storage used by all snapshots from the Rubrik cluster.", + "operationId": "logicalCloudStorage", + "parameters": [], + "responses": { + "200": { + "description": "Returns an object with attribute: name(String), key(String), value(String), 
frequencyInMin(Integer), lastUpdateTime(Date).", + "schema": { + "$ref": "#/definitions/OfflineStatSummary" + } + } + }, + "x-group": "stats" + } + }, + "/cloud_on/aws/security_group": { + "get": { + "tags": [ + "/cloud_on" + ], + "summary": "Get list of security group IDs queried by Aws location and VPC IDs", + "operationId": "queryAwsSecurityGroup", + "parameters": [ + { + "name": "data_location_id", + "in": "query", + "description": "Aws data location ID.", + "required": true, + "type": "string" + }, + { + "name": "vpc_id", + "in": "query", + "description": "VPC ID.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns a list of security group IDs recognizable by Aws.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/SecurityGroup" + } + } + } + }, + "x-group": "cloud_instance" + } + }, + "/vmware/standalone_host/datastore": { + "post": { + "tags": [ + "/vmware/standalone_host" + ], + "summary": "List ESXi datastores", + "description": "Retrieve a list of the datastores for a specified ESXi host.", + "operationId": "getEsxiDatastores", + "parameters": [ + { + "in": "body", + "name": "login_info", + "required": true, + "schema": { + "$ref": "#/definitions/VsphereLoginInfo" + } + } + ], + "responses": { + "200": { + "description": "Returns a list of all datastore names for host.", + "schema": { + "$ref": "#/definitions/DatastoreListResponse" + } + } + }, + "x-group": "standalone_host" + } + }, + "/aws/account/dca/{id}": { + "patch": { + "tags": [ + "/aws/account/dca" + ], + "summary": "Update a DCA AWS account", + "description": "Provide updated information for a specified DCA AWS account object.", + "operationId": "updateDcaAwsAccount", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of a DCA AWS account object to update.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "config", + "description": "Configuration to use to update an AWS account.", + "required": true, + "schema": { + "$ref": "#/definitions/DcaAwsAccountUpdate" + } + } + ], + "responses": { + "200": { + "description": "Updated AWS account details.", + "schema": { + "$ref": "#/definitions/AwsAccountDetail" + } + } + }, + "x-group": "aws_account" + } + }, + "/organization/{id}/hyperv/vm/metric": { + "get": { + "tags": [ + "/organization" + ], + "summary": "Get hyperv vm metrics", + "description": "Retrieve the total object count, total protected object and no sla object count.", + "operationId": "getHypervVmMetric", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Specifies the ID of an organization.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns an object with metrics.", + "schema": { + "$ref": "#/definitions/OrganizationResourceMetric" + } + } + }, + "x-group": "organization_hyperv" + } + }, + "/storage/array/hierarchy/{id}/descendants": { + "get": { + "tags": [ + "/storage/array" + ], + "summary": "Get list of descendant objects", + "description": "Retrieve the list of descendant objects for the specified storage array.", + "operationId": "getStorageArrayHierarchyDescendants", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID assigned to a storage array hierarchy object. 
To specify a top-level node, use root as the ID.", + "required": true, + "type": "string" + }, + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter by the ID of the effective SLA domain.", + "required": false, + "type": "string" + }, + { + "name": "object_type", + "in": "query", + "description": "Filter by the object type.", + "required": false, + "type": "string", + "enum": [ + "AppBlueprint", + "AwsAccount", + "CloudCompute", + "CloudComputeRegion", + "CloudNativeAuthzRoot", + "ComputeCluster", + "DataCenter", + "DataStore", + "Ec2Instance", + "ExclusionPattern", + "ExclusionPatternAuthzRoot", + "Folder", + "Hdfs", + "HostFailoverCluster", + "HostRoot", + "HypervAuthzRoot", + "HypervCluster", + "HypervScvmm", + "HypervServer", + "HypervVirtualMachine", + "FailoverClusterApp", + "KuprHost", + "KuprHostAuthzRoot", + "LinuxFileset", + "LinuxHost", + "LinuxHostAuthzRoot", + "ManagedVolume", + "ManagedVolumeAuthzRoot", + "ManagedVolumeRoot", + "MssqlAuthzRoot", + "MssqlDatabase", + "MssqlAvailabilityGroup", + "MssqlInstance", + "NasHost", + "NasHostAuthzRoot", + "NasSystem", + "NfsHostShare", + "NutanixAuthzRoot", + "NutanixCluster", + "NutanixVirtualMachine", + "OracleAuthzRoot", + "OracleDatabase", + "OracleHost", + "OracleRac", + "OracleRoot", + "SapHanaAuthzRoot", + "SapHanaDatabase", + "SapHanaSystem", + "ShareFileset", + "SlaDomain", + "SmbHostShare", + "StorageArray", + "StorageArrayVolume", + "StorageArrayVolumeGroup", + "Storm", + "User", + "vCenter", + "Vcd", + "VcdAuthzRoot", + "VcdCatalog", + "VcdOrg", + "VcdOrgVdc", + "VcdVapp", + "VcdVimServer", + "VirtualMachine", + "VmwareAuthzRoot", + "VmwareHost", + "VmwareResourcePool", + "VmwareStoragePolicy", + "VmwareTag", + "VmwareTagCategory", + "WindowsCluster", + "WindowsFileset", + "WindowsHost", + "WindowsHostAuthzRoot", + "WindowsVolumeGroup" + ] + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter by the ID of the primary Rubrik CDM instance. 
Use local to specify the Rubrik CDM instance that is hosting the current API session.", + "required": false, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Return only the specified number of objects from the query results.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Return a subset of the query results, starting with the specified number in the sequence of results.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "name", + "in": "query", + "description": "Filter by the object name.", + "required": false, + "type": "string" + }, + { + "name": "sla_assignment", + "in": "query", + "description": "Filter by the SLA assignment type.", + "required": false, + "type": "string", + "enum": [ + "Derived", + "Direct", + "Unassigned" + ] + }, + { + "name": "sort_by", + "in": "query", + "description": "Specify an attribute to use to sort the query results.", + "required": false, + "type": "string", + "enum": [ + "name", + "effectiveSlaDomainName", + "descendantCountArray", + "descendantCountVolume", + "descendantCountVolumeGroup" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Specify the sort order to use when sorting query results, either ascending or descending.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "Summary list of descendants of a storage array hierarchy object.", + "schema": { + "$ref": "#/definitions/StorageArrayHierarchyObjectSummaryListResponse" + } + } + }, + "x-group": "storage_array_volume_group" + } + }, + "/data_location/object_store/remove_bucket": { + "post": { + "tags": [ + "/data_location" + ], + "summary": "REQUIRES SUPPORT TOKEN - Remove all buckets matching given prefix", + "description": "REQUIRES SUPPORT TOKEN - To be used by internal tests to remove all object store buckets matching given prefix. Returns a list of buckets successfully removed. A support token is required for this operation.", + "operationId": "removeObjectStoreBucket", + "parameters": [ + { + "in": "body", + "name": "request", + "description": "Remove bucket request configurations.", + "required": true, + "schema": { + "$ref": "#/definitions/RemoveObjectStoreBucketRequest" + } + } + ], + "responses": { + "200": { + "description": "List of buckets removed.", + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "x-group": "archival" + } + }, + "/archive/object_store": { + "post": { + "tags": [ + "/archive" + ], + "summary": "Add an object storage archival location", + "description": "Start an asynchronous job to add a new object storage location.\n", + "operationId": "createObjectStoreLocation", + "parameters": [ + { + "in": "body", + "name": "definition", + "description": "Object containing information about the archival location.", + "required": true, + "schema": { + "$ref": "#/definitions/ObjectStoreLocationDefinition" + } + } + ], + "responses": { + "202": { + "description": "Returns the job ID for connecting to a new object storage location.\n", + "schema": { + "$ref": "#/definitions/JobScheduledResponse" + } + } + }, + "x-group": "archival" + }, + "get": { + "tags": [ + "/archive" + ], + "summary": "Get object storage archival locations", + "description": "Retrieve an array of archival locations from the Rubrik cluster. 
The array contains all object storage locations, but does not include NFS and QStar locations.\n", + "operationId": "queryObjectStoreLocations", + "parameters": [], + "responses": { + "200": { + "description": "Returns an array of object storage locations.\n", + "schema": { + "$ref": "#/definitions/ObjectStoreLocationDetailListResponse" + } + } + }, + "x-group": "archival" + } + }, + "/hyperv/host/{id}/refresh": { + "post": { + "tags": [ + "/hyperv/host" + ], + "summary": "Refresh Hyper-V host metadata", + "description": "Create a job to refresh the metadata for the specified Hyper-V host.", + "operationId": "refreshHypervHost", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Hyper-V host.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Job Instance ID of the scheduled Hyper-V host refresh job.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "hyperv_host" + } + }, + "/host/bulk-delete": { + "post": { + "tags": [ + "/host" + ], + "summary": "Deregister hosts", + "description": "Deregister specified network hosts from Rubrik clusters.", + "operationId": "bulkDeleteHost", + "parameters": [ + { + "in": "body", + "name": "ids", + "description": "Provide the ID of each host to deregister.", + "required": true, + "schema": { + "type": "array", + "items": { + "type": "string", + "description": "ID of the registered host." + } + } + } + ], + "responses": { + "204": { + "description": "Specified hosts were successfully deregistered." + } + }, + "x-group": "hosts" + } + }, + "/node_management/default_gateway": { + "get": { + "tags": [ + "/node_management" + ], + "summary": "Get current default gateway", + "description": "Get current default gateway.", + "operationId": "getDefaultGateway", + "parameters": [], + "responses": { + "200": { + "description": "Returns current default gateways for both IPv4 and IPv6.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/RouteConfig" + } + } + }, + "422": { + "description": "Returned response when failed to get default gateway.", + "schema": { + "$ref": "#/definitions/RequestFailedException" + } + } + }, + "x-group": "internal_node_management" + }, + "post": { + "tags": [ + "/node_management" + ], + "summary": "Update the default gateway", + "description": "Update the default gateway.", + "operationId": "setDefaultGateway", + "parameters": [ + { + "in": "body", + "name": "default_gateway", + "description": "Default gateway.", + "required": true, + "schema": { + "$ref": "#/definitions/RouteConfig" + } + } + ], + "responses": { + "204": { + "description": "Returned if default gateway is updated."
+ }, + "422": { + "description": "Returned response when default gateway has failed to update.", + "schema": { + "$ref": "#/definitions/RequestFailedException" + } + } + }, + "x-group": "internal_node_management" + } + }, + "/host/request/{id}": { + "get": { + "tags": [ + "/host" + ], + "summary": "Get host async request", + "description": "Get details about a host-related async request.", + "operationId": "getHostAsyncRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the request.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Status for the async request.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "hosts" + } + }, + "/hyperv/cluster": { + "get": { + "tags": [ + "/hyperv/cluster" + ], + "summary": "Get summary of all the Hyper-V clusters", + "description": "Get summary of all the Hyper-V clusters.", + "operationId": "queryHypervCluster", + "parameters": [ + { + "name": "effective_sla_domain_id", + "in": "query", + "description": "Filter by ID of effective SLA domain.", + "required": false, + "type": "string" + }, + { + "name": "primary_cluster_id", + "in": "query", + "description": "Filter by primary cluster ID, or **local**.", + "required": false, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Limit the number of matches returned.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "offset", + "in": "query", + "description": "Ignore these many matches in the beginning.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "name", + "in": "query", + "description": "Search vm by vm name.", + "required": false, + "type": "string" + }, + { + "name": "sla_assignment", + "in": "query", + "description": "Filter by SLA assignment type.", + "required": false, + "type": "string", + "enum": [ + "Derived", + "Direct", + "Unassigned" + ] + }, + { + "name": "sort_by", + "in": "query", + "description": "A comma-separated list of attributes to sort the results on.", + "required": false, + "type": "string", + "enum": [ + "effectiveSlaDomainName", + "name" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "Sort order, either ascending or descending.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "List of Hyper-V cluster summaries.", + "schema": { + "$ref": "#/definitions/HypervClusterSummaryListResponse" + } + } + }, + "x-group": "hyperv_cluster" + } + }, + "/cluster/{id}/auto_removed_node": { + "get": { + "tags": [ + "/cluster" + ], + "summary": "Get auto removed nodes", + "description": "Retrieve the list of nodes that were automatically removed from the Rubrik cluster for which the node removal has not been acknowledged.", + "operationId": "getClusterAutoRemovedNodes", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Rubrik cluster.
Use the string \"me\" for a cluster to identify itself.", + "required": true, + "type": "string", + "default": "me" + } + ], + "responses": { + "200": { + "description": "List of auto removed node statuses.", + "schema": { + "$ref": "#/definitions/AutoRemovedNodeStatusListResponse" + } + } + }, + "x-group": "cluster" + } + }, + "/nutanix/vm/{id}": { + "get": { + "tags": [ + "/nutanix/vm" + ], + "summary": "Get VM details", + "description": "Detailed view of a Nutanix VM.", + "operationId": "getNutanixVm", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Nutanix Virtual Machine.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Return details about the Nutanix Virtual machine.", + "schema": { + "$ref": "#/definitions/NutanixVmDetail" + } + } + }, + "x-group": "nutanix_vm" + }, + "patch": { + "tags": [ + "/nutanix/vm" + ], + "summary": "Patch VM", + "description": "Patch VM with specified properties.", + "operationId": "patchNutanixVm", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of Nutanix Virtual Machine.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "vm_patch_properties", + "description": "Properties to patch.", + "required": true, + "schema": { + "$ref": "#/definitions/NutanixVmPatch" + } + } + ], + "responses": { + "200": { + "description": "Return details about Nutanix virtual machine.", + "schema": { + "$ref": "#/definitions/NutanixVmDetail" + } + } + }, + "x-group": "nutanix_vm" + } + }, + "/vmware/vcenter/{id}/tag": { + "get": { + "tags": [ + "/vmware/vcenter" + ], + "summary": "Get Tags associated with vCenter", + "description": "Get a list of tags on this vCenter.", + "operationId": "getVsphereTags", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the vCenter Server.", + "required": true, + "type": "string" + }, + { + "name": "category_id", + "in": "query", + "description": "ID of the Category to filter the Tags by.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "List of vCenter Tags.", + "schema": { + "$ref": "#/definitions/VsphereTagListResponse" + } + } + }, + "x-group": "vcenter" + } + }, + "/unmanaged_object/snapshot/bulk_delete": { + "post": { + "tags": [ + "/unmanaged_object" + ], + "summary": "(DEPRECATED)Bulk delete all unmanaged snapshots for the given objects", + "description": "Bulk delete all unmanaged snapshots for the objects specified by objectId/objectType pairings. API returning success does not guarantee that the snapshots will be expired.", + "operationId": "bulkDeleteUnmanagedObjectSnapshots", + "parameters": [ + { + "in": "body", + "name": "delete_unmanaged_config", + "description": "List of objectId/objectType pairings for which to delete all snapshots.", + "required": true, + "schema": { + "$ref": "#/definitions/BulkDeleteUnmanagedObjectSnapshots" + } + } + ], + "responses": { + "204": { + "description": "OK on success, success doesn't imply all snapshots will be deleted." + }, + "422": { + "description": "Returned if delete API fails.", + "schema": { + "$ref": "#/definitions/RequestFailedException" + } + } + }, + "x-group": "unmanaged_object" + } + }, + "/search": { + "get": { + "tags": [ + "/search" + ], + "summary": "Search for file", + "description": "Search for a file within the backup of a specified virtual machine, fileset, host, or storage array volume group. 
Perform the search by using a search string that consists of a prefix portion of the full path of the file or a prefix portion of the filename.", + "operationId": "searchQuery", + "parameters": [ + { + "name": "managed_id", + "in": "query", + "description": "ID assigned to the object to be searched. The object must represent a snapshot or backup of a virtual machine, fileset, host, or storage array volume group.", + "required": true, + "type": "string" + }, + { + "name": "query_string", + "in": "query", + "description": "Search string that consists of a prefix portion of the full path of the file or a prefix portion of the filename.", + "required": true, + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "Maximum number of entries in the response.", + "required": false, + "type": "integer", + "format": "int32" + }, + { + "name": "cursor", + "in": "query", + "description": "Pagination cursor returned by the previous request.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Search results.", + "schema": { + "$ref": "#/definitions/SearchResponseListResponse" + } + } + }, + "x-group": "search" + } + }, + "/hierarchy/{id}": { + "get": { + "tags": [ + "/hierarchy" + ], + "summary": "Get summary of a hierarchy object", + "description": "Retrieve details for the specified storage hierarchy object.", + "operationId": "getHierarchyObject", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the hierarchy object.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Details of the hierarchy object.", + "schema": { + "$ref": "#/definitions/ManagedHierarchyObjectSummary" + } + } + }, + "x-group": "hierarchy" + } + }, + "/mssql/db/{id}/recoverable_range/download": { + "delete": { + "tags": [ + "/mssql" + ], + "summary": "(DEPRECATED) Delete downloaded recoverable ranges of a Microsoft SQL database", + "description": "Deletes all local snapshots and logs that have previously been downloaded. A begin and/or end time can be provided to delete only the downloaded snapshots and logs that fall within the window. The time is relative to when the snapshot or log backup was originally taken, not downloaded. Parts of the window may not be deleted if certain log files must be kept to preserve times outside the window. Data is deleted in the background. To check the status of the deletion, poll /mssql/db/recoverable_range/download/{id}. This endpoint will be removed in CDM v6.0 in favor of `DELETE v1/mssql/db/{id}/recoverable_range/download`.", + "operationId": "deleteDownloadedMssqlDbRecoverableRanges", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of the Microsoft SQL database.", + "required": true, + "type": "string" + }, + { + "name": "after_time", + "in": "query", + "description": "Delete only the downloaded snapshots and logs taken after this time. The date-time string should be in ISO8601 format, such as \"2016-01-01T01:23:45.678\".", + "required": false, + "type": "string", + "format": "date-time" + }, + { + "name": "before_time", + "in": "query", + "description": "Delete only the downloaded snapshots and logs taken before this time. 
The date-time string should be in ISO8601 format, such as \"2016-01-01T01:23:45.678\".", + "required": false, + "type": "string", + "format": "date-time" + } + ], + "responses": { + "202": { + "description": "Returns the job ID to check the progress of deleting the downloaded snapshots and logs.", + "schema": { + "$ref": "#/definitions/JobScheduledResponse" + } + } + }, + "deprecated": true, + "x-group": "mssql" + } + }, + "/vcd/vapp/snapshot/{snapshot_id}/export/options": { + "get": { + "tags": [ + "/vcd/vapp" + ], + "summary": "(DEPRECATED) Get exportable network configurations", + "description": "Retrieve summary information for the vApp networks that are available for network connections from the virtual machines in the exported vApp snapshot. The summary also specifies the default vApp network for each virtual machine network connection. This endpoint will be removed in CDM v6.1 in favor of `GET v1/vcd/vapp/snapshot/{snapshot_id}/export/options`.", + "operationId": "getVappSnapshotExportOptions", + "parameters": [ + { + "name": "snapshot_id", + "in": "path", + "description": "ID assigned to the vApp snapshot object to export.", + "required": true, + "type": "string" + }, + { + "name": "export_mode", + "in": "query", + "description": "Target type for the specified vApp export.", + "required": true, + "type": "string", + "enum": [ + "ExportToNewVapp", + "ExportToTargetVapp" + ] + }, + { + "name": "target_vapp_id", + "in": "query", + "description": "ID assigned to the target vApp object, when the export is into an existing vApp. When the export is not into a target vApp, remove the 'target_vapp_id' member.", + "required": false, + "type": "string" + }, + { + "name": "target_org_vdc_id", + "in": "query", + "description": "ID assigned to a target organization VDC object. Use the ID when exporting a vApp snapshot to create a new vApp on the specified target organization VDC. 
When the exported vApp snapshot is not used to create a new vApp on a target organization VDC, remove the 'target_org_vdc_id' member.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "vApp snapshot export options.", + "schema": { + "$ref": "#/definitions/VappExportOptions" + } + } + }, + "deprecated": true, + "x-group": "vcd_vapp" + } + }, + "/cloud_on/aws/subnet": { + "get": { + "tags": [ + "/cloud_on" + ], + "summary": "Get a list of subnets queried by Aws location ID", + "operationId": "queryAwsSubnet", + "parameters": [ + { + "name": "data_location_id", + "in": "query", + "description": "data location ID.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Returns list of subnet IDs and respective VPC IDs in AWS.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Subnet" + } + } + } + }, + "x-group": "cloud_instance" + } + }, + "/polaris/export_thrift/request/{id}": { + "get": { + "tags": [ + "/polaris" + ], + "summary": "Get asynchronous request details for an export Thrift metadata job", + "description": "Get the details of an asynchronous request that runs an export Thrift metadata job.", + "operationId": "getExportThriftMetadataRequestStatus", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of an asynchronous export thrift metadata request.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Status of an asynchronous export thrift metadata job.", + "schema": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "x-group": "polaris" + } + }, + "/unmanaged_object": { + "get": { + "tags": [ + "/unmanaged_object" + ], + "summary": "(DEPRECATED) Get summary of all the objects with unmanaged snapshots", + "description": "Get summary of all the objects with unmanaged snapshots. This endpoint will be removed in CDM v5.3 in favor of `GET /v1/unmanaged_object`.", + "operationId": "queryUnmanagedObject", + "parameters": [ + { + "name": "limit", + "in": "query", + "description": "Limit the number of matches returned.", + "required": false, + "type": "integer", + "minimum": 0, + "format": "int32" + }, + { + "name": "after_id", + "in": "query", + "description": "First unmanaged object after which objects should be retrieved.", + "required": false, + "type": "string" + }, + { + "name": "search_value", + "in": "query", + "description": "Search object by object name.", + "required": false, + "type": "string" + }, + { + "name": "unmanaged_status", + "in": "query", + "description": "Filter by the type of the object. If not specified, will return all objects. 
Valid attributes are Protected, Relic and Unprotected.", + "required": false, + "type": "string", + "enum": [ + "Protected", + "Relic", + "Unprotected", + "ReplicatedRelic", + "RemoteUnprotected" + ] + }, + { + "name": "object_type", + "in": "query", + "description": "Filter by the type of the unmanaged object.", + "required": false, + "type": "string", + "enum": [ + "VirtualMachine", + "MssqlDatabase", + "LinuxFileset", + "WindowsFileset", + "ShareFileset", + "NutanixVirtualMachine", + "HypervVirtualMachine", + "ManagedVolume", + "Ec2Instance", + "StorageArrayVolumeGroup", + "VcdVapp", + "LinuxHost", + "WindowsHost", + "OracleDatabase", + "VolumeGroup", + "AppBlueprint" + ] + }, + { + "name": "sort_by", + "in": "query", + "description": "Sort the result by given attribute.", + "required": false, + "type": "string", + "enum": [ + "Name", + "UnmanagedStatus", + "Location", + "UnmanagedSnapshotCount", + "LocalStorage", + "ArchiveStorage", + "RetentionSlaDomainName", + "ObjectType", + "SnapshotCount", + "AutoSnapshotCount", + "ManualSnapshotCount" + ] + }, + { + "name": "sort_order", + "in": "query", + "description": "The sort order. Defaults to asc if not specified.", + "required": false, + "type": "string", + "enum": [ + "asc", + "desc" + ] + } + ], + "responses": { + "200": { + "description": "Get page summary about objects with unmanaged snapshots.", + "schema": { + "$ref": "#/definitions/UnmanagedObjectSummaryListResponse" + } + } + }, + "deprecated": true, + "x-group": "unmanaged_object" + } + }, + "/polaris/archive/object_store/{id}": { + "patch": { + "tags": [ + "/polaris/archive" + ], + "summary": "Update a Polaris managed object storage archival location", + "description": "Update the properties of a Polaris managed object storage location. To update the bucket count, specify a value equal to or greater than the existing bucket count.\n", + "operationId": "updatePolarisManagedObjectStoreLocation", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "Polaris Managed ID of the archival location.", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "updated_definition", + "description": "Object containing information about the archival location.", + "required": true, + "schema": { + "$ref": "#/definitions/ObjectStoreUpdateDefinition" + } + } + ], + "responses": { + "200": { + "description": "Returns the successfully updated archival location object.", + "schema": { + "$ref": "#/definitions/ObjectStoreLocationDetail" + } + } + }, + "x-group": "polaris_archival_location" + } + } + }, + "produces": [ + "application/json" + ], + "securityDefinitions": { + "BasicAuth": { + "type": "basic" + }, + "Bearer": { + "type": "apiKey", + "name": "Authorization", + "in": "header" + } + }, + "security": [ + { + "BasicAuth": [] + }, + { + "Bearer": [] + } + ], + "tags": [ + { + "name": "/archive", + "description": "INTERNAL Archival." + }, + { + "name": "/archive/location", + "description": "INTERNAL Archival locations." + }, + { + "name": "/archive/nfs", + "description": "INTERNAL NFS archival." + }, + { + "name": "/archive/qstar", + "description": "INTERNAL QSTAR archival." + }, + { + "name": "/authorization", + "description": "INTERNAL Authorization." + }, + { + "name": "/aws", + "description": "INTERNAL AWS native backup." + }, + { + "name": "/aws/account", + "description": "INTERNAL AWS account." + }, + { + "name": "/aws/account/dca", + "description": "INTERNAL DCA AWS account."
+ }, + { + "name": "/aws/ec2_instance", + "description": "INTERNAL AWS EC2 instance." + }, + { + "name": "/aws/hierarchy", + "description": "INTERNAL AWS hierarchy." + }, + { + "name": "/backup_throttle", + "description": "INTERNAL Backup throttle settings." + }, + { + "name": "/browse", + "description": "INTERNAL Browse." + }, + { + "name": "/cloud_on", + "description": "INTERNAL Cloud instantiation." + }, + { + "name": "/cluster", + "description": "INTERNAL Cluster configuration and health." + }, + { + "name": "/config", + "description": "INTERNAL Internal configuration." + }, + { + "name": "/data_location", + "description": "INTERNAL Replication locations." + }, + { + "name": "/diagnostic", + "description": "INTERNAL Diagnostic." + }, + { + "name": "/event_series", + "description": "INTERNAL Events series." + }, + { + "name": "/fileset", + "description": "INTERNAL File system filesets." + }, + { + "name": "/folder", + "description": "INTERNAL Folders." + }, + { + "name": "/graphql", + "description": "INTERNAL Graphql." + }, + { + "name": "/host", + "description": "INTERNAL Linux hosts and Windows hosts." + }, + { + "name": "/host/async", + "description": "INTERNAL Async host requests." + }, + { + "name": "/host/bulk/volume_filter_driver", + "description": "INTERNAL Host volume filter driver." + }, + { + "name": "/host/share", + "description": "INTERNAL Network shares." + }, + { + "name": "/host/share_credential", + "description": "INTERNAL operations on network shares." + }, + { + "name": "/host_fileset", + "description": "INTERNAL physical host fileset." + }, + { + "name": "/hierarchy", + "x-displayName": "/hierarchy", + "description": "INTERNAL Hierarchy." + }, + { + "name": "/hyperv/cluster", + "x-displayName": "/hyperv/cluster", + "description": "INTERNAL Hyper-V clusters." + }, + { + "name": "/hyperv/hierarchy", + "x-displayName": "/hyperv/hierarchy", + "description": "INTERNAL Hyper-V hierarchy." + }, + { + "name": "/hyperv/host", + "x-displayName": "/hyperv/host", + "description": "INTERNAL Hyper-V hosts." + }, + { + "name": "/hyperv/scvmm", + "x-displayName": "/hyperv/scvmm", + "description": "INTERNAL Hyper-V SCVMM." + }, + { + "name": "/hyperv/vm", + "x-displayName": "/hyperv/vm", + "description": "INTERNAL Hyper-V virtual machines." + }, + { + "name": "/job", + "description": "INTERNAL Jobs." + }, + { + "name": "/lambda", + "description": "INTERNAL Lambda.", + "x-displayName": "/lambda" + }, + { + "name": "/log", + "description": "INTERNAL Logging interface." + }, + { + "name": "/managed_object", + "description": "INTERNAL Managed objects." + }, + { + "name": "/managed_volume", + "description": "INTERNAL Managed volumes." + }, + { + "name": "/mfa/rsa", + "description": "INTERNAL RSA mfa server." + }, + { + "name": "/mssql", + "description": "INTERNAL SQL Server instances and databases." + }, + { + "name": "/network_throttle", + "description": "INTERNAL network throttle settings." + }, + { + "name": "/node", + "description": "INTERNAL Nodes." + }, + { + "name": "/node_management", + "description": "INTERNAL Manage nodes." + }, + { + "name": "/notification_setting", + "description": "INTERNAL Notification settings." + }, + { + "name": "/nutanix/cluster", + "x-displayName": "/nutanix/cluster", + "description": "INTERNAL Nutanix clusters." + }, + { + "name": "/nutanix/hierarchy", + "x-displayName": "/nutanix/hierarchy", + "description": "INTERNAL Nutanix hierarchy." 
+ }, + { + "name": "/nutanix/vm", + "x-displayName": "/nutanix/vm", + "description": "INTERNAL Nutanix virtual machines." + }, + { + "name": "/ods_configuration", + "x-displayName": "/ods_configuration", + "description": "INTERNAL, for managing on-demand snapshot configuration." + }, + { + "name": "/oracle", + "x-displayName": "/oracle", + "description": "INTERNAL Oracle." + }, + { + "name": "/oracle/hierarchy", + "x-displayName": "/oracle", + "description": "INTERNAL Oracle hierarchy." + }, + { + "name": "/organization", + "x-displayName": "organization", + "description": "INTERNAL operations on Organizations." + }, + { + "name": "/principal_search", + "description": "INTERNAL Security principals." + }, + { + "name": "/polaris", + "description": "INTERNAL Polaris." + }, + { + "name": "/polaris/app_blueprint", + "description": "INTERNAL Polaris app blueprint." + }, + { + "name": "/polaris/archive", + "description": "INTERNAL Polaris archive." + }, + { + "name": "/polaris/failover", + "description": "INTERNAL Polaris failover." + }, + { + "name": "/polaris/nas", + "description": "INTERNAL Polaris NAS." + }, + { + "name": "/polaris/replication", + "description": "INTERNAL Polaris replication." + }, + { + "name": "/polaris/replication/source", + "description": "INTERNAL Polaris replication source." + }, + { + "name": "/polaris/report", + "description": "INTERNAL Polaris report." + }, + { + "name": "/polaris/user_audit_log_capture", + "description": "INTERNAL Polaris audit log." + }, + { + "name": "/polaris_sla_domain", + "description": "INTERNAL Polaris SLA domains." + }, + { + "name": "/replication", + "description": "INTERNAL Replication." + }, + { + "name": "/report", + "description": "INTERNAL Reports." + }, + { + "name": "/sap", + "description": "INTERNAL SAP." + }, + { + "name": "/search", + "description": "INTERNAL Search." + }, + { + "name": "/session", + "description": "INTERNAL Session." + }, + { + "name": "/sla_domain", + "description": "INTERNAL SLA Domains." + }, + { + "name": "/smb", + "description": "INTERNAL SMB shares." + }, + { + "name": "/smtp_instance", + "description": "INTERNAL SMTP instance for sending emails." + }, + { + "name": "/snapshot", + "description": "INTERNAL Snapshots." + }, + { + "name": "/stats", + "description": "INTERNAL Timeseries stats." + }, + { + "name": "/storage/array", + "description": "INTERNAL Storage arrays." + }, + { + "name": "/support", + "description": "INTERNAL Support operations." + }, + { + "name": "/syslog", + "description": "INTERNAL Syslog settings." + }, + { + "name": "/unmanaged_object", + "description": "INTERNAL Unmanaged objects." + }, + { + "name": "/user", + "description": "INTERNAL User management." + }, + { + "name": "/vcd/cluster", + "description": "INTERNAL vCloud cluster." + }, + { + "name": "/vcd/hierarchy", + "description": "INTERNAL vCloud hierarchy." + }, + { + "name": "/vcd/vapp", + "description": "INTERNAL vCloud vAPP." + }, + { + "name": "/vmware/agent", + "description": "INTERNAL Vmware agent." + }, + { + "name": "/vmware/config", + "description": "INTERNAL Vmware config." + }, + { + "name": "/vmware/compute_cluster", + "description": "INTERNAL VMware compute clusters." + }, + { + "name": "/vmware/data_center", + "description": "INTERNAL VMware data center." + }, + { + "name": "/vmware/datastore", + "description": "INTERNAL VMware data store." + }, + { + "name": "/vmware/guest_credential", + "description": "INTERNAL VMware Guest OS credentials." 
+ }, + { + "name": "/vmware/standalone_host", + "description": "INTERNAL VMware ESXi hosts." + }, + { + "name": "/vmware/vcenter", + "description": "INTERNAL VMware vCenter Server." + }, + { + "name": "/vmware/vm", + "description": "INTERNAL VMware virtual machines." + }, + { + "name": "/volume_group", + "description": "INTERNAL Volume Groups." + } + ], + "definitions": { + "AuthorizationSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/AuthorizationSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "MssqlAvailabilityGroupSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/MssqlAvailabilityGroupSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "MssqlInstanceSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/MssqlInstanceSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "NutanixClusterSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/NutanixClusterSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "NutanixContainerListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/NutanixContainer" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "NutanixVmSnapshotSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/NutanixVmSnapshotSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "NutanixVmSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/NutanixVmSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "NutanixClusterConfig": { + "type": "object", + "required": [ + "caCerts", + "hostname", + "nutanixClusterUuid", + "password", + "username" + ], + "properties": { + "hostname": { + "type": "string", + "description": "Address for the Prism host. 
Any Prism Element or Prism central host will do. We will use the highly available IP, if set, and this address, if not, to communicate with the cluster." + }, + "nutanixClusterUuid": { + "type": "string", + "description": "The UUID of the Nutanix cluster being added. This is required because Prism Central may manage multiple clusters, and we need to differentiate between them." + }, + "username": { + "type": "string" + }, + "password": { + "type": "string", + "x-secret": true + }, + "caCerts": { + "type": "string", + "description": "Concatenated X.509 certificates in Base64 encoded DER format. Each certificate must start with -----BEGIN CERTIFICATE----- and end with -----END CERTIFICATE-----." + } + } + }, + "NutanixClusterDetail": { + "allOf": [ + { + "$ref": "#/definitions/NutanixClusterSummary" + }, + { + "type": "object", + "required": [ + "caCerts", + "connectionStatus" + ], + "properties": { + "caCerts": { + "type": "string", + "description": "Concatenated X.509 certificates in Base64 encoded DER format. Each certificate must start with -----BEGIN CERTIFICATE----- and end with -----END CERTIFICATE-----." + }, + "connectionStatus": { + "description": "Connection status of a Nutanix Cluster.", + "$ref": "#/definitions/RefreshableObjectConnectionStatus" + } + } + } + ] + }, + "NutanixClusterPatch": { + "type": "object", + "properties": { + "hostname": { + "type": "string", + "description": "Address for the Prism host. Any Prism Element or Prism central host will do. We will use the highly available IP, if set, and this address, if not, to communicate with the cluster." + }, + "username": { + "type": "string" + }, + "password": { + "type": "string", + "x-secret": true + }, + "caCerts": { + "type": "string", + "description": "Concatenated X.509 certificates in Base64 encoded DER format. Each certificate must start with -----BEGIN CERTIFICATE----- and end with -----END CERTIFICATE-----." + }, + "configuredSlaDomainId": { + "type": "string", + "description": "ID of the SLA Domain that is configured for this Nutanix Cluster. Existing snapshots of the object will be retained with the configuration of specified SLA Domain." + } + } + }, + "NutanixClusterSummary": { + "allOf": [ + { + "$ref": "#/definitions/SlaAssignable" + }, + { + "type": "object", + "required": [ + "hostname", + "naturalId", + "username" + ], + "properties": { + "naturalId": { + "type": "string" + }, + "hostname": { + "type": "string" + }, + "username": { + "type": "string" + }, + "connectionStatus": { + "description": "Connection status of a Nutanix Cluster.", + "$ref": "#/definitions/RefreshableObjectConnectionStatus" + }, + "pendingSlaDomain": { + "description": "Describes any pending SLA Domain assignment on this object.", + "$ref": "#/definitions/ManagedObjectPendingSlaInfo" + } + } + } + ] + }, + "NutanixContainer": { + "type": "object", + "required": [ + "name", + "naturalId" + ], + "properties": { + "naturalId": { + "type": "string", + "description": "Natural ID of the Nutanix container." + }, + "name": { + "type": "string", + "description": "Name of the Nutanix container." + } + } + }, + "NutanixDownloadFilesJobConfig": { + "type": "object", + "required": [ + "paths" + ], + "properties": { + "paths": { + "type": "array", + "description": "An array containing the full source path of each file and folder that is part of the download job. The array must contain at least one path. 
When the source is a Windows virtual machine, the paths must all be on the same disk.", + "items": { + "type": "string" + } + }, + "legalHoldDownloadConfig": { + "description": "An optional argument containing a Boolean parameter to depict if the download is being triggered for Legal Hold use case.", + "$ref": "#/definitions/LegalHoldDownloadConfig" + } + } + }, + "NutanixRestoreFileConfig": { + "type": "object", + "required": [ + "path", + "restorePath" + ], + "properties": { + "path": { + "type": "string", + "description": "Absolute file path." + }, + "restorePath": { + "type": "string", + "description": "Target folder for the copied files." + } + } + }, + "NutanixRestoreFilesConfig": { + "type": "object", + "required": [ + "restoreConfig" + ], + "properties": { + "restoreConfig": { + "type": "array", + "description": "Directory of folder to copy files into.", + "items": { + "$ref": "#/definitions/NutanixRestoreFileConfig" + } + } + } + }, + "NutanixVirtualDiskSummary": { + "type": "object", + "properties": { + "uuid": { + "type": "string", + "description": "UUID of the disk." + }, + "sizeInBytes": { + "type": "integer", + "format": "int64", + "description": "Size of the virtual disk in bytes." + }, + "deviceType": { + "type": "string", + "description": "Type of virtual disk." + } + } + }, + "NutanixVmDetail": { + "allOf": [ + { + "$ref": "#/definitions/NutanixVmPatch" + }, + { + "$ref": "#/definitions/NutanixVmSummary" + }, + { + "$ref": "#/definitions/BlackoutWindowResponseInfo" + }, + { + "type": "object", + "required": [ + "excludedDiskIds", + "isAgentRegistered", + "isPaused", + "virtualDisks" + ], + "properties": { + "isPaused": { + "type": "boolean", + "description": "Whether backup/archival/replication is paused for this System Volume." + }, + "isAgentRegistered": { + "type": "boolean", + "description": "Returns whether the Rubrik connector is installed and service is registered." + }, + "virtualDisks": { + "type": "array", + "description": "Information of all the virtual disks for this virtual machine.", + "items": { + "$ref": "#/definitions/NutanixVirtualDiskSummary" + } + }, + "excludedDiskIds": { + "type": "array", + "description": "A list of virtual disk IDs to exclude from the backup for this virtual machine.", + "items": { + "type": "string" + } + } + } + } + ] + }, + "NutanixVmExportSnapshotJobConfig": { + "type": "object", + "required": [ + "containerNaturalId" + ], + "properties": { + "containerNaturalId": { + "type": "string", + "description": "The natural ID of the container that will store the export VM's disks." + }, + "nutanixClusterId": { + "type": "string", + "description": "The ID of the Nutanix cluster to export to. If not specified, we will default to the VM's cluster." + }, + "vmName": { + "type": "string", + "description": "name of the new VM for export." + }, + "powerOn": { + "type": "boolean", + "description": "Whether the VM should be powered on after export. Default value is true." + }, + "removeNetworkDevices": { + "type": "boolean", + "description": "Determines whether to remove the network interfaces from the exported virtual machine. Set to 'true' to remove all network interfaces. The default value is 'false'. 
If 'false' the export job will attempt to add nics that were both present at snapshot time and connected to networks that are still present on the target cluster.", + "default": false + } + } + }, + "NutanixVmPatch": { + "type": "object", + "properties": { + "configuredSlaDomainId": { + "type": "string", + "description": "Assign this VM to the given SLA domain. Existing snapshots of the object will be retained with the configuration of specified SLA Domain." + }, + "isPaused": { + "type": "boolean", + "description": "Whether backup/archival/replication is paused for this VM." + }, + "snapshotConsistencyMandate": { + "description": "Consistency level mandated for this VM.", + "$ref": "#/definitions/NutanixSnapshotConsistencyMandate" + }, + "excludedDiskIds": { + "type": "array", + "description": "A list of virtual disks IDs to exclude from the backup for this virtual machine.", + "items": { + "type": "string" + } + } + } + }, + "NutanixVmSnapshotDetail": { + "allOf": [ + { + "$ref": "#/definitions/NutanixVmSnapshotSummary" + } + ] + }, + "NutanixVmSnapshotSummary": { + "allOf": [ + { + "$ref": "#/definitions/BaseSnapshotSummary" + }, + { + "type": "object", + "required": [ + "vmName" + ], + "properties": { + "vmName": { + "type": "string" + } + } + } + ] + }, + "NutanixVmSummary": { + "allOf": [ + { + "$ref": "#/definitions/Snappable" + }, + { + "type": "object", + "required": [ + "id", + "isRelic", + "name", + "snapshotConsistencyMandate" + ], + "properties": { + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "nutanixClusterId": { + "type": "string", + "description": "The ID of the Nutanix cluster to which this VM belongs." + }, + "nutanixClusterName": { + "type": "string", + "description": "The name of the Nutanix cluster to which this VM belongs." + }, + "isRelic": { + "type": "boolean", + "description": "Whether this Nutanix VM is currently present on the Nutanix cluster." + }, + "snapshotConsistencyMandate": { + "description": "Consistency level mandated for this VM.", + "$ref": "#/definitions/NutanixSnapshotConsistencyMandate" + }, + "agentStatus": { + "description": "The status of the Rubrik Backup Service agent for Nutanix virtual machines.", + "$ref": "#/definitions/AgentStatus" + }, + "operatingSystemType": { + "description": "The type of the operating system running on the Nutanix virtual machine.", + "$ref": "#/definitions/OperatingSystemType" + }, + "pendingSlaDomain": { + "description": "Describes any pending SLA Domain assignment on this object.", + "$ref": "#/definitions/ManagedObjectPendingSlaInfo" + } + } + } + ] + }, + "AdaptiveThrottlingSettingsNutanixFields": { + "type": "object", + "properties": { + "nutanixThrottlingSettings": { + "$ref": "#/definitions/NutanixAdaptiveThrottlingSettings" + } + } + }, + "NutanixAdaptiveThrottlingSettings": { + "type": "object", + "properties": { + "ioLatencyThreshold": { + "type": "integer", + "format": "int32", + "description": "Threshold for throttling VM backups based on Nutanix VM latency, measured in milliseconds (ms)." + }, + "cpuUtilizationThreshold": { + "type": "integer", + "format": "int32", + "description": "Threshold for throttling VM backups based on Nutanix VM CPU utilization, measured as a percentage of total CPU for the VM." 
+ } + } + }, + "NutanixDataLocationUsage": { + "type": "object", + "required": [ + "numNutanixVmsArchived" + ], + "properties": { + "numNutanixVmsArchived": { + "type": "integer", + "format": "int32" + } + } + }, + "NutanixHierarchyObjectDescendantCount": { + "type": "object", + "properties": { + "cluster": { + "type": "integer", + "format": "int32" + }, + "vm": { + "type": "integer", + "format": "int32" + } + } + }, + "NutanixHierarchyObjectSummary": { + "allOf": [ + { + "$ref": "#/definitions/ManagedHierarchyObjectSummary" + }, + { + "type": "object", + "required": [ + "descendantCount", + "isDeleted", + "objectType" + ], + "properties": { + "objectType": { + "type": "string", + "description": "Type of object.", + "enum": [ + "cluster", + "vm" + ] + }, + "descendantCount": { + "$ref": "#/definitions/NutanixHierarchyObjectDescendantCount" + }, + "isDeleted": { + "type": "boolean", + "description": "Indicates whether the Nutanix hierarchy object is deleted." + }, + "agentStatus": { + "description": "The status of the Rubrik Backup Service agent for Nutanix virtual machines.", + "$ref": "#/definitions/AgentStatus" + }, + "pendingSlaDomain": { + "description": "Describes any pending SLA Domain assignment on this object.", + "$ref": "#/definitions/ManagedObjectPendingSlaInfo" + } + } + } + ] + }, + "NutanixHierarchyObjectSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/NutanixHierarchyObjectSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "NutanixSnapshotConsistencyMandate": { + "type": "string", + "description": "Consistency level mandated for this VM.", + "enum": [ + "Automatic", + "CrashConsistent", + "ApplicationConsistent" + ] + }, + "NutanixVmSlaObjectCount": { + "type": "object", + "properties": { + "numNutanixVms": { + "type": "integer", + "format": "int32", + "description": "The number of Nutanix virtual machines protected under this SLA domain." + } + } + }, + "ThrottlingSettings": { + "allOf": [ + { + "$ref": "#/definitions/AdaptiveThrottlingSettingsNutanixFields" + }, + { + "$ref": "#/definitions/AdaptiveThrottlingSettingsFilesetFields" + }, + { + "$ref": "#/definitions/AdaptiveThrottlingSettingsHypervFields" + }, + { + "$ref": "#/definitions/AdaptiveThrottlingSettingsMssqlFields" + }, + { + "$ref": "#/definitions/AdaptiveThrottlingSettingsVmwareFields" + }, + { + "type": "object", + "required": [ + "enableThrottling" + ], + "properties": { + "enableThrottling": { + "type": "boolean", + "description": "Whether or not to enable throttling." + } + } + } + ] + }, + "SubscriptionAttachmentType": { + "type": "string", + "description": "Email subscription attachment file type.", + "enum": [ + "Csv" + ] + }, + "SubscriptionOwner": { + "type": "object", + "required": [ + "userId", + "username" + ], + "properties": { + "userId": { + "type": "string", + "description": "User ID of the email subscription object owner." + }, + "username": { + "type": "string", + "description": "Username of the email subscription object owner." + } + } + }, + "SubscriptionScheduleTimeAttributes": { + "type": "object", + "properties": { + "dailyScheduleHour": { + "type": "integer", + "format": "int32", + "description": "Hour of the day to send the scheduled email, if the user picks a daily schedule. 0 = 12AM, 23 = 11PM." 
+ }, + "weeklyScheduleHour": { + "type": "integer", + "format": "int32", + "description": "Hour of the user-specified day to send the scheduled email, if the user picks a weekly schedule. 0 = 12AM, 23 = 11PM." + }, + "daysOfWeek": { + "type": "array", + "description": "Day of the week, represented by numbers, to send the scheduled email, if the user picks a weekly schedule. 0 = Sunday, 1 = Monday, 2 = Tuesday, 3 = Wednesday, 4 = Thursday, 5 = Friday, 6 = Saturday.", + "items": { + "type": "integer", + "format": "int32" + } + }, + "monthlyScheduleHour": { + "type": "integer", + "format": "int32", + "description": "Hour of the user-specified day to send the scheduled email, if the user picks a monthly schedule. 0 = 12AM, 23 = 11PM." + }, + "dayOfMonth": { + "type": "integer", + "format": "int32", + "description": "Day of the month to send the scheduled email, if the user picks a monthly schedule." + } + } + }, + "SubscriptionStatus": { + "type": "string", + "description": "Status of email subscription.", + "enum": [ + "Active", + "Suspended", + "Unknown" + ] + }, + "InstanceFailoverInfo": { + "type": "object", + "required": [ + "originalDiskIdentifier" + ], + "properties": { + "originalDiskIdentifier": { + "type": "string", + "description": "The identifier used to map the original disks before failover to the disks being replicated. For vmware to AWS, this would be the deviceKey of the vmware virtual disk this EBS volume corresponds to." + } + } + }, + "ReplicationSnapshotDiskInfo": { + "type": "object", + "required": [ + "diskFailoverInfo", + "diskId", + "isOsDisk", + "logicalSizeInBytes", + "snapshotDiskId" + ], + "properties": { + "diskId": { + "type": "string", + "description": "The ID of the disk/volume that is being replicated." + }, + "snapshotDiskId": { + "type": "string", + "description": "The ID of the snapshot of the disk/volume taken on the source that needs to be replicated." + }, + "logicalSizeInBytes": { + "type": "integer", + "format": "int64", + "description": "Size of the disk/volume that is being replicated." + }, + "isOsDisk": { + "type": "boolean", + "description": "Flag to specify if the disk is OS disk." + }, + "diskFailoverInfo": { + "description": "Details specific to the target snappable required to failover the EBS volumes.", + "$ref": "#/definitions/InstanceFailoverInfo" + } + } + }, + "ReplicationSnapshotInfo": { + "type": "object", + "required": [ + "snappableId", + "snapshotId" + ], + "properties": { + "snappableId": { + "type": "string", + "description": "The ID of the snappable stored on this cluster." + }, + "snapshotId": { + "type": "string", + "description": "The ID of the snapshot that is being replicated." + }, + "snapshotDate": { + "type": "integer", + "format": "int64", + "description": "The date when the snapshot was taken in number of milliseconds since the UNIX epoch. This is a required field when the replication source is Polaris." + }, + "snapshotDiskInfos": { + "type": "array", + "description": "An array of the details of the snapshot disks that need to be replicated. This is a required field when the replication source is Polaris.", + "items": { + "$ref": "#/definitions/ReplicationSnapshotDiskInfo" + } + }, + "appMetadata": { + "type": "string", + "description": "Serialized metadata specific to the snappable which is being replicated. This is a required field when the replication source is Polaris." 
+ }, + "childSnapshotInfos": { + "type": "array", + "description": "An array of child snapshots information.", + "items": { + "$ref": "#/definitions/ReplicationSnapshotInfo" + } + } + } + }, + "AgentDeploymentSettings": { + "type": "object", + "required": [ + "isAutomatic" + ], + "properties": { + "isAutomatic": { + "type": "boolean", + "description": "Determines whether the Rubrik cluster automatically deploys the Rubrik Backup Service to the guest OS at the first backup. Set to true to permit automatic deployment. Set to false to prevent automatic deployment." + } + } + }, + "AgentStatus": { + "type": "object", + "required": [ + "agentStatus" + ], + "properties": { + "agentStatus": { + "type": "string", + "description": "The agent connection status." + }, + "disconnectReason": { + "type": "string", + "description": "The reason the agent disconnected." + } + } + }, + "AppBlueprintChildCreate": { + "type": "object", + "required": [ + "bootPriority", + "childId" + ], + "properties": { + "childId": { + "type": "string", + "description": "The ID of the child virtual machine of the Blueprint." + }, + "bootPriority": { + "type": "integer", + "format": "int32", + "description": "An integer value representing the priority class of the child in the Blueprint. The lower the value of this integer, the higher the priority of the child while booting up the App.\n" + } + } + }, + "AppBlueprintChildDetail": { + "type": "object", + "required": [ + "bootPriority", + "id", + "name" + ], + "properties": { + "id": { + "type": "string", + "description": "The ID of the child added to the Blueprint." + }, + "name": { + "type": "string", + "description": "The name of the child in the Blueprint." + }, + "bootPriority": { + "type": "integer", + "format": "int32", + "description": "The boot priority of the child of the Blueprint." + } + } + }, + "AppBlueprintChildSnappableExportSpec": { + "allOf": [ + { + "$ref": "#/definitions/AppBlueprintChildSnappableLocalRecoverySpec" + }, + { + "type": "object", + "required": [ + "datastoreId" + ], + "properties": { + "datastoreId": { + "type": "string", + "description": "ID of the datastore to assign to the exported virtual machine." + }, + "shouldUnregisterVm": { + "type": "boolean", + "description": "Determines whether the new virtual machine created from a snapshot is registered with the vCenter Server. Use 'true' to remove the registration from vCenter Server. Use 'false' to keep the registration with the vCenter Server. The default is 'false'.", + "default": false + } + } + } + ] + }, + "AppBlueprintChildSnappableInstantRecoverySpec": { + "allOf": [ + { + "$ref": "#/definitions/AppBlueprintChildSnappableLocalRecoverySpec" + }, + { + "type": "object", + "properties": { + "dataStoreName": { + "type": "string", + "description": "Name of the host NAS datastore to use with the new virtual machine. The Rubrik cluster creates a new datastore on the host using the provided name." + }, + "preserveMoid": { + "type": "boolean", + "description": "Determines whether to preserve the moid of the source virtual machine in a restore operation. Use 'true' to keep the moid of the source. Use 'false' to assign a new moid. The default is 'false'.", + "default": false + }, + "vlan": { + "type": "integer", + "format": "int32", + "description": "VLAN ID for the VLAN ESXi host prefer to use for mounting the datastore." 
+ } + } + } + ] + }, + "AppBlueprintChildSnappableLocalRecoverySpec": { + "type": "object", + "required": [ + "originalVmId" + ], + "properties": { + "originalVmId": { + "type": "string", + "description": "ID of the VM whose snapshot is being restored." + }, + "vmName": { + "type": "string", + "description": "Name of the restored VM." + }, + "hostId": { + "type": "string", + "description": "ID of host for the restored VM to use." + }, + "disableNetwork": { + "type": "boolean", + "description": "A Boolean value that determines the state of the network interfaces when the virtual machine is mounted. When 'false', the network interfaces are enabled. When 'true', the network interfaces are disabled. Disabling the interfaces can prevent IP conflicts. The network interfaces are disabled by default.", + "default": false + }, + "removeNetworkDevices": { + "type": "boolean", + "description": "A Boolean value that determines whether to remove the network interfaces from the mounted virtual machine. When 'true,' all network interfaces are removed. When 'false,' network interfaces are maintained. The network interfaces are maintained by default.", + "default": false + }, + "shouldKeepMacAddresses": { + "type": "boolean", + "description": "Determines whether the MAC addresses of the network interfaces on the source virtual machine are assigned to the new virtual machine. Set to 'true' to assign the original MAC addresses to the new virtual machine. Set to 'false' to assign new MAC addresses. The default is 'false'. When removeNetworkDevices is set to true, this property is ignored.", + "default": true + } + } + }, + "AppBlueprintChildSnappableMountSpec": { + "allOf": [ + { + "$ref": "#/definitions/AppBlueprintChildSnappableLocalRecoverySpec" + }, + { + "type": "object", + "properties": { + "dataStoreName": { + "type": "string", + "description": "Name of the host NAS datastore to use with the new virtual machine. The Rubrik cluster creates a new datastore on the host using the provided name." + }, + "vlan": { + "type": "integer", + "format": "int32", + "description": "VLAN ID for the VLAN ESXi host prefer to use for mounting the datastore." + } + } + } + ] + }, + "AppBlueprintChildSnapshotDetail": { + "allOf": [ + { + "$ref": "#/definitions/AppBlueprintChildSnapshotSummary" + } + ] + }, + "AppBlueprintChildSnapshotSummary": { + "type": "object", + "required": [ + "childName", + "childSnapshotId" + ], + "properties": { + "childSnapshotId": { + "type": "string", + "description": "The ID of the virtual machine snapshot. snapshot." + }, + "childName": { + "type": "string", + "description": "Name of the virtual machine object of the snapshot." + }, + "indexState": { + "type": "integer", + "format": "int64", + "description": "An integer value that represents the state of a snapshot indexing job. 0 means that indexing is in progress or has not started. 1 means that indexing has completed successfully. 2 means that the indexer has failed to process the snapshot.\n" + } + } + }, + "AppBlueprintCreate": { + "type": "object", + "required": [ + "children", + "name", + "polarisAppBlueprintId", + "version" + ], + "properties": { + "name": { + "type": "string", + "description": "Name of the Blueprint object." + }, + "polarisAppBlueprintId": { + "type": "string", + "description": "Polaris ID of the Blueprint." + }, + "version": { + "type": "integer", + "format": "int64", + "description": "Version of the Blueprint." 
+ }, + "hasDrExpectation": { + "type": "boolean", + "description": "This represents whether the blueprint is created for DR use case. The DR target can be a CDM cluster or any supported Cloud source." + }, + "children": { + "type": "array", + "description": "An array of the detail information for the VMware virtual machines in the Blueprint.", + "items": { + "$ref": "#/definitions/AppBlueprintChildCreate" + } + } + } + }, + "AppBlueprintDetail": { + "allOf": [ + { + "$ref": "#/definitions/AppBlueprintSummary" + }, + { + "type": "object", + "required": [ + "children" + ], + "properties": { + "children": { + "type": "array", + "description": "An array of the detail information for the VMware virtual machines in the Blueprint.", + "items": { + "$ref": "#/definitions/AppBlueprintChildDetail" + } + } + } + } + ] + }, + "AppBlueprintExportSnapshotJobConfig": { + "allOf": [ + { + "$ref": "#/definitions/AppBlueprintRestoreSnapshotJobConfigBase" + }, + { + "type": "object", + "required": [ + "childrenToRestore", + "createNewApp" + ], + "properties": { + "newAppName": { + "type": "string", + "description": "Name of the restored Blueprint when the restore job creates a new Blueprint. This property is ignored when the export targets an existing Blueprint." + }, + "targetAppId": { + "type": "string", + "description": "ID of the target Blueprint when the restore job is exporting to an existing Blueprint. This property is ignored when the export creates a new Blueprint." + }, + "createNewApp": { + "type": "boolean", + "description": "A Boolean value that specifies whether to create a new App. When 'true,' the export creates a new App. When 'false,' the export does not create a new App." + }, + "childrenToRestore": { + "type": "array", + "description": "An array that contains the export specifications for the export of child snappables in a Blueprint snapshot.", + "items": { + "$ref": "#/definitions/AppBlueprintChildSnappableExportSpec" + } + }, + "polarisAppBlueprintId": { + "type": "string", + "description": "The Polaris ID of the Blueprint being exported. Only applies to new Blueprints." + } + } + } + ] + }, + "AppBlueprintInstantRecoveryJobConfig": { + "allOf": [ + { + "$ref": "#/definitions/AppBlueprintRestoreSnapshotJobConfigBase" + }, + { + "type": "object", + "required": [ + "childrenToRestore" + ], + "properties": { + "childrenToRestore": { + "type": "array", + "description": "An array that contains the restore specifications for the instant recovery of child snappables in a Blueprint snapshot.", + "items": { + "$ref": "#/definitions/AppBlueprintChildSnappableInstantRecoverySpec" + } + }, + "shouldRemoveChildrenFromOtherAppBlueprints": { + "type": "boolean", + "description": "A Boolean value that specifies whether to remove child snappables that are part of other Apps from the other App. When this value is 'true,' the child snappables are removed from other Apps and are restored in the App being restored. When this value is 'false,' the child snappables that are running in other Apps remain in those Apps and are not added to the App being restored.", + "default": false + }, + "shouldRemoveChildrenFromAppBlueprint": { + "type": "boolean", + "description": "A Boolean value that specifies whether all other child snappables that are not part of childrenToRestore are removed from the Blueprint when recovering from a snapshot. When this value is 'true,' those child snappables are removed from the Blueprint. 
By default, this value is 'false.'", + "default": false + } + } + } + ] + }, + "AppBlueprintMountSnapshotJobConfig": { + "allOf": [ + { + "$ref": "#/definitions/AppBlueprintRestoreSnapshotJobConfigBase" + }, + { + "type": "object", + "required": [ + "childrenToRestore", + "shouldCreateNewApp" + ], + "properties": { + "newAppName": { + "type": "string", + "description": "Name of the mounted Blueprint." + }, + "createDatastoreOnly": { + "type": "boolean", + "description": "The child jobs create datastores that contain the VMDKs, but do not create the corresponding virtual machines." + }, + "targetAppBlueprintId": { + "type": "string", + "description": "If not creating a new App, this field should contain the ID of the App to which we add the mounted snappables." + }, + "shouldCreateNewApp": { + "type": "boolean", + "description": "A Boolean value that specifies whether to create a new App. When 'true', the mount job creates a new App. When 'false', the mount job does not create a new App." + }, + "polarisAppBlueprintId": { + "type": "string", + "description": "Polaris ID of the Blueprint being mounted if it is a new Blueprint." + }, + "childrenToRestore": { + "type": "array", + "description": "An array that contains the restore specifications for mounting child snappables in Blueprint snapshot.", + "items": { + "$ref": "#/definitions/AppBlueprintChildSnappableMountSpec" + } + } + } + } + ] + }, + "AppBlueprintPatch": { + "type": "object", + "required": [ + "version" + ], + "properties": { + "name": { + "type": "string", + "description": "Name of the Blueprint object." + }, + "configuredSlaDomainId": { + "type": "string", + "description": "The ID of the SLA Domain managing protection for the Blueprint. Existing snapshots of the object will be retained with the configuration of specified SLA Domain." + }, + "version": { + "type": "integer", + "format": "int64", + "description": "Version of the Blueprint." + }, + "hasDrExpectation": { + "type": "boolean", + "description": "This represents whether the blueprint is created for DR use case. The DR target can be a CDM cluster or any supported Cloud source." + }, + "children": { + "type": "array", + "description": "An array of the detail information for the VMware virtual machines in the Blueprint.", + "items": { + "$ref": "#/definitions/AppBlueprintChildCreate" + } + } + } + }, + "AppBlueprintPolarisLink": { + "type": "object", + "required": [ + "link" + ], + "properties": { + "link": { + "type": "string", + "description": "Link to the URL to view the Blueprint detail page on Polaris." + } + } + }, + "AppBlueprintRestoreSnapshotJobConfigBase": { + "type": "object", + "properties": { + "shouldPowerOn": { + "type": "boolean", + "description": "A Boolean value that indicates whether to power on the child virtual machines of a Blueprint after restoring the virtual machines. When this value is 'true,' the child virtual machines are powered on after being restored.", + "default": true + } + } + }, + "AppBlueprintSearchResponse": { + "allOf": [ + { + "$ref": "#/definitions/SearchResponse" + }, + { + "type": "object", + "required": [ + "childObjectId", + "childObjectName" + ], + "properties": { + "childObjectId": { + "type": "string", + "description": "The ID of the Blueprint child virtual machine where the file is located." + }, + "childObjectName": { + "type": "string", + "description": "name of the child virtual machine of the Blueprint where this file was found." 
+ } + } + } + ] + }, + "AppBlueprintSearchResponseListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/AppBlueprintSearchResponse" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "AppBlueprintSnapshotDetail": { + "allOf": [ + { + "$ref": "#/definitions/BaseSnapshotSummary" + }, + { + "$ref": "#/definitions/AppBlueprintSnapshotSummaryDetailSharedFields" + }, + { + "type": "object", + "required": [ + "childSnapshots" + ], + "properties": { + "childSnapshots": { + "type": "array", + "items": { + "$ref": "#/definitions/AppBlueprintChildSnapshotDetail" + } + } + } + } + ] + }, + "AppBlueprintSnapshotSummary": { + "allOf": [ + { + "$ref": "#/definitions/BaseSnapshotSummary" + }, + { + "$ref": "#/definitions/AppBlueprintSnapshotSummaryDetailSharedFields" + }, + { + "type": "object", + "required": [ + "childSnapshots" + ], + "properties": { + "childSnapshots": { + "type": "array", + "items": { + "$ref": "#/definitions/AppBlueprintChildSnapshotSummary" + } + } + } + } + ] + }, + "AppBlueprintSnapshotSummaryDetailSharedFields": { + "type": "object", + "required": [ + "appBlueprintName" + ], + "properties": { + "appBlueprintName": { + "type": "string", + "description": "Name of the Blueprint that is the source of the snapshot." + }, + "isSynchronized": { + "type": "boolean", + "description": "Boolean value that indicates whether a vApp snapshot is synchronized with the source vApp. Set to 'true' when the vApp snapshot is synchronized, and set to 'false' when it is not synchronized." + } + } + }, + "AppBlueprintSnapshotSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/AppBlueprintSnapshotSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "AppBlueprintSortAttribute": { + "type": "string", + "description": "Attributes that are available to use when sorting query results for App Blueprint or App hierarchy objects.", + "enum": [ + "Name", + "EffectiveSlaDomainName", + "SlaAssignment", + "ConnectionStatus" + ] + }, + "AppBlueprintSummary": { + "allOf": [ + { + "$ref": "#/definitions/Snappable" + }, + { + "type": "object", + "required": [ + "id", + "name", + "polarisAppBlueprintId", + "version" + ], + "properties": { + "id": { + "type": "string", + "description": "Blueprint ID." + }, + "polarisAppBlueprintId": { + "type": "string", + "description": "Polaris ID of the Blueprint." + }, + "name": { + "type": "string", + "description": "Blueprint name." + }, + "isRelic": { + "type": "boolean", + "description": "Boolean value that indicates whether a Blueprint is deleted. Set to 'true' when the Blueprint has been deleted and 'false' when the Blueprint is present." + }, + "version": { + "type": "integer", + "format": "int64", + "description": "Version of the Blueprint." + } + } + } + ] + }, + "AppBlueprintSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." 
+ }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/AppBlueprintSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "DeployScriptsConfig": { + "type": "object", + "properties": { + "eventSeriesId": { + "type": "string", + "description": "This event series ID should be used to send events." + } + } + }, + "ManagedObjectDescendantCountAppBlueprintFields": { + "type": "object", + "properties": { + "appBlueprint": { + "type": "integer", + "format": "int32", + "description": "Number of Blueprints." + } + } + }, + "BatchAsyncWithRequestContext": { + "type": "object", + "required": [ + "batch", + "context" + ], + "properties": { + "context": { + "description": "Context for this request.", + "$ref": "#/definitions/RequestContext" + }, + "batch": { + "description": "Batch of asynchronous requests to poll the status.", + "$ref": "#/definitions/BatchAsyncRequest" + } + } + }, + "FreezeSnappablesConfig": { + "type": "object", + "required": [ + "context", + "snappables" + ], + "properties": { + "context": { + "description": "Context for this request.", + "$ref": "#/definitions/RequestContext" + }, + "snappables": { + "type": "array", + "description": "A list of information about the protectable objects being frozen.", + "items": { + "$ref": "#/definitions/SnappableConfig" + } + } + } + }, + "HydrationCleanupRequest": { + "type": "object", + "required": [ + "context", + "hydrationCleanupSpecs" + ], + "properties": { + "context": { + "description": "Context for this request.", + "$ref": "#/definitions/RequestContext" + }, + "hydrationCleanupSpecs": { + "type": "array", + "description": "A list of snappable hydration cleanup spec.", + "items": { + "$ref": "#/definitions/HydrationCleanupSpec" + } + } + } + }, + "HydrationCleanupSpec": { + "type": "object", + "required": [ + "SnappableManagedIdToBeRemoved", + "snappableManagedId" + ], + "properties": { + "snappableManagedId": { + "type": "string", + "description": "The managed ID of the snappable." + }, + "SnappableManagedIdToBeRemoved": { + "type": "string", + "description": "The managed ID of the snappable to be cleaned up." + } + } + }, + "HydrationFinalizeRequest": { + "allOf": [ + { + "$ref": "#/definitions/SnappableBatchRequest" + } + ] + }, + "HydrationRequest": { + "type": "object", + "required": [ + "context", + "hydrationSpecs" + ], + "properties": { + "context": { + "description": "Context for this request.", + "$ref": "#/definitions/RequestContext" + }, + "pinId": { + "type": "string", + "description": "Pin ID of the snappables to be hydrated." + }, + "hydrationSpecs": { + "type": "array", + "description": "A list of snappable hydration specs.", + "items": { + "$ref": "#/definitions/HydrationSpec" + } + } + } + }, + "HydrationResponse": { + "type": "object", + "required": [ + "responses" + ], + "properties": { + "responses": { + "type": "array", + "description": "A list of snappable hydration responses.", + "items": { + "$ref": "#/definitions/SnappableHydrationResponse" + } + } + } + }, + "HydrationSpec": { + "type": "object", + "required": [ + "hydrationSpecHash", + "shouldForceFull", + "snappableHydrationSpec", + "snappableManagedId" + ], + "properties": { + "snappableManagedId": { + "type": "string", + "description": "The managed ID of the snappable." 
+ }, + "snappableHydrationSpec": { + "description": "The snappable hydration spec.", + "$ref": "#/definitions/SnappableHydrationSpec" + }, + "shouldForceFull": { + "type": "boolean", + "description": "Enables or disables full hydration of the snappable. If true will force a full hydration of snapshot identified by the hydrationSpecHash. It also removes the old hydrated vm if it exists.", + "default": false + }, + "hydrationSpecHash": { + "type": "string", + "description": "The hydration spec hash used to uniquely identify a version of the spec. This has to be unique across all versions of the spec. We would recommend a sha1 hash on the relevant fields of the spec." + } + } + }, + "HydrationSpecUnsetRequest": { + "allOf": [ + { + "$ref": "#/definitions/SnappableBatchRequest" + } + ] + }, + "JobInvalidStateType": { + "type": "string", + "description": "It represents the invalid job states for batch state queries.", + "enum": [ + "NotFound", + "Unknown" + ] + }, + "LocalRecoveryRequest": { + "type": "object", + "required": [ + "context", + "localRecoverySpecs" + ], + "properties": { + "context": { + "description": "Context for this request.", + "$ref": "#/definitions/RequestContext" + }, + "pinId": { + "type": "string", + "description": "Pin ID of the snappables for local recovery." + }, + "localRecoverySpecs": { + "type": "array", + "description": "A list of snappable local recovery specs.", + "items": { + "$ref": "#/definitions/LocalRecoverySpec" + } + } + } + }, + "LocalRecoveryResponse": { + "type": "object", + "required": [ + "responses" + ], + "properties": { + "responses": { + "type": "array", + "description": "A list of snappable local recovery responses.", + "items": { + "$ref": "#/definitions/SnappableLocalRecoveryResponse" + } + } + } + }, + "LocalRecoverySpec": { + "type": "object", + "required": [ + "recoveryType", + "snappableLocalRecoverySpec", + "snappableManagedId" + ], + "properties": { + "snappableManagedId": { + "type": "string", + "description": "The snappable managed id of the VM to be recovered." + }, + "snapshotId": { + "type": "string", + "description": "ID of the snapshot to recover." + }, + "recoveryType": { + "description": "The type of Local Recovery. Current option is in-place.", + "$ref": "#/definitions/LocalRecoveryType" + }, + "snappableLocalRecoverySpec": { + "description": "The snappable local recovery spec.", + "$ref": "#/definitions/SnappableLocalRecoverySpec" + } + } + }, + "LocalRecoveryType": { + "type": "string", + "description": "It represents the local recovery type.", + "enum": [ + "InPlace", + "Export" + ] + }, + "LogHydrationRequest": { + "type": "object", + "required": [ + "context", + "logHydrationSpecs", + "pinId" + ], + "properties": { + "context": { + "description": "Context for this request.", + "$ref": "#/definitions/RequestContext" + }, + "pinId": { + "type": "string", + "description": "Pin ID of the logs to be replayed." 
+ }, + "logHydrationSpecs": { + "type": "array", + "description": "A list of snappable log hydration specs.", + "items": { + "$ref": "#/definitions/LogHydrationSpec" + } + } + } + }, + "LogHydrationResponse": { + "type": "object", + "required": [ + "responses" + ], + "properties": { + "responses": { + "type": "array", + "description": "A list of snappable log hydration responses.", + "items": { + "$ref": "#/definitions/SnappableLogHydrationResponse" + } + } + } + }, + "LogHydrationSpec": { + "type": "object", + "required": [ + "recoveryTimeStampInMs", + "snappableManagedId", + "snappableManagedIdToReplayOnto" + ], + "properties": { + "snappableManagedId": { + "type": "string", + "description": "The managed ID of the source snappable." + }, + "snappableManagedIdToReplayOnto": { + "type": "string", + "description": "The managed ID of the snappable to replay logs onto." + }, + "recoveryTimeStampInMs": { + "type": "integer", + "format": "int64", + "description": "The recovery timestamp in millisecond. If the timestamp corresponds to a snapshot the log replay operation shall become a noop." + } + } + }, + "NetworkAdapterType": { + "type": "string", + "description": "It represents the nework adapter type for vmware.", + "enum": [ + "e1000", + "e1000e", + "pcnet32", + "vmxnet", + "vmxnet2", + "vmxnet3" + ] + }, + "NetworkType": { + "type": "string", + "description": "It represents the network type.", + "enum": [ + "Dhcp", + "StaticIp" + ] + }, + "PostFailoverSnappableConfig": { + "type": "object", + "required": [ + "originalSnappableManagedId", + "pinId", + "recoveredSnappableManagedId" + ], + "properties": { + "recoveredSnappableManagedId": { + "type": "string", + "description": "The snappable managed id of the recovered VM." + }, + "originalSnappableManagedId": { + "type": "string", + "description": "The snappable managed id of the original VM." + }, + "pinId": { + "type": "string", + "description": "Pin ID of the failover pinned." + } + }, + "description": "This contains the post failover config for a snappable." + }, + "PostFailoverSnappablesConfig": { + "type": "object", + "required": [ + "context", + "postFailoverSnappablesConfig" + ], + "properties": { + "context": { + "description": "Context for this request.", + "$ref": "#/definitions/RequestContext" + }, + "postFailoverSnappablesConfig": { + "type": "array", + "description": "A list of snappables with post failover config.", + "items": { + "$ref": "#/definitions/PostFailoverSnappableConfig" + } + } + } + }, + "PostScriptArgument": { + "type": "object", + "required": [ + "postScript", + "postScriptAttributes" + ], + "properties": { + "postScript": { + "type": "string", + "description": "This contains the post script given by the customer." + }, + "postScriptAttributes": { + "type": "array", + "description": "A list of key value attributes which will be injected into the post script.", + "items": { + "$ref": "#/definitions/PostScriptAttribute" + } + } + } + }, + "PostScriptAttribute": { + "type": "object", + "required": [ + "key", + "value" + ], + "properties": { + "key": { + "type": "string", + "description": "The key of the attribute." + }, + "value": { + "type": "string", + "description": "The value of the attribute." + } + } + }, + "PostScriptSnappableConfig": { + "type": "object", + "required": [ + "id", + "postScriptArgument" + ], + "properties": { + "id": { + "type": "string", + "description": "The snappable managed id of the recovered VM." 
+ }, + "postScriptArgument": { + "description": "This contains the post script and attributes.", + "$ref": "#/definitions/PostScriptArgument" + } + }, + "description": "This contains the post script config for a snappable." + }, + "PostScriptSnappablesConfig": { + "type": "object", + "required": [ + "context", + "postscriptSnappablesConfig" + ], + "properties": { + "context": { + "description": "Context for this request.", + "$ref": "#/definitions/RequestContext" + }, + "postscriptSnappablesConfig": { + "type": "array", + "description": "A list of snappables with post script config.", + "items": { + "$ref": "#/definitions/PostScriptSnappableConfig" + } + } + } + }, + "ReconfigurationSnappableConfig": { + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "type": "string", + "description": "The snappable managed id of the recovered VM." + }, + "vmwareReconfigureSpec": { + "description": "This contains the Vmware VM specific reconfiguration spec. This should only be passed if snappable type is VmwareVm.", + "$ref": "#/definitions/VmwareRecoverySpec" + } + }, + "description": "This contains the recovery config for a snappable." + }, + "ReconfigurationSnappablesConfig": { + "type": "object", + "required": [ + "context", + "reconfigurationSnappablesConfig" + ], + "properties": { + "context": { + "description": "Context for this request.", + "$ref": "#/definitions/RequestContext" + }, + "reconfigurationSnappablesConfig": { + "type": "array", + "description": "A list of snappables with reconfiguration config.", + "items": { + "$ref": "#/definitions/ReconfigurationSnappableConfig" + } + } + } + }, + "RecoveryCleanupSnappablesConfig": { + "type": "object", + "required": [ + "context", + "snappables" + ], + "properties": { + "context": { + "description": "Context for this request.", + "$ref": "#/definitions/RequestContext" + }, + "snappables": { + "type": "array", + "description": "A list of information about protectable objects for which recovery cleanup has to be performed.", + "items": { + "$ref": "#/definitions/SnappableConfig" + } + } + } + }, + "RenameSnappableConfig": { + "type": "object", + "required": [ + "newName", + "snappableManagedId" + ], + "properties": { + "snappableManagedId": { + "type": "string", + "description": "ID of the protectable object." + }, + "newName": { + "type": "string", + "description": "New name of the protectable object." + } + } + }, + "RenameSnappablesConfig": { + "type": "object", + "required": [ + "context", + "renameEntries" + ], + "properties": { + "context": { + "description": "Context for this request.", + "$ref": "#/definitions/RequestContext" + }, + "renameEntries": { + "type": "array", + "description": "A list of information about the protectable objects being renamed.", + "items": { + "$ref": "#/definitions/RenameSnappableConfig" + } + } + } + }, + "RequestContext": { + "type": "object", + "required": [ + "parentId", + "requestId" + ], + "properties": { + "parentId": { + "type": "string", + "description": "The ID of a job in Polaris. The job ID enables tracing for the API calls used by a Polaris job." + }, + "requestId": { + "type": "string", + "description": "The unique ID of a job request. Polaris jobs can issue multiple api calls with the same requestId. This needs to be globally unique so it can be used to implement idempotency." 
+ } + } + }, + "SnappableBatchRequest": { + "type": "object", + "required": [ + "context", + "snappableManagedIds" + ], + "properties": { + "context": { + "description": "Context for this request.", + "$ref": "#/definitions/RequestContext" + }, + "snappableManagedIds": { + "type": "array", + "description": "A list of managed IDs of corresponding snappables.", + "items": { + "type": "string" + } + } + } + }, + "SnappableConfig": { + "type": "object", + "required": [ + "snappableManagedId" + ], + "properties": { + "snappableManagedId": { + "type": "string", + "description": "ID of the protectable object." + } + } + }, + "SnappableHydrationResponse": { + "type": "object", + "required": [ + "asyncStatus", + "snappableManagedId" + ], + "properties": { + "snappableManagedId": { + "type": "string", + "description": "The managed ID of the snappable." + }, + "asyncStatus": { + "description": "Per snappable hydration job status.", + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "SnappableHydrationSpec": { + "type": "object", + "properties": { + "vmwareHydrationSpec": { + "description": "The hydration spec for a vmware snappable.", + "$ref": "#/definitions/VmwareHydrationSpec" + } + } + }, + "SnappableLocalRecoveryResponse": { + "type": "object", + "required": [ + "asyncRequestStatus", + "snappableManagedId" + ], + "properties": { + "snappableManagedId": { + "type": "string", + "description": "The managed ID of the snappable." + }, + "asyncRequestStatus": { + "description": "Per snappable local recovery job status.", + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "SnappableLocalRecoverySpec": { + "type": "object", + "properties": { + "vmwareInplaceSpec": { + "description": "The local recovery spec for a vmware snappable.", + "$ref": "#/definitions/VmwareInplaceSpec" + } + } + }, + "SnappableLogHydrationResponse": { + "type": "object", + "required": [ + "asyncStatus", + "snappableManagedId" + ], + "properties": { + "snappableManagedId": { + "type": "string", + "description": "The managed ID of the source snappable." + }, + "asyncStatus": { + "description": "Per snappable log hydration job status.", + "$ref": "#/definitions/AsyncRequestStatus" + } + } + }, + "SnappableOperationResponse": { + "type": "object", + "required": [ + "snappableManagedId", + "status" + ], + "properties": { + "snappableManagedId": { + "type": "string", + "description": "The managed ID of the protectable object." + }, + "status": { + "description": "Operation status.", + "$ref": "#/definitions/SnappableOperationResponseStatus" + }, + "failureReason": { + "type": "string", + "description": "Operation failure cause." + } + } + }, + "SnappableOperationResponseStatus": { + "type": "string", + "description": "Object store type.", + "enum": [ + "Success", + "Failure", + "Undefined" + ] + }, + "SnappableRecoveryConfig": { + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "type": "string", + "description": "The managed snappable id." + }, + "recoveryTimeStampInMs": { + "type": "integer", + "format": "int64", + "description": "The recovery timestamp in millisecond. If we want to recover from a snapshot, pass the snapshotId. It is mutually exclusive with snapshotId." + }, + "snapshotId": { + "type": "string", + "description": "ID of the snapshot to recover. This should only be needed if we want to recover from a snapshot." 
+ } + } + }, + "SnappableType": { + "type": "string", + "description": "It represents the snappable type.", + "enum": [ + "Vmware", + "Sql" + ] + }, + "SnappablesOperationResponse": { + "type": "object", + "required": [ + "asyncStatus", + "operationResponses" + ], + "properties": { + "asyncStatus": { + "description": "Job status.", + "$ref": "#/definitions/AsyncRequestStatus" + }, + "operationResponses": { + "type": "array", + "description": "The response list for the operation.", + "items": { + "$ref": "#/definitions/SnappableOperationResponse" + } + } + } + }, + "SnappablesRecoveryConfig": { + "type": "object", + "required": [ + "context", + "pinId", + "snappablesRecoveryConfig" + ], + "properties": { + "context": { + "description": "Context for this request.", + "$ref": "#/definitions/RequestContext" + }, + "pinId": { + "type": "string", + "description": "This ID will be used to store references and then can be used to cleanup the references." + }, + "snappablesRecoveryConfig": { + "type": "array", + "description": "A list of snappables with recovery config.", + "items": { + "$ref": "#/definitions/SnappableRecoveryConfig" + } + } + } + }, + "UnpinConfig": { + "type": "object", + "required": [ + "context", + "pinId" + ], + "properties": { + "context": { + "description": "Context for this request.", + "$ref": "#/definitions/RequestContext" + }, + "pinId": { + "type": "string", + "description": "This ID will be used to clean up the references." + } + } + }, + "VmwareHydrationSpec": { + "type": "object", + "required": [ + "datastoreId", + "originalVmName", + "shouldUseLinkedClone", + "vmMemoryInMB" + ], + "properties": { + "originalVmName": { + "type": "string", + "description": "Name of the original VM that we are using snapshots to hydrate." + }, + "newVmName": { + "type": "string", + "description": "Name of the new VM created by hydration." + }, + "shouldPowerOn": { + "type": "boolean", + "description": "Determines whether the virtual machine should be powered on after hydration. Set to 'true' to power on the virtual machine. Set to 'false' to hydrate the virtual machine but not power it on.", + "default": false + }, + "recoveryPoint": { + "type": "string", + "format": "date-time", + "description": "Point in time to recover to." + }, + "snapshotId": { + "type": "string", + "description": "ID of the snapshot to recover." + }, + "hostId": { + "type": "string", + "description": "ID of the ESXi host to hydrate the new virtual machine to." + }, + "datastoreId": { + "type": "string", + "description": "ID of the datastore to assign to the hydrated virtual machine." + }, + "resourcePoolId": { + "type": "string", + "description": "ID of the resource pool to hydrate the new virtual machine to." + }, + "clusterId": { + "type": "string", + "description": "ID of the cluster to hydrate the new virtual machine to." + }, + "vmMemoryInMB": { + "type": "integer", + "format": "int64", + "description": "The memory for the VM." + }, + "shouldRecoverTags": { + "type": "boolean", + "description": "The job recovers any tags that were assigned to the virtual machine.", + "default": false + }, + "shouldUseLinkedClone": { + "type": "boolean", + "description": "If true, then the hydration will create a linked clone VM and the Rubrik snapshot will be recovered on the linked clone instead. 
The caller is responsible for invoking the API to clean up the linked clone at the end of its life cycle.", + "default": false + } + } + }, + "VmwareInplaceSpec": { + "type": "object", + "properties": { + "shouldKeepSnapshotAfter": { + "type": "boolean", + "description": "This specifies whether to keep the current snapshot in the vCenter Server after in-place export." + } + } + }, + "VmwareNicInfo": { + "type": "object", + "required": [ + "isPrimary", + "networkAdapterType", + "networkMoid", + "networkType" + ], + "properties": { + "networkAdapterType": { + "description": "The network adapter type.", + "$ref": "#/definitions/NetworkAdapterType" + }, + "networkMoid": { + "type": "string", + "description": "The network MOID for this nic." + }, + "isPrimary": { + "type": "boolean", + "description": "This specifies whether this nic is the primary." + }, + "networkType": { + "description": "The type of network. It can be either Dhcp or Static.", + "$ref": "#/definitions/NetworkType" + }, + "staticIpInfo": { + "description": "This specifies the static ip config.", + "$ref": "#/definitions/StaticIpInfo" + } + }, + "description": "It describes the vmware nic information." + }, + "VmwareNicInfoWithDeviceKey": { + "type": "object", + "required": [ + "deviceKey", + "vmwareNicInfo" + ], + "properties": { + "deviceKey": { + "type": "integer", + "format": "int32", + "description": "The vmware network device key." + }, + "vmwareNicInfo": { + "description": "The vmware nic information.", + "$ref": "#/definitions/VmwareNicInfo" + } + }, + "description": "It describes the vmware nic info along with the network device key." + }, + "VmwareRecoverySpec": { + "type": "object", + "required": [ + "hypervisorDetail", + "recoveryPlanInfo", + "scriptInfo" + ], + "properties": { + "hypervisorDetail": { + "description": "It contians the hypervisor target details.", + "$ref": "#/definitions/VmwareRecoveryTarget" + }, + "recoveryPlanInfo": { + "description": "It contains the recovery plan information for VMs. For example, it will contain the network configuration, memory etc.", + "$ref": "#/definitions/VmwareVmRecoveryPlanInfo" + }, + "scriptInfo": { + "description": "Network and post script related information.", + "$ref": "#/definitions/VmwareScriptInfo" + } + }, + "description": "This represents the recovery spec for a Vmware VM. This contains the target location details as well as the network details." + }, + "VmwareRecoveryTarget": { + "type": "object", + "required": [ + "datastoreId", + "hostId", + "vCenterId" + ], + "properties": { + "hostId": { + "type": "string", + "description": "The host id where the VM is present." + }, + "datastoreId": { + "type": "string", + "description": "The datastore id where the VM is present." + }, + "vCenterId": { + "type": "string", + "description": "The vCenter id where the VM is present." + } + }, + "description": "This represents the hypervisor details." + }, + "VmwareScriptInfo": { + "type": "object", + "properties": { + "postScriptArgument": { + "description": "This contains the post script and attributes.", + "$ref": "#/definitions/PostScriptArgument" + }, + "networkScript": { + "type": "string", + "description": "This contains the network script. This is Rubrik specific script. This will be used to configure the network in the recovered VM." 
+ } + } + }, + "VmwareVmRecoveryPlanInfo": { + "type": "object", + "required": [ + "memoryMB", + "networkDeviceKeyToNicInfo", + "vmName" + ], + "properties": { + "memoryMB": { + "type": "integer", + "format": "int64", + "description": "The memory for the VM." + }, + "vmName": { + "type": "string", + "description": "Name of the virtual machine. If a virtual machine with the same name already exists, the name of the new vitual machine name is appended with the timestamp." + }, + "networkDeviceKeyToNicInfo": { + "type": "array", + "description": "It contains the list of network device key and the nework nic info.", + "items": { + "$ref": "#/definitions/VmwareNicInfoWithDeviceKey" + } + } + }, + "description": "This represents the network recovery plan for a vmware VM which can be used for AppFlows purposes." + }, + "AccountCredentialsType": { + "type": "string", + "description": "Types of account credentials which are used for the associated object store location.\n", + "enum": [ + "KeyBased", + "CrossAccountRoleBased" + ] + }, + "ArchivalLocationConnectJob": { + "type": "object", + "required": [ + "dataLocationId", + "jobId", + "locationType", + "name" + ], + "properties": { + "dataLocationId": { + "type": "string" + }, + "name": { + "type": "string" + }, + "jobId": { + "type": "string" + }, + "locationType": { + "type": "string" + }, + "host": { + "type": "string" + }, + "bucket": { + "type": "string", + "description": "Bucket name cannot contain whitespace or _\\\\/*?%.:|<> For AWS, bucket name also cannot contain capital letters or underscore.\n" + } + } + }, + "ArchivalLocationConnectJobListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/ArchivalLocationConnectJob" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "ArchivalLocationSummary": { + "type": "object", + "required": [ + "currentState", + "id", + "isActive", + "isComputeEnabled", + "isConsolidationEnabled", + "isTieringSupported", + "locationType", + "name", + "ownershipStatus", + "rawName" + ], + "properties": { + "id": { + "type": "string" + }, + "name": { + "type": "string", + "description": "Human-readable name of the archival location. Locations managed by Polaris have the suffix '(Managed By Polaris)'." + }, + "rawName": { + "type": "string", + "description": "Raw name of the archival location. Raw name does not contain the suffix '(Managed By Polaris)' for locations managed by Polaris." + }, + "locationType": { + "type": "string" + }, + "currentState": { + "type": "string", + "description": "Current connection state of this archival location." + }, + "isActive": { + "type": "boolean" + }, + "ipAddress": { + "type": "string" + }, + "polarisManagedId": { + "type": "string", + "description": "Optional field containing Polaris managed id of the Polaris managed Archival Locations. This field will be set only if the Archival Location is Polaris managed." 
+ }, + "bucket": { + "type": "string", + "description": "Bucket name cannot contain whitespace or _\\\\/*?%.:|<> For AWS, bucket name also cannot contain capital letters or underscore.\n" + }, + "ownershipStatus": { + "$ref": "#/definitions/DataLocationOwnershipStatus" + }, + "isRetentionLockedSnapshotPresent": { + "type": "boolean", + "description": "A Boolean that specifies whether a location contains snapshots that are protected by a Retention Lock SLA Domain. When this value is 'true,' the location contains snapshots protected by a Retention Lock SLA Domain." + }, + "isLegalHoldSnapshotPresent": { + "type": "boolean", + "description": "A Boolean that specifies whether a location contains snapshots that are placed on Legal Hold. When this value is 'true', the location contains snapshots that are placed on Legal Hold.\n" + }, + "isComputeEnabled": { + "type": "boolean", + "description": "A Boolean that specifies whether cloud compute is enabled for this location. When this value is 'true', cloud compute is enabled for this location.\n" + }, + "isTieringSupported": { + "type": "boolean", + "description": "Specifies whether the location supports tiering. When this value is 'true', the location supports tiering.\n" + }, + "isConsolidationEnabled": { + "type": "boolean", + "description": "Specifies whether consolidation is enabled for this location. When this value is 'true', the consolidation is enabled for this location.\n" + }, + "isBypassProxyEnabled": { + "type": "boolean", + "description": "Specifies whether the archival traffic to the object store location should bypass the system network proxy configuration if present. This setting only supports S3 compatible object store locations.\n" + }, + "storageClass": { + "type": "string", + "description": "Specifies the storage class configured for the archival location. Only applicable for Object Store locations.\n" + }, + "immutabilityLockSummary": { + "description": "Specifies the immutability lock policy for this archival location. Files in an archival location with an immutability lock cannot be altered or deleted for the duration of the immutability lock, which is specified by the immutability lock policy. The duration of an immutability lock cannot be reduced or removed once applied. Immutability Lock is only supported for Azure archival locations.\n", + "$ref": "#/definitions/ImmutabilityLockSummary" + } + } + }, + "ArchivalLocationSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/ArchivalLocationSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "ArchivalMigrateSnappableDefinition": { + "type": "object", + "required": [ + "sourceLocationId", + "targetLocationId" + ], + "properties": { + "sourceLocationId": { + "type": "string", + "description": "ID of the source archival location to migrate from." + }, + "targetLocationId": { + "type": "string", + "description": "ID of the target archival location to migrate to." + } + } + }, + "AwsCustomerAccountRoleSummary": { + "type": "object", + "required": [ + "awsAccountId", + "cloudAccountRoles" + ], + "properties": { + "awsAccountId": { + "type": "string", + "description": "Customer AWS account id." 
+ }, + "cloudAccountRoles": { + "type": "array", + "description": "List of cloud account role summaries for each IAM role in the AWS customer account.\n", + "items": { + "$ref": "#/definitions/CloudAccountRoleSummary" + } + } + } + }, + "AzureComputeSecret": { + "type": "object", + "required": [ + "clientSecret" + ], + "properties": { + "clientSecret": { + "type": "string", + "description": "Secret key of the Rubrik app in Azure Active Directory.", + "x-secret": true + } + } + }, + "AzureComputeSummary": { + "type": "object", + "required": [ + "clientId", + "containerName", + "environment", + "generalPurposeStorageAccountName", + "region", + "subscriptionId", + "tenantId" + ], + "properties": { + "tenantId": { + "type": "string", + "description": "Tenant ID of the Rubrik app in Azure Active Directory." + }, + "subscriptionId": { + "type": "string", + "description": "Subscription ID of the Azure subscription which will be used for instantiation." + }, + "clientId": { + "type": "string", + "description": "Client ID of the Rubrik app in Azure Active Directory." + }, + "region": { + "type": "string", + "description": "Azure region for cloud instantiation." + }, + "generalPurposeStorageAccountName": { + "type": "string", + "description": "Storage account name to be used for cloud instantiation. This storage account must be a General Purpose Azure Storage Account, and must be in the same region as above." + }, + "containerName": { + "type": "string", + "description": "Container name that will be used by Rubrik for cloud instantiation." + }, + "environment": { + "description": "Azure Environment.", + "$ref": "#/definitions/AzureSupportedEnvironment" + } + } + }, + "AzureCustomerAccountRoleSummary": { + "type": "object", + "required": [ + "cloudAccountRoles", + "customerTenantId", + "servicePrincipalId", + "subscriptionId" + ], + "properties": { + "customerTenantId": { + "type": "string", + "description": "Tenant ID of the Azure customer account." + }, + "servicePrincipalId": { + "type": "string", + "description": "Service principal ID of the Azure customer account." + }, + "cloudAccountRoles": { + "type": "array", + "description": "List of cloud account role summaries for each custom role in the Azure customer subscription.\n", + "items": { + "$ref": "#/definitions/CloudAccountRoleSummary" + } + }, + "subscriptionId": { + "type": "string", + "description": "Subscription ID of the Azure customer account." + } + } + }, + "AzureSupportedEnvironment": { + "type": "string", + "description": "Environments supported by Azure.\n", + "enum": [ + "AZURE", + "AZURE_CHINA", + "AZURE_GERMANY", + "AZURE_US_GOVERNMENT" + ] + }, + "CloudAccountRoleSummary": { + "type": "object", + "required": [ + "featureId", + "roleId" + ], + "properties": { + "featureId": { + "type": "string", + "description": "Feature ID of the cross-account roles." + }, + "roleId": { + "type": "string", + "description": "ID of the cross-account role created for a specific feature, such as cloudOn.\n" + } + } + }, + "CloudStorageColdTier": { + "type": "string", + "description": "The set of storage classes that support Cold tiering during smart or instant tiering.\n", + "enum": [ + "AzureArchive", + "Glacier", + "GlacierDeepArchive" + ] + }, + "CloudStorageRehydrationSpeed": { + "type": "string", + "description": "Specifies the retrieval speed option when retrieving data from the cold storage tier to the hot storage tier for restore purposes. 
Rubrik cannot directly restore data from the cold storage tier and the data must be first retrieved into the hot storage tier. This is applicable only for AWS S3 and Azure archival locations. For AWS, three speed options are supported -- AwsExpedited, AwsStandard, and AwsBulk. AwsBulk is the cheapest and slowest. AwsExpedited is the fastest and most expensive. AwsStandard is the recommended default value. For Azure the only option is AzureStandard.\n", + "enum": [ + "AwsBulk", + "AwsStandard", + "AwsExpedited", + "AzureStandard" + ] + }, + "DataLocationEndpointStatus": { + "type": "object", + "required": [ + "message", + "status" + ], + "properties": { + "status": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + } + } + }, + "DataLocationOwnershipStatus": { + "type": "string", + "description": "The ownership status that the current cluster has with respect to the data location.\n", + "enum": [ + "OwnerActive", + "Paused", + "Reader", + "Deleted", + "Disabled" + ] + }, + "DataLocationTeardownRequest": { + "type": "object", + "required": [ + "dataLocationId" + ], + "properties": { + "dataLocationId": { + "type": "string" + } + } + }, + "DcaLocationDefinition": { + "allOf": [ + { + "$ref": "#/definitions/DcaLocationSummary" + }, + { + "type": "object", + "required": [ + "certificateContent" + ], + "properties": { + "certificateContent": { + "type": "string", + "description": "Signed client certificate for connecting to CAP server." + }, + "pemFileContent": { + "type": "string", + "description": "Key for encryption using RSA key pair.", + "x-secret": true + }, + "kmsMasterKeyId": { + "type": "string", + "description": "KMS master key ID for encryption.", + "x-secret": true + } + } + } + ] + }, + "DcaLocationDetail": { + "type": "object", + "required": [ + "definition", + "id" + ], + "properties": { + "id": { + "type": "string" + }, + "definition": { + "$ref": "#/definitions/DcaLocationSummary" + }, + "readerLocationSummary": { + "description": "Status of the reader archival location.", + "$ref": "#/definitions/ReaderLocationSummary" + } + } + }, + "DcaLocationDetailListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/DcaLocationDetail" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "DcaLocationSummary": { + "type": "object", + "required": [ + "agency", + "bucket", + "capEndpoint", + "endpoint", + "mission", + "name", + "role", + "storageClass" + ], + "properties": { + "name": { + "type": "string", + "description": "Location name." + }, + "bucket": { + "type": "string", + "description": "Bucket name." + }, + "endpoint": { + "type": "string", + "description": "AWS endpoint URL." + }, + "role": { + "type": "string", + "description": "Name of IAM role to use for accessing DCA. Given to CAP server.\n" + }, + "agency": { + "type": "string", + "description": "Name of agency to pass to CAP server." + }, + "mission": { + "type": "string", + "description": "Name of mission to pass to CAP server." + }, + "tokenDurationMinutes": { + "type": "integer", + "format": "int32", + "description": "Duration of validity of temporary credentials. Allowed range: 15 to 60. Default: 60 minutes.\n" + }, + "capEndpoint": { + "type": "string", + "description": "URL of CAP credentials server." 
+ }, + "storageClass": { + "type": "string", + "description": "Storage class to associate with the bucket." + } + } + }, + "DcaReaderConnectDefinition": { + "allOf": [ + { + "$ref": "#/definitions/DcaLocationDefinition" + }, + { + "type": "object", + "properties": { + "shouldRecoverSnappableMetadataOnly": { + "type": "boolean", + "description": "A Boolean value that determines whether recovery from an archival location includes metadata from both the snapshot and the snappable. When the value is 'true,' recovery only includes metadata from the snappable. When the value is 'false,' recovery includes metadata for both the snappable and the snapshot.\n" + } + } + } + ] + }, + "DcaUpdateDefinition": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Location name." + }, + "role": { + "type": "string", + "description": "Name of IAM role to use for accessing DCA. Given to CAP server.\n" + }, + "agency": { + "type": "string", + "description": "Name of agency to pass to CAP server." + }, + "mission": { + "type": "string", + "description": "Name of mission to pass to CAP server." + }, + "tokenDurationMinutes": { + "type": "integer", + "format": "int32", + "description": "Duration of validity of temporary credentials. Allowed range: 15 to 60. Default: 60 minutes.\n" + }, + "capEndpoint": { + "type": "string", + "description": "URL of CAP credentials server." + }, + "certificateContent": { + "type": "string", + "description": "Signed client certificate for connecting to CAP server." + }, + "storageClass": { + "type": "string", + "description": "Storage class to associate with the bucket." + } + } + }, + "DefaultComputeNetworkConfig": { + "type": "object", + "required": [ + "securityGroupId", + "subnetId", + "vNetId" + ], + "properties": { + "subnetId": { + "type": "string", + "description": "ID of the subnet to assign to the Rubrik CDM instance." + }, + "vNetId": { + "type": "string", + "description": "ID of the virtual network to assign to the Rubrik CDM instance.\n" + }, + "securityGroupId": { + "type": "string", + "description": "ID of the security group to assign to the Rubrik CDM instance.\n" + }, + "resourceGroupId": { + "type": "string", + "description": "ID of the resource group to assign to the Rubrik CDM instance.\n" + } + } + }, + "EncryptionType": { + "type": "string", + "description": "Methods used to encrypt an archival location.", + "enum": [ + "RSA_KEY_ENCRYPTION", + "KMS_ID_ENCRYPTION", + "RUBRIK_PASSWORD_ENCRYPTION" + ] + }, + "GatewayInfo": { + "type": "object", + "required": [ + "address", + "ports" + ], + "properties": { + "address": { + "type": "string" + }, + "ports": { + "type": "array", + "items": { + "type": "integer", + "format": "int32" + } + } + } + }, + "GlacierConfig": { + "type": "object", + "properties": { + "retrievalTier": { + "$ref": "#/definitions/GlacierRetrievalTier" + }, + "vaultLockPolicy": { + "description": "If this field is set, a vault lock with the specified policy will be initiated on the Glacier vault.\n", + "$ref": "#/definitions/GlacierVaultLockPolicy" + } + } + }, + "GlacierReaderConnectConfig": { + "type": "object", + "properties": { + "retrievalTier": { + "$ref": "#/definitions/GlacierRetrievalTier" + } + } + }, + "GlacierRetrievalTier": { + "type": "string", + "description": "The retrieval tier to use for downloads from Amazon Glacier. 
This affects the waiting time for the associated Glacier retrieval job to complete.\n", + "enum": [ + "BulkRetrieval", + "StandardRetrieval", + "ExpeditedRetrieval" + ] + }, + "GlacierSummary": { + "type": "object", + "properties": { + "retrievalTier": { + "$ref": "#/definitions/GlacierRetrievalTier" + }, + "vaultLockStatus": { + "description": "Description of the current vault lock status of the Glacier vault.\n", + "$ref": "#/definitions/GlacierVaultLockStatus" + } + } + }, + "GlacierVaultLockOperation": { + "type": "object", + "required": [ + "operation" + ], + "properties": { + "operation": { + "description": "'The Glacier vault lock operation to perform: Complete or Abort.'\n", + "$ref": "#/definitions/GlacierVaultLockOperationType" + } + } + }, + "GlacierVaultLockOperationType": { + "type": "string", + "description": "The type of vault lock operation to perform: Complete or Abort.", + "enum": [ + "Complete", + "Abort" + ] + }, + "GlacierVaultLockPolicy": { + "type": "object", + "properties": { + "fileLockPeriodInDays": { + "type": "integer", + "format": "int32", + "description": "The minimum age that a Glacier archive must be before it can be deleted from Glacier.\n" + } + } + }, + "GlacierVaultLockStatus": { + "allOf": [ + { + "$ref": "#/definitions/GlacierVaultLockPolicy" + }, + { + "type": "object", + "required": [ + "status" + ], + "properties": { + "status": { + "$ref": "#/definitions/GlacierVaultLockStatusType" + }, + "expiryTime": { + "type": "string", + "format": "date-time", + "description": "For an InProgress vault lock, the time at which, if not completed, the vault lock will expire, returning the Glacier vault to the Unlocked state.\n" + } + } + } + ] + }, + "GlacierVaultLockStatusType": { + "type": "string", + "description": "The current state of the vault lock for a given Glacier vault.", + "enum": [ + "Unlocked", + "InProgress", + "Locked" + ] + }, + "ImmutabilityLockSummary": { + "type": "object", + "properties": { + "bucketLockingPolicyDurationInDays": { + "type": "integer", + "format": "int64", + "description": "The duration of the immutability lock specified as an integer number of days.", + "minimum": 1 + } + } + }, + "LocationProxyConfig": { + "allOf": [ + { + "$ref": "#/definitions/LocationProxySummary" + }, + { + "type": "object", + "properties": { + "password": { + "type": "string", + "description": "The proxy server password.", + "x-secret": true + } + } + } + ] + }, + "LocationProxyConfigPatch": { + "type": "object", + "properties": { + "protocol": { + "$ref": "#/definitions/ProxyProtocol" + }, + "proxyServer": { + "type": "string", + "description": "The proxy server IP or FQDN (Fully qualified domain name)." + }, + "portNumber": { + "type": "integer", + "format": "int32", + "description": "The proxy server port number. If it is not specified, the default port number based on the proxy protocol will be used.\n" + }, + "userName": { + "type": "string", + "description": "The proxy server user name." 
+ }, + "password": { + "type": "string", + "description": "The proxy server password.", + "x-secret": true + } + } + }, + "LocationProxyConfigs": { + "type": "object", + "properties": { + "archivalProxyConfig": { + "description": "Archival proxy configuration for the object store location.\n", + "$ref": "#/definitions/LocationProxyConfig" + }, + "computeProxyConfig": { + "description": "Compute proxy configuration for the object store location.\n", + "$ref": "#/definitions/LocationProxyConfig" + } + } + }, + "LocationProxySummary": { + "type": "object", + "required": [ + "protocol", + "proxyServer" + ], + "properties": { + "protocol": { + "$ref": "#/definitions/ProxyProtocol" + }, + "proxyServer": { + "type": "string", + "description": "The proxy server IP or FQDN (Fully qualified domain name)." + }, + "portNumber": { + "type": "integer", + "format": "int32", + "description": "The proxy server port number. If it is not specified, the default port number based on the proxy protocol will be used.\n" + }, + "userName": { + "type": "string", + "description": "The proxy server user name." + } + } + }, + "NfsLocationCreationDefinition": { + "allOf": [ + { + "$ref": "#/definitions/NfsLocationDefinition" + }, + { + "type": "object", + "properties": { + "encryptionPassword": { + "type": "string", + "description": "Encryption password for the specified archival location. The password cannot be changed after creating the archival location. Do not specify a password when the value of 'disableEncryption' is 'true'.\n", + "x-secret": true + }, + "disableEncryption": { + "type": "boolean", + "description": "An optional Boolean that determines whether encryption is disabled for the specified archival location. When this value is 'true', encryption is disabled for the archival location.\n", + "default": false + } + } + } + ] + }, + "NfsLocationDefinition": { + "type": "object", + "required": [ + "bucket", + "exportDir", + "fileLockPeriodInSeconds", + "host", + "name" + ], + "properties": { + "host": { + "type": "string", + "description": "The fully qualified host name or the IP address of the NFS server.\n" + }, + "exportDir": { + "type": "string", + "description": "The path to the exported directory on the NFS server. The Rubrik cluster mounts the directory as an NFS client.\n" + }, + "nfsVersion": { + "type": "integer", + "format": "int32", + "description": "NFS protocol version to communicate with the NFS server. Rubrik cluster supports only NFS protocol version 3.\n" + }, + "authType": { + "type": "string", + "description": "Authorization type to connect with the NFS server." + }, + "otherNfsOptions": { + "type": "string", + "description": "Additional NFS options when using NFS protocol." + }, + "fileLockPeriodInSeconds": { + "type": "integer", + "format": "int64", + "description": "Specifies a minimum period during which files cannot be deleted by the Rubrik cluster.\n" + }, + "bucket": { + "type": "string", + "description": "Name of the bucket, or directory, that is created under the directory specified by exportDir, on the NFS server. This bucket is the root directory for the archival location. The bucket name cannot be edited and cannot contain whitespace or any of these characters _\\\\/*?%.:|<>.\n" + }, + "name": { + "type": "string", + "description": "Name of the archival location. The name of the archival location is editable.\n" + }, + "isConsolidationEnabled": { + "type": "boolean", + "description": "Specifies whether consolidation should be enabled for this location. 
When this value is 'true', the consolidation is enabled for this location.\n" + } + } + }, + "NfsLocationDetail": { + "type": "object", + "required": [ + "availableSpace", + "definition", + "id" + ], + "properties": { + "id": { + "type": "string" + }, + "definition": { + "$ref": "#/definitions/NfsLocationDefinition" + }, + "availableSpace": { + "type": "integer", + "format": "int64", + "description": "Available space on the archival location in bytes. A value of -1 indicates information is not available.\n" + }, + "polarisManagedId": { + "type": "string", + "description": "Optional field containing Polaris managed id of the Polaris managed Archival Locations. This field will be set only if the Archival Location is Polaris managed." + }, + "readerLocationSummary": { + "description": "Status of the reader archival location.", + "$ref": "#/definitions/ReaderLocationSummary" + } + } + }, + "NfsLocationDetailListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/NfsLocationDetail" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "NfsLocationReconnectSpec": { + "type": "object", + "required": [ + "bucket", + "exportDir", + "fileLockPeriodInSeconds", + "host", + "name" + ], + "properties": { + "host": { + "type": "string", + "description": "Host name of Nfs server." + }, + "exportDir": { + "type": "string", + "description": "The export directory path within the Nfs server." + }, + "nfsVersion": { + "type": "integer", + "format": "int32", + "description": "NFS protocol version to communicate with Nfs server when using NFS mount protocol." + }, + "authType": { + "type": "string", + "description": "Authorization type to connect with Nfs host." + }, + "otherNfsOptions": { + "type": "string", + "description": "Additional NFS options when using NFS protocol." + }, + "bucket": { + "type": "string", + "description": "Bucket name cannot contain whitespace or _\\\\/*?%.:|<>." + }, + "fileLockPeriodInSeconds": { + "type": "integer", + "format": "int64", + "description": "File lock period for the Nfs server." + }, + "name": { + "type": "string", + "description": "Name of this archival location." + }, + "encryptionPassword": { + "type": "string", + "description": "Password for encrypting data for the specified archival location.", + "x-secret": true + } + } + }, + "NfsLocationUpdate": { + "type": "object", + "properties": { + "host": { + "type": "string" + }, + "exportDir": { + "type": "string" + }, + "nfsVersion": { + "type": "integer", + "format": "int32" + }, + "authType": { + "type": "string" + }, + "fileLockPeriodInSeconds": { + "type": "integer", + "format": "int64" + }, + "name": { + "type": "string" + }, + "isConsolidationEnabled": { + "type": "boolean" + } + } + }, + "NfsReaderConnectDefinition": { + "type": "object", + "required": [ + "bucket", + "exportDir", + "fileLockPeriodInSeconds", + "host", + "name" + ], + "properties": { + "host": { + "type": "string", + "description": "Host name of Nfs server." + }, + "exportDir": { + "type": "string", + "description": "The export directory path within the Nfs server." + }, + "nfsVersion": { + "type": "integer", + "format": "int32", + "description": "NFS protocol version to communicate with Nfs server when using NFS mount protocol." 
+ }, + "authType": { + "type": "string", + "description": "Authorization type to connect with Nfs host." + }, + "otherNfsOptions": { + "type": "string", + "description": "Additional NFS options when using NFS protocol." + }, + "bucket": { + "type": "string", + "description": "Bucket name cannot contain whitespace or _\\\\/*?%.:|<>." + }, + "fileLockPeriodInSeconds": { + "type": "integer", + "format": "int64", + "description": "File lock period for the Nfs server." + }, + "name": { + "type": "string", + "description": "Name of this archival location." + }, + "encryptionPassword": { + "type": "string", + "description": "Password for encrypting data for the specified archival location.", + "x-secret": true + }, + "shouldRecoverSnappableMetadataOnly": { + "type": "boolean", + "description": "A Boolean value that determines whether recovery from an archival location includes metadata from both the snapshot and the snappable. When the value is 'true,' recovery only includes metadata from the snappable. When the value is 'false,' recovery includes metadata for both the snappable and the snapshot.\n" + } + } + }, + "ObjectStoreLocationDefinition": { + "allOf": [ + { + "$ref": "#/definitions/ObjectStoreLocationSummary" + }, + { + "$ref": "#/definitions/LocationProxyConfigs" + }, + { + "type": "object", + "required": [ + "accessKey", + "secretKey" + ], + "properties": { + "accessKey": { + "type": "string" + }, + "secretKey": { + "type": "string", + "description": "The secret key to access the archival location.", + "x-secret": true + }, + "glacierConfig": { + "$ref": "#/definitions/GlacierConfig" + }, + "azureComputeSecret": { + "$ref": "#/definitions/AzureComputeSecret" + }, + "encryptionPassword": { + "type": "string", + "description": "Password for encrypting data for the specified archival location.", + "x-secret": true + }, + "pemFileContent": { + "type": "string", + "x-secret": true + }, + "kmsMasterKeyId": { + "type": "string", + "x-secret": true + } + } + } + ] + }, + "ObjectStoreLocationDetail": { + "type": "object", + "required": [ + "definition", + "id" + ], + "properties": { + "id": { + "type": "string" + }, + "polarisManagedId": { + "type": "string", + "description": "An optional field that contains the ID of the Polaris managed archival locations. This field is only set for archival locations managed by Polaris.\n" + }, + "definition": { + "$ref": "#/definitions/ObjectStoreLocationSummary" + }, + "glacierStatus": { + "$ref": "#/definitions/GlacierSummary" + }, + "archivalProxySummary": { + "description": "Archival proxy summary for the object store location.\n", + "$ref": "#/definitions/LocationProxySummary" + }, + "computeProxySummary": { + "description": "Compute proxy summary for the object store location.\n", + "$ref": "#/definitions/LocationProxySummary" + }, + "readerLocationSummary": { + "description": "Status of the reader archival location.", + "$ref": "#/definitions/ReaderLocationSummary" + } + } + }, + "ObjectStoreLocationDetailListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/ObjectStoreLocationDetail" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." 
+ } + } + }, + "ObjectStoreLocationSummary": { + "type": "object", + "required": [ + "bucket", + "name", + "objectStoreType" + ], + "properties": { + "objectStoreType": { + "type": "string" + }, + "name": { + "type": "string" + }, + "accessKey": { + "type": "string", + "description": "The customer account access key. This credential is required when the account credentials type is KeyBased.\n" + }, + "bucket": { + "type": "string", + "description": "Bucket name cannot contain whitespace or _\\\\/*?%.:|<> For AWS, bucket name also cannot contain capital letters or underscore.\n" + }, + "defaultRegion": { + "type": "string" + }, + "endpoint": { + "type": "string" + }, + "numBuckets": { + "type": "integer", + "format": "int64" + }, + "isComputeEnabled": { + "type": "boolean" + }, + "isConsolidationEnabled": { + "type": "boolean" + }, + "isBypassProxyEnabled": { + "type": "boolean", + "description": "This field determines whether the archival traffic to the object store location bypasses the system network proxy configuration.\n" + }, + "defaultComputeNetworkConfig": { + "$ref": "#/definitions/DefaultComputeNetworkConfig" + }, + "storageClass": { + "type": "string" + }, + "azureComputeSummary": { + "$ref": "#/definitions/AzureComputeSummary" + }, + "encryptionType": { + "description": "The encryption method used for client-side encryption for this object storage location.\n", + "$ref": "#/definitions/EncryptionType" + }, + "accountCredentialsType": { + "description": "This field stores which type of account credentials are used for this object store location. This is only required when the account credentials are not KeyBased.\n", + "$ref": "#/definitions/AccountCredentialsType" + }, + "isAppFlowsOrCloudOnSupported": { + "type": "boolean" + }, + "awsArchivalRoleArn": { + "type": "string", + "description": "The AWS IAM based customer account role ARN for Archival feature. This field is only populated when the credentials type is CrossAccount and the cloud vendor is AWS.\n" + }, + "cloudRehydrationSpeed": { + "description": "Specifies the retrieval speed option when retrieving data from the cold storage tier to the hot storage tier for restore purposes. Rubrik cannot directly restore data from the cold storage tier and the data must be first retrieved into the hot storage tier. This is applicable only for AWS S3 and Azure archival locations. For AWS, three speed options are supported -- AwsExpedited, AwsStandard, and AwsBulk. AwsBulk is the cheapest and slowest. AwsExpedited is the fastest and most expensive. AwsStandard is the recommended default value. For Azure the only option is AzureStandard.\n", + "$ref": "#/definitions/CloudStorageRehydrationSpeed" + }, + "immutabilityLockSummary": { + "description": "Specifies the immutability lock policy for this archival location. Files in an archival location with an immutability lock cannot be altered or deleted for the duration of the immutability lock, which is specified by the immutability lock policy. The duration of an immutability lock cannot be reduced or removed once applied. Immutability Lock is only supported for Azure archival locations.\n", + "$ref": "#/definitions/ImmutabilityLockSummary" + } + } + }, + "ObjectStoreReaderConnectDefinition": { + "allOf": [ + { + "$ref": "#/definitions/LocationProxyConfigs" + }, + { + "type": "object", + "required": [ + "accessKey", + "bucket", + "name", + "objectStoreType", + "secretKey" + ], + "properties": { + "objectStoreType": { + "type": "string", + "description": "Type of object store location." 
+ }, + "name": { + "type": "string", + "description": "Name of object store location." + }, + "accessKey": { + "type": "string", + "description": "Access key for the specified object store location." + }, + "secretKey": { + "type": "string", + "description": "Secret key for the specified object store location.", + "x-secret": true + }, + "bucket": { + "type": "string", + "description": "Name for the bucket. For all locations, the name cannot contain whitespace, or any of the following characters: _\\\\/*?%.:|<>. For AWS, the bucket name also cannot contain capital letters or an underscore character.\n" + }, + "pemFileContent": { + "type": "string", + "description": "Contents of PEM file to use for encryption.", + "x-secret": true + }, + "kmsMasterKeyId": { + "type": "string", + "description": "KMS master key ID for encryption on AWS object store locations.\n", + "x-secret": true + }, + "defaultRegion": { + "type": "string", + "description": "Default region for the object store location." + }, + "bucketRegion": { + "type": "string", + "description": "Bucket region for the object store location." + }, + "endpoint": { + "type": "string", + "description": "Endpoint for the specified object store location." + }, + "isComputeEnabled": { + "type": "boolean", + "description": "Boolean value that determines whether to enable compute services for the specified object store location. Use true to enable compute services and false to disable compute services.\n" + }, + "isBypassProxyEnabled": { + "type": "boolean", + "description": "Determines whether the archival traffic to the object store location bypasses the system network proxy configuration if present. This setting only supports S3 compatible object store locations.\n" + }, + "encryptionPassword": { + "type": "string", + "description": "Password for encrypting data for the specified object store location.\n", + "x-secret": true + }, + "defaultComputeNetworkConfig": { + "description": "Default network configuration to use when compute services are enabled.\n", + "$ref": "#/definitions/DefaultComputeNetworkConfig" + }, + "storageClass": { + "type": "string", + "description": "Storage class to associate with the bucket." + }, + "glacierReaderConnectConfig": { + "description": "Glacier-specific configuration to use when connecting to a Glacier location.\n", + "$ref": "#/definitions/GlacierReaderConnectConfig" + }, + "azureComputeSummary": { + "$ref": "#/definitions/AzureComputeSummary" + }, + "azureComputeSecret": { + "$ref": "#/definitions/AzureComputeSecret" + }, + "shouldRecoverSnappableMetadataOnly": { + "type": "boolean", + "description": "A Boolean value that determines whether recovery from an archival location includes metadata from both the snapshot and the snappable. When the value is 'true,' recovery only includes metadata from the snappable. When the value is 'false,' recovery includes metadata for both the snappable and the snapshot.\n" + }, + "shouldSkipScheduleRecoverArchivedMetadataJob": { + "type": "boolean", + "description": "A Boolean value that determines whether to schedule the archival recovery job. When the value is 'false,' the recovery job is scheduled normally. When the value is 'true,' the recovery job is not scheduled. The default behavior is to schedule the recovery job.\n" + }, + "cloudRehydrationSpeed": { + "description": "Specifies the retrieval speed option when retrieving data from the cold storage tier to the hot storage tier for restore purposes. 
Rubrik cannot directly restore data from the cold storage tier and the data must be first retrieved into the hot storage tier. This is applicable only for AWS S3 and Azure archival locations. For AWS, three speed options are supported -- AwsExpedited, AwsStandard, and AwsBulk. AwsBulk is the cheapest and slowest. AwsExpedited is the fastest and most expensive. AwsStandard is the recommended default value. For Azure the only option is AzureStandard.\n", + "$ref": "#/definitions/CloudStorageRehydrationSpeed" + } + } + } + ] + }, + "ObjectStoreReconnectDefinition": { + "allOf": [ + { + "$ref": "#/definitions/LocationProxyConfigs" + }, + { + "type": "object", + "required": [ + "accessKey", + "bucket", + "name", + "objectStoreType", + "secretKey" + ], + "properties": { + "objectStoreType": { + "type": "string", + "description": "Type of object store location." + }, + "name": { + "type": "string", + "description": "Name of object store location." + }, + "accessKey": { + "type": "string", + "description": "Access key for the specified object store location." + }, + "secretKey": { + "type": "string", + "description": "Secret key for the specified object store location." + }, + "bucket": { + "type": "string", + "description": "Name for the bucket. For all locations, the name cannot contain whitespace, or any of the following characters: _\\\\/*?%.:|<>. For AWS, the bucket name also cannot contain capital letters or an underscore character.\n" + }, + "pemFileContent": { + "type": "string", + "description": "Contents of PEM file to use for encryption.", + "x-secret": true + }, + "kmsMasterKeyId": { + "type": "string", + "description": "KMS master key ID for encryption on AWS object store locations.\n", + "x-secret": true + }, + "defaultRegion": { + "type": "string", + "description": "Default region for the object store location." + }, + "bucketRegion": { + "type": "string", + "description": "Bucket region for the object store location." + }, + "endpoint": { + "type": "string", + "description": "Endpoint for the specified object store location." + }, + "isComputeEnabled": { + "type": "boolean", + "description": "Boolean value that determines whether to enable compute services for the specified object store location. Use true to enable compute services and false to disable compute services.\n" + }, + "encryptionPassword": { + "type": "string", + "description": "Password for encrypting data for the specified object store location.\n", + "x-secret": true + }, + "defaultComputeNetworkConfig": { + "description": "Default network configuration to use when compute services are enabled.\n", + "$ref": "#/definitions/DefaultComputeNetworkConfig" + } + } + } + ] + }, + "ObjectStoreUpdateDefinition": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "accessKey": { + "type": "string" + }, + "secretKey": { + "type": "string", + "x-secret": true + }, + "endpoint": { + "type": "string" + }, + "numBuckets": { + "type": "integer", + "format": "int64" + }, + "isComputeEnabled": { + "type": "boolean" + }, + "isConsolidationEnabled": { + "type": "boolean" + }, + "isBypassProxyEnabled": { + "type": "boolean", + "description": "Determines whether the archival traffic to the object store location bypasses the system network proxy configuration if present. 
This setting only supports S3 compatible object store locations.\n" + }, + "defaultComputeNetworkConfig": { + "$ref": "#/definitions/DefaultComputeNetworkConfig" + }, + "storageClass": { + "type": "string" + }, + "glacierConfig": { + "$ref": "#/definitions/GlacierConfig" + }, + "azureComputeSummary": { + "$ref": "#/definitions/AzureComputeSummary" + }, + "azureComputeSecret": { + "$ref": "#/definitions/AzureComputeSecret" + }, + "archivalProxyConfig": { + "$ref": "#/definitions/LocationProxyConfigPatch" + }, + "computeProxyConfig": { + "$ref": "#/definitions/LocationProxyConfigPatch" + }, + "cloudRehydrationSpeed": { + "description": "Specifies the retrieval speed option when retrieving data from the cold storage tier to the hot storage tier for restore purposes. Rubrik cannot directly restore data from the cold storage tier and the data must be first retrieved into the hot storage tier. This is applicable only for AWS S3 and Azure archival locations. For AWS, three speed options are supported -- AwsExpedited, AwsStandard, and AwsBulk. AwsBulk is the cheapest and slowest. AwsExpedited is the fastest and most expensive. AwsStandard is the recommended default value. For Azure the only option is AzureStandard.\n", + "$ref": "#/definitions/CloudStorageRehydrationSpeed" + }, + "immutabilityLockSummary": { + "description": "Specifies the immutability lock policy for this archival location. Files in an archival location with an immutability lock cannot be altered or deleted for the duration of the immutability lock, which is specified by the immutability lock policy. The duration of an immutability lock cannot be reduced or removed once applied. Immutability Lock is only supported for Azure archival locations.\n", + "$ref": "#/definitions/ImmutabilityLockSummary" + } + } + }, + "ProxyProtocol": { + "type": "string", + "description": "Protocol used to communicate with proxy server.\n", + "enum": [ + "HTTP", + "HTTPS", + "SOCKS5" + ] + }, + "QstarLocationDefinition": { + "type": "object", + "required": [ + "bucket", + "host", + "integralVolume", + "mountProtocol", + "name" + ], + "properties": { + "host": { + "type": "string", + "description": "Hostname of the QStar server." + }, + "integralVolume": { + "type": "string", + "description": "QStar integral volume to mount." + }, + "bucket": { + "type": "string", + "description": "Bucket under the integral volume. The name cannot contain whitespace or _\\\\/*?%.:|<>." + }, + "name": { + "type": "string", + "description": "Name of this archival location." + }, + "mountProtocol": { + "type": "string", + "description": "Protocol to connect with the QStar server.", + "enum": [ + "NFS", + "CIFS" + ] + }, + "nfsVersion": { + "type": "integer", + "format": "int32", + "description": "NFS protocol version to communicate with QStar server when using NFS mount protocol." + }, + "otherNfsOptions": { + "type": "string", + "description": "Additional NFS options when using NFS protocol." + }, + "otherCifsOptions": { + "type": "string", + "description": "Additional CIFS options when using CIFS protocol." + }, + "port": { + "type": "string", + "description": "The QStar server port." + } + } + }, + "QstarLocationDefinitionWithCredential": { + "allOf": [ + { + "$ref": "#/definitions/QstarLocationDefinition" + }, + { + "type": "object", + "required": [ + "encryptionPassword", + "password", + "username" + ], + "properties": { + "username": { + "type": "string", + "description": "User name credentials to access the QStar server." 
+ }, + "password": { + "type": "string", + "description": "Password credentials to access the QStar server.", + "x-secret": true + }, + "encryptionPassword": { + "type": "string", + "description": "Password for encrypting the QStar archival location.", + "x-secret": true + } + } + } + ] + }, + "QstarLocationDetail": { + "type": "object", + "required": [ + "availableSpace", + "definition", + "id" + ], + "properties": { + "id": { + "type": "string", + "description": "QStar archival location identifier." + }, + "definition": { + "description": "Details of the QStar archival location.", + "$ref": "#/definitions/QstarLocationDefinition" + }, + "availableSpace": { + "type": "integer", + "format": "int64", + "description": "Available space on the archival location in bytes. A value of -1 indicates information is not available.\n" + }, + "readerLocationSummary": { + "description": "Status of the reader archival location.", + "$ref": "#/definitions/ReaderLocationSummary" + } + } + }, + "QstarLocationSummary": { + "type": "object", + "required": [ + "availableSpace", + "definition", + "id" + ], + "properties": { + "id": { + "type": "string", + "description": "QStar archival location identifier." + }, + "definition": { + "description": "Details of the QStar archival location.", + "$ref": "#/definitions/QstarLocationDefinition" + }, + "availableSpace": { + "type": "integer", + "format": "int64", + "description": "Available space on the archival location in bytes. A value of -1 indicates information is not available.\n" + }, + "readerLocationSummary": { + "description": "Status of the reader archival location.", + "$ref": "#/definitions/ReaderLocationSummary" + } + } + }, + "QstarLocationSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/QstarLocationSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "QstarLocationUpdate": { + "type": "object", + "properties": { + "host": { + "type": "string", + "description": "Hostname of the QStar server." + }, + "name": { + "type": "string", + "description": "Name of this archival location." + }, + "username": { + "type": "string", + "description": "User name credentials to access the QStar server." + }, + "password": { + "type": "string", + "description": "Password credentials to access the QStar server.", + "x-secret": true + }, + "nfsVersion": { + "type": "integer", + "format": "int32", + "description": "NFS protocol version to communicate with QStar server when using NFS mount protocol." + }, + "otherNfsOptions": { + "type": "string", + "description": "Additional NFS options when using NFS protocol." + }, + "otherCifsOptions": { + "type": "string", + "description": "Additional CIFS options when using CIFS protocol." + } + } + }, + "QtarReaderConnectDefinition": { + "allOf": [ + { + "$ref": "#/definitions/QstarLocationDefinitionWithCredential" + }, + { + "type": "object", + "properties": { + "shouldRecoverSnappableMetadataOnly": { + "type": "boolean", + "description": "A Boolean value that determines whether recovery from an archival location includes metadata from both the snapshot and the snappable. When the value is 'true,' recovery only includes metadata from the snappable. 
When the value is 'false,' recovery includes metadata for both the snappable and the snapshot.\n" + } + } + } + ] + }, + "ReaderLocationRefreshState": { + "type": "string", + "description": "The refresh state of a reader location.", + "enum": [ + "NeverRefreshed", + "InProgress", + "NotRunning" + ] + }, + "ReaderLocationSummary": { + "type": "object", + "properties": { + "state": { + "$ref": "#/definitions/ReaderLocationRefreshState" + }, + "refreshedTime": { + "type": "string", + "format": "date-time", + "description": "Most recent refresh time for this reader location." + } + } + }, + "RecoverArchivedMetadataRequest": { + "type": "object", + "required": [ + "dataLocationId" + ], + "properties": { + "dataLocationId": { + "type": "string" + } + } + }, + "RecoverArchivedMetadataResponse": { + "type": "object", + "required": [ + "message", + "status" + ], + "properties": { + "status": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + } + } + }, + "RemoveNfsBucketRequest": { + "type": "object", + "required": [ + "bucketPrefix", + "locationDefinition", + "olderThanHours" + ], + "properties": { + "bucketPrefix": { + "type": "string" + }, + "olderThanHours": { + "type": "integer", + "format": "int32" + }, + "locationDefinition": { + "$ref": "#/definitions/NfsLocationDefinition" + } + } + }, + "RemoveObjectStoreBucketRequest": { + "type": "object", + "required": [ + "bucketPrefix", + "locationDefinition", + "olderThanHours" + ], + "properties": { + "bucketPrefix": { + "type": "string" + }, + "olderThanHours": { + "type": "integer", + "format": "int32" + }, + "locationDefinition": { + "$ref": "#/definitions/ObjectStoreLocationDefinition" + } + } + }, + "RemoveQstarBucketRequest": { + "type": "object", + "required": [ + "bucketPrefix", + "locationDefinition", + "olderThanHours" + ], + "properties": { + "bucketPrefix": { + "type": "string" + }, + "olderThanHours": { + "type": "integer", + "format": "int32" + }, + "locationDefinition": { + "$ref": "#/definitions/QstarLocationDefinitionWithCredential" + } + } + }, + "AdminPrivileges": { + "type": "object", + "properties": { + "fullAdmin": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "AuthorizationSummary": { + "type": "object", + "required": [ + "admin", + "complianceOfficer", + "endUser", + "gpsUser", + "infraAdmin", + "managedVolumeAdmin", + "managedVolumeUser", + "orgAdmin", + "organization", + "principal", + "readOnlyAdmin" + ], + "properties": { + "principal": { + "type": "string" + }, + "gpsUser": { + "$ref": "#/definitions/GpsUserPrivileges" + }, + "endUser": { + "$ref": "#/definitions/EndUserPrivileges" + }, + "managedVolumeAdmin": { + "$ref": "#/definitions/ManagedVolumeAdminPrivileges" + }, + "organization": { + "$ref": "#/definitions/OrganizationPrivileges" + }, + "admin": { + "$ref": "#/definitions/AdminPrivileges" + }, + "readOnlyAdmin": { + "$ref": "#/definitions/ReadOnlyAdminPrivileges" + }, + "orgAdmin": { + "$ref": "#/definitions/OrgAdminPrivileges" + }, + "managedVolumeUser": { + "$ref": "#/definitions/ManagedVolumeUserPrivileges" + }, + "complianceOfficer": { + "$ref": "#/definitions/ComplianceOfficerPrivileges" + }, + "infraAdmin": { + "$ref": "#/definitions/InfraAdminPrivileges" + } + } + }, + "ComplianceOfficerPrivileges": { + "type": "object", + "properties": { + "basic": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "EffectiveAdminPrivileges": { + "type": "object", + "properties": { + "fullAdmin": { + "type": "array", + "items": { + "type": 
"string" + } + } + } + }, + "EffectiveAdminRole": { + "type": "object", + "required": [ + "fullAdmin" + ], + "properties": { + "fullAdmin": { + "type": "boolean" + } + } + }, + "EffectiveAuthorizationRoles": { + "type": "object", + "required": [ + "admin", + "complianceOfficer", + "endUser", + "gpsUser", + "infraAdmin", + "managedVolumeAdmin", + "managedVolumeUser", + "orgAdmin", + "organization", + "principal", + "readOnlyAdmin" + ], + "properties": { + "principal": { + "type": "string" + }, + "gpsUser": { + "$ref": "#/definitions/EffectiveGpsUserRole" + }, + "endUser": { + "$ref": "#/definitions/EffectiveEndUserRole" + }, + "managedVolumeAdmin": { + "$ref": "#/definitions/EffectiveManagedVolumeAdminRole" + }, + "organization": { + "$ref": "#/definitions/EffectiveOrganizationRole" + }, + "admin": { + "$ref": "#/definitions/EffectiveAdminRole" + }, + "readOnlyAdmin": { + "$ref": "#/definitions/EffectiveReadOnlyAdminRole" + }, + "orgAdmin": { + "$ref": "#/definitions/EffectiveOrgAdminRole" + }, + "managedVolumeUser": { + "$ref": "#/definitions/EffectiveManagedVolumeUserRole" + }, + "complianceOfficer": { + "$ref": "#/definitions/EffectiveComplianceOfficerRole" + }, + "infraAdmin": { + "$ref": "#/definitions/EffectiveInfraAdminRole" + } + } + }, + "EffectiveAuthorizationSummary": { + "type": "object", + "required": [ + "admin", + "complianceOfficer", + "endUser", + "gpsUser", + "infraAdmin", + "managedVolumeAdmin", + "managedVolumeUser", + "orgAdmin", + "organization", + "principal", + "readOnlyAdmin" + ], + "properties": { + "principal": { + "type": "string" + }, + "gpsUser": { + "$ref": "#/definitions/EffectiveGpsUserPrivileges" + }, + "endUser": { + "$ref": "#/definitions/EffectiveEndUserPrivileges" + }, + "managedVolumeAdmin": { + "$ref": "#/definitions/EffectiveManagedVolumeAdminPrivileges" + }, + "organization": { + "$ref": "#/definitions/EffectiveOrganizationPrivileges" + }, + "admin": { + "$ref": "#/definitions/EffectiveAdminPrivileges" + }, + "readOnlyAdmin": { + "$ref": "#/definitions/EffectiveReadOnlyAdminPrivileges" + }, + "orgAdmin": { + "$ref": "#/definitions/EffectiveOrgAdminPrivileges" + }, + "managedVolumeUser": { + "$ref": "#/definitions/EffectiveManagedVolumeUserPrivileges" + }, + "complianceOfficer": { + "$ref": "#/definitions/EffectiveComplianceOfficerPrivileges" + }, + "infraAdmin": { + "$ref": "#/definitions/EffectiveInfraAdminPrivileges" + } + } + }, + "EffectiveAuthorizationsQuery": { + "type": "object", + "required": [ + "resources" + ], + "properties": { + "principal": { + "type": "string" + }, + "resources": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "EffectiveComplianceOfficerPrivileges": { + "type": "object", + "properties": { + "basic": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "EffectiveComplianceOfficerRole": { + "type": "object", + "required": [ + "basic" + ], + "properties": { + "basic": { + "type": "boolean" + } + } + }, + "EffectiveEndUserPrivileges": { + "type": "object", + "properties": { + "destructiveRestore": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageProtection": { + "type": "array", + "items": { + "type": "string" + } + }, + "onDemandSnapshot": { + "type": "array", + "items": { + "type": "string" + } + }, + "refreshDataSource": { + "type": "array", + "items": { + "type": "string" + } + }, + "fileDownload": { + "type": "array", + "items": { + "type": "string" + } + }, + "provisionOnInfra": { + "type": "array", + "items": { + "type": "string" + } + }, + "export": { 
+ "type": "array", + "items": { + "type": "string" + } + }, + "fileRestore": { + "type": "array", + "items": { + "type": "string" + } + }, + "basic": { + "type": "array", + "items": { + "type": "string" + } + }, + "viewReport": { + "type": "array", + "items": { + "type": "string" + } + }, + "liveMount": { + "type": "array", + "items": { + "type": "string" + } + }, + "downloadSnapshotFromArchive": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageSla": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "EffectiveEndUserRole": { + "type": "object", + "required": [ + "basic", + "destructiveRestore", + "downloadSnapshotFromArchive", + "export", + "fileDownload", + "fileRestore", + "liveMount", + "manageProtection", + "manageSla", + "onDemandSnapshot", + "provisionOnInfra", + "refreshDataSource", + "viewReport" + ], + "properties": { + "destructiveRestore": { + "type": "boolean" + }, + "manageProtection": { + "type": "boolean" + }, + "onDemandSnapshot": { + "type": "boolean" + }, + "refreshDataSource": { + "type": "boolean" + }, + "fileDownload": { + "type": "boolean" + }, + "provisionOnInfra": { + "type": "boolean" + }, + "export": { + "type": "boolean" + }, + "fileRestore": { + "type": "boolean" + }, + "basic": { + "type": "boolean" + }, + "viewReport": { + "type": "boolean" + }, + "liveMount": { + "type": "boolean" + }, + "downloadSnapshotFromArchive": { + "type": "boolean" + }, + "manageSla": { + "type": "boolean" + } + } + }, + "EffectiveGpsUserPrivileges": { + "type": "object", + "properties": { + "manageProtection": { + "type": "array", + "items": { + "type": "string" + } + }, + "fileDownload": { + "type": "array", + "items": { + "type": "string" + } + }, + "exportSnapshot": { + "type": "array", + "items": { + "type": "string" + } + }, + "downloadSnapshotFromArchive": { + "type": "array", + "items": { + "type": "string" + } + }, + "fullAdmin": { + "type": "array", + "items": { + "type": "string" + } + }, + "viewSupportSettings": { + "type": "array", + "items": { + "type": "string" + } + }, + "viewSla": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageNetworkSettings": { + "type": "array", + "items": { + "type": "string" + } + }, + "viewDataSource": { + "type": "array", + "items": { + "type": "string" + } + }, + "destructiveRestore": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageDataSource": { + "type": "array", + "items": { + "type": "string" + } + }, + "exportFiles": { + "type": "array", + "items": { + "type": "string" + } + }, + "viewNetworkSettings": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageSystemSettings": { + "type": "array", + "items": { + "type": "string" + } + }, + "onDemandSnapshot": { + "type": "array", + "items": { + "type": "string" + } + }, + "complianceOfficer": { + "type": "array", + "items": { + "type": "string" + } + }, + "deleteSnapshot": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageSupportSettings": { + "type": "array", + "items": { + "type": "string" + } + }, + "provisionOnInfra": { + "type": "array", + "items": { + "type": "string" + } + }, + "refreshDataSource": { + "type": "array", + "items": { + "type": "string" + } + }, + "viewSystemSettings": { + "type": "array", + "items": { + "type": "string" + } + }, + "liveMount": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "EffectiveGpsUserRole": { + "type": "object", + "required": [ + "complianceOfficer", + "deleteSnapshot", + "destructiveRestore", + 
"downloadSnapshotFromArchive", + "exportFiles", + "exportSnapshot", + "fileDownload", + "fullAdmin", + "liveMount", + "manageDataSource", + "manageNetworkSettings", + "manageProtection", + "manageSupportSettings", + "manageSystemSettings", + "onDemandSnapshot", + "provisionOnInfra", + "refreshDataSource", + "viewDataSource", + "viewNetworkSettings", + "viewSla", + "viewSupportSettings", + "viewSystemSettings" + ], + "properties": { + "manageProtection": { + "type": "boolean" + }, + "fileDownload": { + "type": "boolean" + }, + "exportSnapshot": { + "type": "boolean" + }, + "downloadSnapshotFromArchive": { + "type": "boolean" + }, + "fullAdmin": { + "type": "boolean" + }, + "viewSupportSettings": { + "type": "boolean" + }, + "viewSla": { + "type": "boolean" + }, + "manageNetworkSettings": { + "type": "boolean" + }, + "viewDataSource": { + "type": "boolean" + }, + "destructiveRestore": { + "type": "boolean" + }, + "manageDataSource": { + "type": "boolean" + }, + "exportFiles": { + "type": "boolean" + }, + "viewNetworkSettings": { + "type": "boolean" + }, + "manageSystemSettings": { + "type": "boolean" + }, + "onDemandSnapshot": { + "type": "boolean" + }, + "complianceOfficer": { + "type": "boolean" + }, + "deleteSnapshot": { + "type": "boolean" + }, + "manageSupportSettings": { + "type": "boolean" + }, + "provisionOnInfra": { + "type": "boolean" + }, + "refreshDataSource": { + "type": "boolean" + }, + "viewSystemSettings": { + "type": "boolean" + }, + "liveMount": { + "type": "boolean" + } + } + }, + "EffectiveInfraAdminPrivileges": { + "type": "object", + "properties": { + "manageDataSources": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageSupportSettings": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageAccess": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageSystemSettings": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageNetworkSettings": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "EffectiveInfraAdminRole": { + "type": "object", + "required": [ + "manageAccess", + "manageDataSources", + "manageNetworkSettings", + "manageSupportSettings", + "manageSystemSettings" + ], + "properties": { + "manageDataSources": { + "type": "boolean" + }, + "manageSupportSettings": { + "type": "boolean" + }, + "manageAccess": { + "type": "boolean" + }, + "manageSystemSettings": { + "type": "boolean" + }, + "manageNetworkSettings": { + "type": "boolean" + } + } + }, + "EffectiveManagedVolumeAdminPrivileges": { + "type": "object", + "properties": { + "basic": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "EffectiveManagedVolumeAdminRole": { + "type": "object", + "required": [ + "basic" + ], + "properties": { + "basic": { + "type": "boolean" + } + } + }, + "EffectiveManagedVolumeUserPrivileges": { + "type": "object", + "properties": { + "basic": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "EffectiveManagedVolumeUserRole": { + "type": "object", + "required": [ + "basic" + ], + "properties": { + "basic": { + "type": "boolean" + } + } + }, + "EffectiveOrgAdminPrivileges": { + "type": "object", + "properties": { + "manageAccess": { + "type": "array", + "items": { + "type": "string" + } + }, + "basicOrg": { + "type": "array", + "items": { + "type": "string" + } + }, + "useSla": { + "type": "array", + "items": { + "type": "string" + } + }, + "updateSla": { + "type": "array", + "items": { + "type": "string" + } + }, + "createUser": { + "type": 
"array", + "items": { + "type": "string" + } + }, + "createGlobal": { + "type": "array", + "items": { + "type": "string" + } + }, + "createSla": { + "type": "array", + "items": { + "type": "string" + } + }, + "basic": { + "type": "array", + "items": { + "type": "string" + } + }, + "registerHost": { + "type": "array", + "items": { + "type": "string" + } + }, + "deleteSnapshots": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageSla": { + "type": "array", + "items": { + "type": "string" + } + }, + "expireImmediatelyOnUnprotect": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "EffectiveOrgAdminRole": { + "type": "object", + "required": [ + "basic", + "basicOrg", + "createGlobal", + "createSla", + "createUser", + "deleteSnapshots", + "expireImmediatelyOnUnprotect", + "manageAccess", + "manageSla", + "registerHost", + "updateSla", + "useSla" + ], + "properties": { + "manageAccess": { + "type": "boolean" + }, + "basicOrg": { + "type": "boolean" + }, + "useSla": { + "type": "boolean" + }, + "updateSla": { + "type": "boolean" + }, + "createUser": { + "type": "boolean" + }, + "createGlobal": { + "type": "boolean" + }, + "createSla": { + "type": "boolean" + }, + "basic": { + "type": "boolean" + }, + "registerHost": { + "type": "boolean" + }, + "deleteSnapshots": { + "type": "boolean" + }, + "manageSla": { + "type": "boolean" + }, + "expireImmediatelyOnUnprotect": { + "type": "boolean" + } + } + }, + "EffectiveOrganizationPrivileges": { + "type": "object", + "properties": { + "manageCluster": { + "type": "array", + "items": { + "type": "string" + } + }, + "viewLocalLdapSerice": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageDataSource": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageAccess": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageResource": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageRestoreSourceBase": { + "type": "array", + "items": { + "type": "string" + } + }, + "useSla": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageSelf": { + "type": "array", + "items": { + "type": "string" + } + }, + "createGlobal": { + "type": "array", + "items": { + "type": "string" + } + }, + "addAuthDomain": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageAuthDomain": { + "type": "array", + "items": { + "type": "string" + } + }, + "viewOrg": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageRestoreSource": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageSla": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageRestoreDestination": { + "type": "array", + "items": { + "type": "string" + } + }, + "viewPrecannedReport": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "EffectiveOrganizationRole": { + "type": "object", + "required": [ + "addAuthDomain", + "createGlobal", + "manageAccess", + "manageAuthDomain", + "manageCluster", + "manageDataSource", + "manageResource", + "manageRestoreDestination", + "manageRestoreSource", + "manageRestoreSourceBase", + "manageSelf", + "manageSla", + "useSla", + "viewLocalLdapSerice", + "viewOrg", + "viewPrecannedReport" + ], + "properties": { + "manageCluster": { + "type": "boolean" + }, + "viewLocalLdapSerice": { + "type": "boolean" + }, + "manageDataSource": { + "type": "boolean" + }, + "manageAccess": { + "type": "boolean" + }, + "manageResource": { + "type": "boolean" + }, + "manageRestoreSourceBase": { + 
"type": "boolean" + }, + "useSla": { + "type": "boolean" + }, + "manageSelf": { + "type": "boolean" + }, + "createGlobal": { + "type": "boolean" + }, + "addAuthDomain": { + "type": "boolean" + }, + "manageAuthDomain": { + "type": "boolean" + }, + "viewOrg": { + "type": "boolean" + }, + "manageRestoreSource": { + "type": "boolean" + }, + "manageSla": { + "type": "boolean" + }, + "manageRestoreDestination": { + "type": "boolean" + }, + "viewPrecannedReport": { + "type": "boolean" + } + } + }, + "EffectiveReadOnlyAdminPrivileges": { + "type": "object", + "properties": { + "basic": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "EffectiveReadOnlyAdminRole": { + "type": "object", + "required": [ + "basic" + ], + "properties": { + "basic": { + "type": "boolean" + } + } + }, + "EndUserPrivileges": { + "type": "object", + "properties": { + "destructiveRestore": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageProtection": { + "type": "array", + "items": { + "type": "string" + } + }, + "onDemandSnapshot": { + "type": "array", + "items": { + "type": "string" + } + }, + "refreshDataSource": { + "type": "array", + "items": { + "type": "string" + } + }, + "fileDownload": { + "type": "array", + "items": { + "type": "string" + } + }, + "provisionOnInfra": { + "type": "array", + "items": { + "type": "string" + } + }, + "export": { + "type": "array", + "items": { + "type": "string" + } + }, + "fileRestore": { + "type": "array", + "items": { + "type": "string" + } + }, + "basic": { + "type": "array", + "items": { + "type": "string" + } + }, + "viewReport": { + "type": "array", + "items": { + "type": "string" + } + }, + "liveMount": { + "type": "array", + "items": { + "type": "string" + } + }, + "downloadSnapshotFromArchive": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageSla": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "GpsUserPrivileges": { + "type": "object", + "properties": { + "manageProtection": { + "type": "array", + "items": { + "type": "string" + } + }, + "fileDownload": { + "type": "array", + "items": { + "type": "string" + } + }, + "exportSnapshot": { + "type": "array", + "items": { + "type": "string" + } + }, + "downloadSnapshotFromArchive": { + "type": "array", + "items": { + "type": "string" + } + }, + "fullAdmin": { + "type": "array", + "items": { + "type": "string" + } + }, + "viewSupportSettings": { + "type": "array", + "items": { + "type": "string" + } + }, + "viewSla": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageNetworkSettings": { + "type": "array", + "items": { + "type": "string" + } + }, + "viewDataSource": { + "type": "array", + "items": { + "type": "string" + } + }, + "destructiveRestore": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageDataSource": { + "type": "array", + "items": { + "type": "string" + } + }, + "exportFiles": { + "type": "array", + "items": { + "type": "string" + } + }, + "viewNetworkSettings": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageSystemSettings": { + "type": "array", + "items": { + "type": "string" + } + }, + "onDemandSnapshot": { + "type": "array", + "items": { + "type": "string" + } + }, + "complianceOfficer": { + "type": "array", + "items": { + "type": "string" + } + }, + "deleteSnapshot": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageSupportSettings": { + "type": "array", + "items": { + "type": "string" + } + }, + "provisionOnInfra": { + "type": "array", + 
"items": { + "type": "string" + } + }, + "refreshDataSource": { + "type": "array", + "items": { + "type": "string" + } + }, + "viewSystemSettings": { + "type": "array", + "items": { + "type": "string" + } + }, + "liveMount": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "InfraAdminPrivileges": { + "type": "object", + "properties": { + "manageDataSources": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageSupportSettings": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageAccess": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageSystemSettings": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageNetworkSettings": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "ManagedVolumeAdminPrivileges": { + "type": "object", + "properties": { + "basic": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "ManagedVolumeUserPrivileges": { + "type": "object", + "properties": { + "basic": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "OrgAdminPrivileges": { + "type": "object", + "properties": { + "manageAccess": { + "type": "array", + "items": { + "type": "string" + } + }, + "basicOrg": { + "type": "array", + "items": { + "type": "string" + } + }, + "useSla": { + "type": "array", + "items": { + "type": "string" + } + }, + "basic": { + "type": "array", + "items": { + "type": "string" + } + }, + "registerHost": { + "type": "array", + "items": { + "type": "string" + } + }, + "deleteSnapshots": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageSla": { + "type": "array", + "items": { + "type": "string" + } + }, + "expireImmediatelyOnUnprotect": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "OrganizationPrivileges": { + "type": "object", + "properties": { + "manageCluster": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageAccess": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageResource": { + "type": "array", + "items": { + "type": "string" + } + }, + "useSla": { + "type": "array", + "items": { + "type": "string" + } + }, + "addAuthDomain": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageRestoreSource": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageSla": { + "type": "array", + "items": { + "type": "string" + } + }, + "manageRestoreDestination": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "ReadOnlyAdminPrivileges": { + "type": "object", + "properties": { + "basic": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "AuthorizationQuery": { + "type": "object", + "required": [ + "roleIds" + ], + "properties": { + "roleIds": { + "type": "array", + "description": "An array of role IDs to perform an authorization query on.", + "items": { + "type": "string" + } + } + } + }, + "AuthorizationSpecification": { + "type": "object", + "required": [ + "privilege", + "resources" + ], + "properties": { + "privilege": { + "type": "string", + "description": "The privilege to grant." + }, + "resources": { + "type": "array", + "items": { + "type": "string", + "description": "The resource to which the privilege grants access." 
+ } + } + } + }, + "AuthorizationSpecifications": { + "type": "object", + "required": [ + "authorizationSpecifications" + ], + "properties": { + "authorizationSpecifications": { + "type": "array", + "items": { + "$ref": "#/definitions/AuthorizationSpecification" + } + } + } + }, + "RoleAuthorizationSpecification": { + "allOf": [ + { + "$ref": "#/definitions/AuthorizationSpecifications" + }, + { + "type": "object", + "properties": { + "roleTemplate": { + "type": "string", + "description": "The role template that is the source of the privileges." + } + } + } + ] + }, + "RoleAuthorizationSummary": { + "allOf": [ + { + "$ref": "#/definitions/AuthorizationSpecifications" + }, + { + "type": "object", + "required": [ + "organizationId", + "roleId", + "roleTemplate" + ], + "properties": { + "roleId": { + "type": "string", + "description": "The ID of the role with the specified authorizations." + }, + "roleTemplate": { + "type": "string", + "description": "The role template that is the source of the authorizations." + }, + "organizationId": { + "type": "string", + "description": "The organization ID of the organization that owns the role." + } + } + } + ] + }, + "RoleAuthorizationSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/RoleAuthorizationSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "AccountListSortAttribute": { + "type": "string", + "description": "Attributes that are available to use when sorting query results for AWS account instance objects.", + "enum": [ + "Name", + "Status" + ] + }, + "AwsAccountCreate": { + "allOf": [ + { + "$ref": "#/definitions/BaseAwsAccount" + }, + { + "type": "object", + "required": [ + "secretKey" + ], + "properties": { + "secretKey": { + "type": "string", + "description": "AWS Account Secret Key.", + "x-secret": true + } + } + } + ] + }, + "AwsAccountDetail": { + "allOf": [ + { + "$ref": "#/definitions/BaseAwsAccount" + }, + { + "$ref": "#/definitions/SlaAssignable" + }, + { + "type": "object", + "properties": { + "dcaAccountDetails": { + "description": "Optional DCA AWS account details.", + "$ref": "#/definitions/DcaAwsAccount" + }, + "pendingSlaDomain": { + "description": "Describes any pending SLA Domain assignment on this object.", + "$ref": "#/definitions/ManagedObjectPendingSlaInfo" + } + } + } + ] + }, + "AwsAccountResourceIdObject": { + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "An array containing the AWS IDs of EC2 instances.\n", + "items": { + "type": "string" + } + }, + "volumes": { + "type": "array", + "description": "An array containing the AWS IDs of EBS volumes.\n", + "items": { + "type": "string" + } + }, + "snapshots": { + "type": "array", + "description": "An array containing the AWS IDs of AWS snapshots.\n", + "items": { + "type": "string" + } + }, + "images": { + "type": "array", + "description": "An array containing the AWS IDs of Images.\n", + "items": { + "type": "string" + } + } + } + }, + "AwsAccountStatus": { + "type": "string", + "description": "AWS account status.", + "enum": [ + "Connected", + "Disconnected", + "Refreshing", + "Deleting", + "DeletionFailed" + ] + }, + "AwsAccountSummary": { + "type": "object", + "required": [ + "id", + "name", + "primaryClusterId", + "status" + ], + "properties": { + 
"id": { + "type": "string", + "description": "ID of the AWS account." + }, + "name": { + "type": "string", + "description": "Name of the AWS account." + }, + "primaryClusterId": { + "type": "string", + "description": "ID of the cluster which manages data present in this AWS account.\n" + }, + "status": { + "description": "AWS account status.", + "$ref": "#/definitions/AwsAccountStatus" + } + } + }, + "AwsAccountSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/AwsAccountSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "AwsAccountUpdate": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name of the AWS account." + }, + "accessKey": { + "type": "string", + "description": "AWS Account Access Key." + }, + "secretKey": { + "type": "string", + "description": "AWS Account Secret Key.", + "x-secret": true + }, + "regions": { + "type": "array", + "description": "List of AWS regions.", + "items": { + "type": "string" + } + }, + "regionalBoltNetworkConfigs": { + "type": "array", + "description": "List of Network config for launching bolt in a particular region.\n", + "items": { + "$ref": "#/definitions/RegionalBoltNetworkConfig" + } + }, + "disasterRecoveryArchivalLocationId": { + "type": "string", + "description": "Id of the Archival Location to use for Disaster Recovery.\n" + } + } + }, + "BaseAwsAccount": { + "type": "object", + "required": [ + "accessKey", + "name", + "regions" + ], + "properties": { + "name": { + "type": "string", + "description": "Name of the AWS account." + }, + "accessKey": { + "type": "string", + "description": "AWS Account Access Key." + }, + "regions": { + "type": "array", + "description": "List of AWS regions.", + "items": { + "type": "string" + } + }, + "regionalBoltNetworkConfigs": { + "type": "array", + "description": "List of Network config for launching bolt in a particular region.\n", + "items": { + "$ref": "#/definitions/RegionalBoltNetworkConfig" + } + }, + "disasterRecoveryArchivalLocationId": { + "type": "string", + "description": "Id of the Archival Location to use for Disaster Recovery.\n" + } + } + }, + "DcaAwsAccount": { + "type": "object", + "required": [ + "agency", + "awsEndpoint", + "capEndpoint", + "certificateId", + "mission", + "name", + "role" + ], + "properties": { + "name": { + "type": "string", + "description": "Name of the DCA AWS account." + }, + "agency": { + "type": "string", + "description": "Name of agency to pass to CAP server." + }, + "mission": { + "type": "string", + "description": "Name of mission to pass to CAP server." + }, + "role": { + "type": "string", + "description": "Name of IAM role to use for accessing DCA. Given to CAP server." + }, + "capEndpoint": { + "type": "string", + "description": "URL of CAP credentials server." + }, + "awsEndpoint": { + "type": "string", + "description": "AWS endpoint URL." + }, + "tokenDurationMinutes": { + "type": "integer", + "format": "int32", + "description": "Duration of validity of temporary credentials. Allowed range: 15 to 60. Default: 60 minutes.\n" + }, + "certificateId": { + "type": "string", + "description": "Id of the ceritificate to be used to authenticate to the CAP server." 
+ } + } + }, + "DcaAwsAccountCreate": { + "allOf": [ + { + "$ref": "#/definitions/DcaAwsAccount" + } + ] + }, + "DcaAwsAccountUpdate": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name of the DCA AWS account." + }, + "agency": { + "type": "string", + "description": "Name of agency to pass to CAP server." + }, + "mission": { + "type": "string", + "description": "Name of mission to pass to CAP server." + }, + "role": { + "type": "string", + "description": "Name of IAM role to use for accessing DCA. Given to CAP server." + }, + "capEndpoint": { + "type": "string", + "description": "URL of CAP credentials server." + }, + "awsEndpoint": { + "type": "string", + "description": "AWS endpoint URL." + }, + "tokenDurationMinutes": { + "type": "integer", + "format": "int32", + "description": "Duration of validity of temporary credentials. Allowed range: 15 to 60. Default: 60 minutes.\n" + }, + "certificateId": { + "type": "string", + "description": "Id of the ceritificate to be used to authenticate to the CAP server." + } + } + }, + "RegionalBoltNetworkConfig": { + "type": "object", + "required": [ + "region", + "securityGroupId", + "subnetId", + "vNetId" + ], + "properties": { + "region": { + "type": "string", + "description": "Region in the AWS account." + }, + "vNetId": { + "type": "string", + "description": "ID of the virtual network to assign to the Rubrik CDM instance.\n" + }, + "subnetId": { + "type": "string", + "description": "ID of the subnet to assign to the Rubrik CDM instance." + }, + "securityGroupId": { + "type": "string", + "description": "ID of the security group to assign to the Rubrik CDM instance.\n" + } + } + }, + "AwsEc2IndexingConfiguration": { + "type": "string", + "description": "Combined information of Indexing Configuration of the associated Aws Region and indexing state of the EC2 Instance. Configuration means region based network configurations and indexing state means indexing manually enabled or disabled by the user per instance.", + "enum": [ + "ConfiguredAndSet", + "ConfiguredAndUnset", + "UnconfiguredAndSet", + "UnconfiguredAndUnset" + ] + }, + "AwsEc2InstanceDetail": { + "allOf": [ + { + "$ref": "#/definitions/AwsEc2InstanceSummary" + }, + { + "type": "object", + "required": [ + "isRelic", + "operatingSystemType", + "storageVolumeIds" + ], + "properties": { + "operatingSystemType": { + "description": "Operating system of a specified EC2 instance.", + "$ref": "#/definitions/OperatingSystemType" + }, + "vpcId": { + "type": "string", + "description": "ID assigned to the virtual private cloud object that contains a specified EC2 instance." + }, + "vpcName": { + "type": "string", + "description": "Name of the virtual private cloud object that contains a specified EC2 instance." + }, + "storageVolumeIds": { + "type": "array", + "description": "Array containing the object ID of each storage volume that is attached to a specified EC2 instance.", + "items": { + "type": "string" + } + }, + "isRelic": { + "type": "boolean", + "description": "Whether this EC2 instance is a relic (an archived snappable with unexpired snapshots)." + } + } + } + ] + }, + "AwsEc2InstanceDownloadFilesConfig": { + "type": "object", + "required": [ + "paths" + ], + "properties": { + "paths": { + "type": "array", + "description": "An array that contains the full source path of each file and folder to download. 
This array cannot be empty.", + "items": { + "type": "string" + } + } + } + }, + "AwsEc2InstanceInplaceRestoreConfig": { + "type": "object", + "required": [ + "snapshotId" + ], + "properties": { + "snapshotId": { + "type": "string", + "description": "Snapshot ID of an EC2 instance object from which to in-place restore the EC2 instance." + }, + "powerOn": { + "type": "boolean", + "description": "Determines whether the EC2 instance should be powered on after in-place restore. Set to 'true' to power on the EC2 instance. Set to 'false' to in-place restore the EC2 instance but not power it on. The default is 'true'.", + "default": true + } + } + }, + "AwsEc2InstanceListSortAttribute": { + "type": "string", + "description": "Attributes that are available to use when sorting query results for AWS EC2 instance objects.", + "enum": [ + "instanceId", + "instanceName", + "instanceType", + "accountName", + "region", + "effectiveSlaDomainName", + "slaAssignment" + ] + }, + "AwsEc2InstanceSlaObjectCount": { + "type": "object", + "properties": { + "numEc2Instances": { + "type": "integer", + "format": "int32", + "description": "The number of EC2 instances protected under this SLA Domain." + } + } + }, + "AwsEc2InstanceSnapshotDetail": { + "allOf": [ + { + "$ref": "#/definitions/AwsEc2InstanceSnapshotSummary" + } + ] + }, + "AwsEc2InstanceSnapshotExportConfig": { + "type": "object", + "required": [ + "instanceName", + "instanceType", + "region", + "securityGroupId", + "subnetId" + ], + "properties": { + "instanceName": { + "type": "string", + "description": "Name to assign to instance being launched." + }, + "instanceType": { + "type": "string", + "description": "EC2 Instance Type to use for the instance being launched." + }, + "region": { + "type": "string", + "description": "Region in which the AWS instance to be exported." + }, + "subnetId": { + "type": "string", + "description": "ID of the subnet to assign to the instance being launched." + }, + "securityGroupId": { + "type": "string", + "description": "ID of the security group to assign to the instance being launched." + } + } + }, + "AwsEc2InstanceSnapshotSummary": { + "allOf": [ + { + "$ref": "#/definitions/BaseSnapshotSummary" + }, + { + "type": "object", + "required": [ + "accountId", + "imageId", + "instanceId", + "snapshotVolumeIds" + ], + "properties": { + "accountId": { + "type": "string", + "description": "ID used by Rubrik for a Cloud Soure account to which this snapshot belongs." + }, + "instanceId": { + "type": "string", + "description": "ID used by AWS for the EC2 instance." + }, + "imageId": { + "type": "string", + "description": "ID used by AWS for storing the image of a E2 Instance." + }, + "snapshotVolumeIds": { + "type": "array", + "description": "Array containing aws snapshot ID of each storage volume that was snapshotted.", + "items": { + "type": "string" + } + } + } + } + ] + }, + "AwsEc2InstanceSnapshotSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/AwsEc2InstanceSnapshotSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." 
+ } + } + }, + "AwsEc2InstanceSummary": { + "allOf": [ + { + "$ref": "#/definitions/Snappable" + }, + { + "type": "object", + "required": [ + "accountId", + "accountName", + "indexingConfiguration", + "instanceId", + "instanceType", + "isDcaAccountInstance", + "numMissedSnapshot", + "region" + ], + "properties": { + "instanceId": { + "type": "string", + "description": "AWS instance ID. For EC2 instances, this value appears in the Name field." + }, + "instanceName": { + "type": "string", + "description": "AWS instance name." + }, + "instanceType": { + "type": "string", + "description": "AWS instance type." + }, + "accountId": { + "type": "string", + "description": "ID assigned to an AWS account instance object." + }, + "accountName": { + "type": "string", + "description": "Name assigned to an AWS account instance object." + }, + "isDcaAccountInstance": { + "type": "boolean", + "description": "A Boolean that specifies whether the instance belongs to a DCA account. When this value is 'true', the instance belongs to a DCA account." + }, + "region": { + "type": "string", + "description": "AWS instance region." + }, + "indexingConfiguration": { + "$ref": "#/definitions/AwsEc2IndexingConfiguration" + }, + "numMissedSnapshot": { + "type": "integer", + "format": "int32", + "description": "An integer that specifies the number of missed snapshots." + }, + "lastSnapshotTime": { + "type": "string", + "format": "date-time", + "description": "The timestamp of the previous snapshot." + }, + "includeBackupTaskInfo": { + "type": "boolean", + "description": "True/false value indicating if backup task information is included in the response." + }, + "currentBackupTaskInfo": { + "description": "Information about the current backup task.", + "$ref": "#/definitions/BackupTaskDiagnosticInfo" + }, + "pendingSlaDomain": { + "description": "Describes any pending SLA Domain assignment on this object.", + "$ref": "#/definitions/ManagedObjectPendingSlaInfo" + } + } + } + ] + }, + "AwsEc2InstanceSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/AwsEc2InstanceSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "AwsEc2InstanceUpdate": { + "type": "object", + "properties": { + "configuredSlaDomainId": { + "type": "string", + "description": "ID assigned to the SLA Domain object that is configured for a specified EC2 instance. Existing snapshots of the object will be retained with the configuration of specified SLA Domain." + } + } + }, + "AwsEc2InstancesIndexingStateUpdateConfig": { + "type": "object", + "required": [ + "instanceIds", + "setIndexingState" + ], + "properties": { + "instanceIds": { + "type": "array", + "description": "An array that contains the EC2 instance IDs of the instances for which to update the indexing state.", + "items": { + "type": "string" + } + }, + "setIndexingState": { + "type": "boolean", + "description": "A Boolean value that specifies whether indexing is enabled. When false, indexing is disabled. When true, indexing is enabled." 
+ } + } + }, + "AwsHierarchyObjectDescendantCount": { + "type": "object", + "properties": { + "AwsAccount": { + "type": "integer", + "format": "int32", + "minimum": 0 + }, + "Ec2Instance": { + "type": "integer", + "format": "int32", + "minimum": 0 + } + } + }, + "AwsHierarchyObjectSummary": { + "allOf": [ + { + "$ref": "#/definitions/ManagedHierarchyObjectSummary" + }, + { + "type": "object", + "required": [ + "descendantCount", + "isDeleted", + "objectType" + ], + "properties": { + "accountName": { + "type": "string", + "description": "Object name for an AWS account object." + }, + "instanceId": { + "type": "string", + "description": "Object ID for an EC2 instance object." + }, + "instanceName": { + "type": "string", + "description": "Object name for an EC2 instance object." + }, + "instanceType": { + "type": "string", + "description": "EC2 instance type for an EC2 instance object." + }, + "region": { + "type": "string", + "description": "AWS Region for an EC2 instance object." + }, + "objectType": { + "description": "Will be AwsAccount or Ec2Instance.", + "$ref": "#/definitions/ObjectType" + }, + "status": { + "description": "Connection status of AWS account.", + "$ref": "#/definitions/AwsAccountStatus" + }, + "descendantCount": { + "description": "Number of descendants of the object in the hierarchy.", + "$ref": "#/definitions/AwsHierarchyObjectDescendantCount" + }, + "isDeleted": { + "type": "boolean", + "description": "Indicates whether the aws hierarchy object is deleted." + }, + "pendingSlaDomain": { + "description": "Describes any pending SLA Domain assignment on this object.", + "$ref": "#/definitions/ManagedObjectPendingSlaInfo" + } + } + } + ] + }, + "AwsHierarchyObjectSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/AwsHierarchyObjectSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "AwsObjectListSortAttribute": { + "type": "string", + "description": "Attributes that are available to use when sorting query results for AWS hierarchy objects. Some attributes only apply to the AWS account object and some only apply to AWS instance objects.", + "enum": [ + "name", + "accountName", + "instanceId", + "instanceName", + "instanceType", + "region", + "effectiveSlaDomainName", + "slaAssignment", + "descendantCountEc2Instance" + ] + }, + "AwsEc2InstanceStorageVolumeDetail": { + "type": "object", + "required": [ + "excludeFromSnapshots", + "id", + "rootVolume", + "volumeId", + "volumePath", + "volumeSize" + ], + "properties": { + "id": { + "type": "string", + "description": "ID assigned to an AWS storage volume object." + }, + "volumeId": { + "type": "string", + "description": "ID used by AWS to identify an AWS volume." + }, + "volumePath": { + "type": "string", + "description": "Path used by AWS to identify the location of a volume." + }, + "volumeSize": { + "type": "integer", + "format": "int64", + "description": "Volume size expressed in gigabytes." + }, + "excludeFromSnapshots": { + "type": "boolean", + "description": "Determines whether to prevent snapshots of a volume. Use 'yes' to prevent snapshots, or use 'no' to allow snapshots." + }, + "rootVolume": { + "type": "boolean", + "description": "Specifies whether the volume is the root volume. 
Use 'yes' when the volume is the root volume, or use 'no' when the volume is not the root volume." + } + } + }, + "AwsEc2InstanceStorageVolumeDetailListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/AwsEc2InstanceStorageVolumeDetail" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "AwsEc2InstanceStorageVolumeListSortAttribute": { + "type": "string", + "description": "List of attributes that can be used to sort the results of a query of AWS storage volume objects.", + "enum": [ + "VolumePath", + "VolumeId", + "VolumeSize" + ] + }, + "AwsEc2InstanceStorageVolumeUpdateConfig": { + "type": "object", + "required": [ + "excludeFromSnapshots" + ], + "properties": { + "excludeFromSnapshots": { + "type": "boolean", + "description": "Determines whether to prevent snapshots of a volume. Use 'yes' to prevent snapshots, or use 'no' to allow snapshots." + } + } + }, + "BlackoutWindow": { + "type": "object", + "required": [ + "startTime" + ], + "properties": { + "startTime": { + "type": "string" + }, + "endTime": { + "type": "string" + } + } + }, + "BlackoutWindowResponseInfo": { + "type": "object", + "required": [ + "blackoutWindowStatus", + "blackoutWindows" + ], + "properties": { + "blackoutWindowStatus": { + "$ref": "#/definitions/BlackoutWindowStatus" + }, + "blackoutWindows": { + "$ref": "#/definitions/BlackoutWindows" + } + } + }, + "BlackoutWindowStatus": { + "type": "object", + "required": [ + "isGlobalBlackoutActive" + ], + "properties": { + "isGlobalBlackoutActive": { + "type": "boolean" + }, + "isSnappableBlackoutActive": { + "type": "boolean" + } + } + }, + "BlackoutWindows": { + "type": "object", + "required": [ + "globalBlackoutWindows" + ], + "properties": { + "globalBlackoutWindows": { + "type": "array", + "items": { + "$ref": "#/definitions/BlackoutWindow" + } + }, + "snappableBlackoutWindows": { + "type": "array", + "items": { + "$ref": "#/definitions/BlackoutWindow" + } + } + } + }, + "GlobalBlackoutWindowStatus": { + "type": "object", + "required": [ + "isGlobalBlackoutActive" + ], + "properties": { + "isGlobalBlackoutActive": { + "type": "boolean" + } + } + }, + "AgentSecondaryCertificateInfo": { + "type": "object", + "required": [ + "certId", + "clusterUuid", + "isAgentEnabled", + "name" + ], + "properties": { + "name": { + "type": "string", + "description": "Display name for the certificate." + }, + "certId": { + "type": "string", + "description": "ID of the certificate." + }, + "clusterUuid": { + "type": "string", + "description": "Parsed cluster ID from the certificate." + }, + "isAgentEnabled": { + "type": "boolean", + "description": "Whether this certificate has been marked for use by agents." + } + } + }, + "CertificateImportRequest": { + "type": "object", + "required": [ + "name", + "pemFile" + ], + "properties": { + "name": { + "type": "string", + "description": "Display name for the certificate." + }, + "pemFile": { + "type": "string", + "description": "The certificates, and optionally private key to be imported, in PEM format.", + "x-secret": true + }, + "privateKey": { + "type": "string", + "description": "The private key, in PEM format, to be imported. 
If a private key is provided using this field instead of the pemFile field, the import fails if the private key is not successfully parsed.", + "x-secret": true + }, + "description": { + "type": "string", + "description": "User-friendly description for the certificate." + }, + "csrId": { + "type": "string", + "description": "ID of the certificate signing request (CSR) associated with the imported certificate." + } + } + }, + "CertificatePatchRequest": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Display name for the certificate." + }, + "description": { + "type": "string", + "description": "User-friendly description for the certificate." + }, + "pemFile": { + "type": "string", + "description": "The certificates to be imported, in PEM format.", + "x-secret": true + } + } + }, + "CertificateSummary": { + "type": "object", + "required": [ + "certId", + "expiration", + "hasKey", + "name", + "pemFile", + "usedBy" + ], + "properties": { + "certId": { + "type": "string", + "description": "ID of the certificate." + }, + "name": { + "type": "string", + "description": "Display name for the certificate." + }, + "pemFile": { + "type": "string", + "description": "The certificates, in PEM format." + }, + "hasKey": { + "type": "boolean", + "description": "A Boolean value that specifies whether or not the certificate is associated with a stored private key. When this value is 'true,' the private key for the certificate is stored. When this value is 'false,' the private key for the certificate is not stored." + }, + "expiration": { + "type": "string", + "format": "date-time", + "description": "The expiration date for the certificate." + }, + "usedBy": { + "type": "string", + "description": "A list of components using the certificate." + }, + "description": { + "type": "string", + "description": "User-friendly description for the certificate." + } + } + }, + "CsrRequest": { + "type": "object", + "required": [ + "hostnames" + ], + "properties": { + "hostnames": { + "type": "array", + "description": "A comma-separated list of host names that are associated with the certificate. This list accepts wildcard hostnames, such as '*.rubrik.example.com', in addition to fully-qualified domain names.", + "items": { + "type": "string" + } + }, + "organization": { + "type": "string", + "description": "The name of the organization associated with the certificate, for example 'Rubrik, Inc.'." + }, + "organizationUnit": { + "type": "string", + "description": "The name of the organizational unit associated with the certificate, for example, Engineering." + }, + "country": { + "type": "string", + "description": "The two-letter ISO code for the country where the provided organization is located, for example US, GB, or FR." + }, + "state": { + "type": "string", + "description": "If applicable, the name of the state where the provided organization is located, for example, California." + }, + "city": { + "type": "string", + "description": "The city where the provided organization is located, for example, New York or London." + }, + "surname": { + "type": "string", + "description": "The surname associated with the certificate, if applicable." + }, + "uid": { + "type": "string", + "description": "The user ID associated with the certificate, if applicable." + }, + "emailAddress": { + "type": "string", + "description": "An email address associated with the organization, if applicable." 
+ } + } + }, + "CsrSummary": { + "type": "object", + "required": [ + "csr", + "csrId", + "hostnames", + "name", + "subject" + ], + "properties": { + "csrId": { + "type": "string", + "description": "ID of the generated CSR." + }, + "name": { + "type": "string", + "description": "Display name for the generated CSR." + }, + "hostnames": { + "type": "array", + "description": "A comma-separated list of host names that are associated with the certificate. This list accepts wildcard hostnames, such as '*.rubrik.example.com', in addition to fully-qualified domain names.", + "items": { + "type": "string" + } + }, + "subject": { + "type": "string", + "description": "Subject line of the CSR." + }, + "csr": { + "type": "string", + "description": "Base64 encoded PKCS#10 certificate signing request. The request should start with -----BEGIN CERTIFICATE REQUEST-----." + } + } + }, + "GenericCsrRequest": { + "type": "object", + "required": [ + "csrRequest", + "name" + ], + "properties": { + "name": { + "type": "string", + "description": "Display name for the generated CSR." + }, + "csrRequest": { + "$ref": "#/definitions/CsrRequest" + } + } + }, + "Environment": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string", + "description": "Azure environment name." + } + } + }, + "Region": { + "type": "object", + "required": [ + "id", + "name" + ], + "properties": { + "id": { + "type": "string", + "description": "Cloud provider region ID." + }, + "name": { + "type": "string", + "description": "Cloud provider region name." + } + } + }, + "SecurityGroup": { + "type": "object", + "required": [ + "id", + "name" + ], + "properties": { + "id": { + "type": "string", + "description": "ID assigned to a security group object." + }, + "name": { + "type": "string", + "description": "Name of a security group object." + } + } + }, + "SecurityGroupListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/SecurityGroup" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "Subnet": { + "type": "object", + "required": [ + "isDefault", + "subnetCidrBlock", + "subnetId", + "vNetId" + ], + "properties": { + "subnetId": { + "type": "string", + "description": "ID assigned to a subnet object." + }, + "subnetName": { + "type": "string", + "description": "Name of a subnet object." + }, + "vNetId": { + "type": "string", + "description": "ID assigned to a virtual network object." + }, + "vNetName": { + "type": "string", + "description": "Name of a virtual network object." + }, + "subnetCidrBlock": { + "type": "string", + "description": "Address prefix used by a subnet, using standard CIDR notation." + }, + "availabilityZone": { + "type": "string", + "description": "Availability zone of a virtual network." + }, + "region": { + "type": "string", + "description": "Administrative region of a virtual network." + }, + "isDefault": { + "type": "boolean", + "description": "Boolean value that indicates whether a subnet is the default. Value is 'true' when the subnet is the default." + } + } + }, + "SubnetListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." 
+ }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/Subnet" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "VirtualNetwork": { + "type": "object", + "required": [ + "name", + "virtualNetworkId" + ], + "properties": { + "virtualNetworkId": { + "type": "string", + "description": "ID assigned to a virtual network." + }, + "name": { + "type": "string", + "description": "Name of a virtual network object." + } + } + }, + "AppBlueprintImageListSortAttribute": { + "type": "string", + "description": "Attributes that are available to use when sorting query results for AWS AppBlueprint cloud images.", + "enum": [ + "AppBlueprintName", + "LocationName", + "SnapshotTime", + "CreationTime" + ] + }, + "AppCloudImageDetail": { + "allOf": [ + { + "$ref": "#/definitions/AppCloudImageSummary" + }, + { + "type": "object", + "required": [ + "links" + ], + "properties": { + "links": { + "$ref": "#/definitions/CloudImageSummaryLinks" + } + } + } + ] + }, + "AppCloudImageSummary": { + "allOf": [ + { + "$ref": "#/definitions/BaseCloudImageSummary" + }, + { + "type": "object", + "required": [ + "childImageIds" + ], + "properties": { + "childImageIds": { + "type": "array", + "description": "An array containing the ID for each image that is contained by the specified app image.", + "items": { + "type": "string" + } + } + } + } + ] + }, + "AwsAppImageDetail": { + "allOf": [ + { + "$ref": "#/definitions/AppCloudImageDetail" + } + ] + }, + "AwsAppImageSummary": { + "allOf": [ + { + "$ref": "#/definitions/AppCloudImageSummary" + } + ] + }, + "AwsAppImageSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/AwsAppImageSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "AwsImageDetail": { + "allOf": [ + { + "$ref": "#/definitions/CloudImageDetail" + } + ] + }, + "AwsImageSummary": { + "allOf": [ + { + "$ref": "#/definitions/CloudImageSummary" + } + ] + }, + "AwsImageSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/AwsImageSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "AwsInstanceDetail": { + "allOf": [ + { + "$ref": "#/definitions/CloudInstanceDetail" + } + ] + }, + "AwsInstanceSummary": { + "allOf": [ + { + "$ref": "#/definitions/CloudInstanceSummary" + } + ] + }, + "AwsInstanceSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/AwsInstanceSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." 
+ } + } + }, + "AzureAppImageDetail": { + "allOf": [ + { + "$ref": "#/definitions/AppCloudImageDetail" + } + ] + }, + "AzureImageDetail": { + "allOf": [ + { + "$ref": "#/definitions/CloudImageDetail" + } + ] + }, + "AzureImageSummary": { + "allOf": [ + { + "$ref": "#/definitions/CloudImageSummary" + } + ] + }, + "AzureImageSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/AzureImageSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "AzureInstanceDetail": { + "allOf": [ + { + "$ref": "#/definitions/CloudInstanceDetail" + } + ] + }, + "AzureInstanceSummary": { + "allOf": [ + { + "$ref": "#/definitions/CloudInstanceSummary" + } + ] + }, + "AzureInstanceSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/AzureInstanceSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "AzureResourceGroupCredential": { + "allOf": [ + { + "$ref": "#/definitions/AzureSubscriptionCredential" + }, + { + "type": "object", + "required": [ + "subscriptionId" + ], + "properties": { + "subscriptionId": { + "type": "string", + "description": "Azure subscription id." + } + } + } + ] + }, + "AzureResourceGroupInfoSource": { + "type": "string", + "description": "The type of data that is used to fetch resource group. It can be retrieved either by azure storage account credentials or archival data location ID.\n", + "enum": [ + "DataLocation", + "AzureAccountCredentials" + ] + }, + "AzureResourceGroupRequest": { + "type": "object", + "required": [ + "source" + ], + "properties": { + "source": { + "description": "Source of the query, which can be the Azure account credentials or the location ID.\n", + "$ref": "#/definitions/AzureResourceGroupInfoSource" + }, + "resourceGroupCredential": { + "description": "The Azure account credentials that are required to fetch the resource group list.\n", + "$ref": "#/definitions/AzureResourceGroupCredential" + }, + "dataLocationId": { + "type": "string", + "description": "The data location ID of the Azure archival location." + }, + "computeProxyConfig": { + "description": "Compute proxy config.", + "$ref": "#/definitions/LocationProxyConfig" + } + } + }, + "AzureStorageAccountCredential": { + "allOf": [ + { + "$ref": "#/definitions/AzureSubscriptionCredential" + }, + { + "type": "object", + "required": [ + "regionName", + "subscriptionId" + ], + "properties": { + "subscriptionId": { + "type": "string", + "description": "Azure subscription id." + }, + "regionName": { + "type": "string", + "description": "Azure region name." 
+ }, + "storageAccountType": { + "description": "Azure storage type - blob storage or general purpose.", + "$ref": "#/definitions/StorageAccountType" + } + } + } + ] + }, + "AzureStorageAccountRequest": { + "type": "object", + "required": [ + "storageAccountCredential" + ], + "properties": { + "storageAccountCredential": { + "description": "The Azure storage account credential.", + "$ref": "#/definitions/AzureStorageAccountCredential" + }, + "computeProxyConfig": { + "description": "Compute proxy config.", + "$ref": "#/definitions/LocationProxyConfig" + } + } + }, + "AzureSubscriptionCredential": { + "type": "object", + "required": [ + "clientId", + "clientSecret", + "environment", + "tenantId" + ], + "properties": { + "tenantId": { + "type": "string", + "description": "Azure tenant id." + }, + "clientId": { + "type": "string", + "description": "Azure client application id." + }, + "clientSecret": { + "type": "string", + "description": "Azure client application secret key.", + "x-secret": true + }, + "environment": { + "description": "Azure Environment.", + "$ref": "#/definitions/AzureSupportedEnvironment" + } + } + }, + "AzureSubscriptionRequest": { + "type": "object", + "required": [ + "subscriptionCredential" + ], + "properties": { + "subscriptionCredential": { + "description": "The Azure subscription credential.", + "$ref": "#/definitions/AzureSubscriptionCredential" + }, + "computeProxyConfig": { + "description": "Compute proxy config.", + "$ref": "#/definitions/LocationProxyConfig" + } + } + }, + "BaseCloudImageSummary": { + "type": "object", + "required": [ + "creationDate", + "id", + "links", + "locationId", + "locationName", + "snapshotCreationDate", + "snapshotId" + ], + "properties": { + "id": { + "type": "string", + "description": "The ID of the image." + }, + "snapshotId": { + "type": "string", + "description": "The ID of the snapshot the image was generated from." + }, + "snappableId": { + "type": "string", + "description": "The ID of the snappable the snapshot was taken of. This can be used to get context about the image if the snapshot has since been expired. Will be empty if the snappable is not available.\n" + }, + "snappableName": { + "type": "string", + "description": "The name of the source snappable. Will be empty if the snappable is not available.\n" + }, + "snapshotCreationDate": { + "type": "string", + "format": "date-time", + "description": "The creation date of the snapshot this image is based on. This can be used to get context about the image if the snapshot has since been expired.\n" + }, + "locationId": { + "type": "string", + "description": "The ID of the location the image is on." + }, + "locationName": { + "type": "string", + "description": "The name of the location the image is on." + }, + "locationOwnershipStatus": { + "description": "The ownership status that the current cluster has with respect to the data location.\n", + "$ref": "#/definitions/DataLocationOwnershipStatus" + }, + "creationDate": { + "type": "string", + "format": "date-time", + "description": "The date the image was created." + }, + "expirationDate": { + "type": "string", + "format": "date-time", + "description": "The date the image is scheduled to expire." 
+ }, + "imageType": { + "description": "The source or type of the image.", + "$ref": "#/definitions/ImageType" + }, + "links": { + "$ref": "#/definitions/CloudImageSummaryLinks" + } + } + }, + "CloudImageDetail": { + "allOf": [ + { + "$ref": "#/definitions/CloudImageSummary" + }, + { + "$ref": "#/definitions/SecuritySetting" + }, + { + "type": "object", + "required": [ + "instanceIds", + "links" + ], + "properties": { + "instanceIds": { + "type": "array", + "description": "All instances that have been instantiated from this image.", + "items": { + "type": "string" + } + }, + "links": { + "$ref": "#/definitions/CloudImageDetailLinks" + } + } + } + ] + }, + "CloudImageDetailLinks": { + "allOf": [ + { + "$ref": "#/definitions/CloudImageSummaryLinks" + }, + { + "type": "object", + "required": [ + "instances" + ], + "properties": { + "instances": { + "type": "array", + "items": { + "$ref": "#/definitions/Link" + } + } + } + } + ] + }, + "CloudImageSummary": { + "allOf": [ + { + "$ref": "#/definitions/BaseCloudImageSummary" + }, + { + "type": "object", + "properties": { + "recommendedInstanceType": { + "description": "The recommended instance type for the image.", + "$ref": "#/definitions/RecommendedInstanceType" + } + } + } + ] + }, + "CloudImageSummaryLinks": { + "type": "object", + "properties": { + "snappable": { + "$ref": "#/definitions/Link" + }, + "snapshot": { + "$ref": "#/definitions/Link" + }, + "location": { + "$ref": "#/definitions/Link" + } + } + }, + "CloudInstanceDetail": { + "allOf": [ + { + "$ref": "#/definitions/CloudInstanceSummary" + }, + { + "$ref": "#/definitions/SecuritySetting" + }, + { + "type": "object", + "properties": { + "identityFile": { + "type": "string", + "description": "Files for Aws identity management." + } + } + } + ] + }, + "CloudInstanceLinks": { + "type": "object", + "properties": { + "image": { + "$ref": "#/definitions/Link" + }, + "location": { + "$ref": "#/definitions/Link" + } + } + }, + "CloudInstanceSource": { + "type": "string", + "description": "The type of data that is used to create a cloud image. The CloudInstanceSource can be either a snapshot generated by a Rubrik instance or a pre-existing cloud image.\n", + "enum": [ + "Snapshot", + "Image" + ] + }, + "CloudInstanceSummary": { + "type": "object", + "required": [ + "creationDate", + "id", + "imageId", + "instanceClass", + "links", + "locationId", + "locationName", + "ownerId", + "ownerName" + ], + "properties": { + "id": { + "type": "string", + "description": "The ID of the cloud instance." + }, + "snappableId": { + "type": "string", + "description": "The ID of the source snappable. Will be empty if the snappable is not available.\n" + }, + "snappableName": { + "type": "string", + "description": "The name of the source snappable. Will be empty if the snappable is not available.\n" + }, + "locationId": { + "type": "string", + "description": "The data location the cloud instance is on." + }, + "locationName": { + "type": "string", + "description": "The name of the location the cloud instance is on." + }, + "imageId": { + "type": "string", + "description": "The image the cloud instance was generated from." + }, + "creationDate": { + "type": "string", + "format": "date-time", + "description": "The date the instance was created." + }, + "snapshotCreationDate": { + "type": "string", + "format": "date-time", + "description": "The date the corresponding snapshot was taken." 
+ }, + "terminationDate": { + "type": "string", + "format": "date-time", + "description": "The date the instance is scheduled to terminate." + }, + "instanceClass": { + "type": "string", + "description": "The type of instance for relative to Rubrik.", + "enum": [ + "UserDeployedRubrikOnCloudInstance", + "TransientRubrikOnCloudInstance", + "UserInstantiatedInstance" + ] + }, + "publicIpAddress": { + "type": "string", + "description": "The public IP address of the instance." + }, + "privateIpAddress": { + "type": "string", + "description": "The private IP address of the instance." + }, + "powerStatus": { + "type": "string", + "description": "The power status.", + "enum": [ + "PENDING", + "RUNNING", + "SHUTTING_DOWN", + "TERMINATED", + "STOPPING", + "STOPPED" + ] + }, + "ownerId": { + "type": "string", + "description": "ID of the user who requested the instance." + }, + "ownerName": { + "type": "string", + "description": "Displayable name of the user who requested the instance." + }, + "links": { + "$ref": "#/definitions/CloudInstanceLinks" + }, + "instanceType": { + "type": "string", + "description": "Instance type that determines the compute and memory size of the launched instance. Values are different for different cloud providers.\n" + } + } + }, + "CloudInstanceUpdate": { + "type": "object", + "required": [ + "powerStatus" + ], + "properties": { + "powerStatus": { + "type": "string", + "description": "The new power status of the instances.", + "enum": [ + "POWERSTATUS_ON", + "POWERSTATUS_OFF" + ] + } + } + }, + "CloudInstantiationSpec": { + "type": "object", + "required": [ + "imageRetentionInSeconds" + ], + "properties": { + "imageRetentionInSeconds": { + "type": "integer", + "format": "int64", + "description": "Specifies the number of seconds to retain an image file that is generated for a snappable. Setting this to -1 disables cloud instantiation for the snappable.\n" + } + } + }, + "CloudMountListSortAttribute": { + "type": "string", + "description": "Attributes that are available to use when sorting query results for AWS cloud mount objects.", + "enum": [ + "SourceVmName", + "LocationName", + "InstanceType", + "CreatedBy", + "Status", + "SnapshotTime", + "CreationTime" + ] + }, + "CreateCloudInstanceRequest": { + "type": "object", + "required": [ + "instanceType", + "instantiateLocationId", + "securityGroup", + "source", + "subnet", + "virtualNetwork" + ], + "properties": { + "source": { + "description": "The type of data that is used to create an instance. The source can be either a cloud image or a snapshot.\n", + "$ref": "#/definitions/CloudInstanceSource" + }, + "snappableId": { + "type": "string", + "description": "The snappable id to which the snapshot belongs to." + }, + "snapshotId": { + "type": "string", + "description": "The snapshot to create an instance from." + }, + "imageId": { + "type": "string", + "description": "The cloud image to create an instance from." + }, + "instantiateLocationId": { + "type": "string", + "description": "The data location to instantiate on." + }, + "instanceType": { + "type": "string", + "description": "Type of the launched instance." + }, + "securityGroup": { + "type": "string", + "description": "A security group that the instance will be launched with." + }, + "subnet": { + "type": "string", + "description": "Subnet in which the instance is to launch." + }, + "virtualNetwork": { + "type": "string", + "description": "Virtual network in which the instance is to launch." 
+ }, + "encryptionKeyId": { + "type": "string", + "description": "KMS Key ID to use for encryption." + }, + "resourceGroup": { + "type": "string", + "description": "Resource group in which the instance is to launch.\n" + }, + "isVerifyOnlyMode": { + "type": "boolean", + "description": "If this option is set to true, resources such as disk snapshots and the final machine image are not created on cloud. A new instance is not launched at the end of conversion. No cleanup is required on success or failure of this job.\n" + }, + "shouldKeepConvertedDisksOnFailure": { + "type": "boolean", + "description": "When set to true, full conversion jobs retain successfully converted disks after a failure. When set to false, the default, converted disks are deleted after a failure as part of clean up.\n" + } + } + }, + "ImageType": { + "type": "string", + "description": "The type of cloud image.", + "enum": [ + "OnDemand", + "PolicyBased", + "Blueprint", + "Internal" + ] + }, + "RecommendedInstanceType": { + "type": "object", + "properties": { + "recommendedInstanceType": { + "type": "string", + "description": "Recommended instance type on the basis of number of CPU and memory.\n" + }, + "numCPUs": { + "type": "integer", + "format": "int32", + "description": "Number of CPUs for recommended instance type.\n" + }, + "memory": { + "type": "number", + "format": "double", + "description": "Memory of recommended instance type.\n" + }, + "localSSD": { + "type": "integer", + "format": "int32", + "description": "LocalSSD value for recommended instance type.\n" + }, + "maxDataDisks": { + "type": "integer", + "format": "int32", + "description": "MaxDataDisks value for recommended instance type.\n" + } + } + }, + "ResourceGroup": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Resource group name.\n" + }, + "id": { + "type": "string", + "description": "Resource group id.\n" + } + } + }, + "SecuritySetting": { + "type": "object", + "required": [ + "securityGroupIds" + ], + "properties": { + "securityGroupIds": { + "type": "array", + "description": "This is the id of the security group attached. For images, this will be the default security group ID for new instances from this image.\n", + "items": { + "type": "string" + } + } + } + }, + "StorageAccountType": { + "type": "string", + "description": "Type of the storage account in azure.\n", + "enum": [ + "StandardGeneralPurpose", + "Blob" + ] + }, + "ValidationResponse": { + "type": "object", + "required": [ + "isValidated" + ], + "properties": { + "isValidated": { + "type": "boolean", + "description": "If validation was performed." 
+ }, + "failures": { + "type": "array", + "description": "Validation failures (if any).", + "items": { + "type": "string" + } + } + } + }, + "AddNodesConfig": { + "type": "object", + "required": [ + "ipmiPassword", + "nodes" + ], + "properties": { + "nodes": { + "description": "Mapping of Node name to IP configurations for the node.", + "$ref": "#/definitions/Map_NodeConfig" + }, + "ipmiPassword": { + "type": "string", + "description": "IPMI password.", + "x-secret": true + }, + "encryptionPassword": { + "type": "string", + "description": "The password previously used to encrypt the Rubrik cluster string.\n", + "x-secret": true + }, + "isIpv4ManualDiscoveryMode": { + "type": "boolean", + "description": "A Boolean value that specifies whether to use IPv4 manual discovery mode during node addition.\n" + } + } + }, + "AddNodesOperation": { + "type": "object", + "required": [ + "jobId", + "status" + ], + "properties": { + "status": { + "type": "string" + }, + "jobId": { + "type": "string" + } + } + }, + "AddNodesStatus": { + "type": "object", + "required": [ + "createTopLevelFilesystemDirs", + "ipConfig", + "ipmiConfig", + "message", + "metadataSetup", + "setupDisks", + "setupEncryptionAtRest", + "setupLoopDevices", + "setupOsAndMetadataPartitions", + "startServices", + "status" + ], + "properties": { + "status": { + "type": "string" + }, + "message": { + "type": "string" + }, + "ipConfig": { + "type": "string" + }, + "metadataSetup": { + "type": "string" + }, + "startServices": { + "type": "string" + }, + "ipmiConfig": { + "type": "string" + }, + "setupDisks": { + "type": "string" + }, + "setupEncryptionAtRest": { + "type": "string" + }, + "setupOsAndMetadataPartitions": { + "type": "string" + }, + "createTopLevelFilesystemDirs": { + "type": "string" + }, + "setupLoopDevices": { + "type": "string" + } + } + }, + "AdminUserConfig": { + "type": "object", + "required": [ + "emailAddress", + "id", + "password" + ], + "properties": { + "id": { + "type": "string" + }, + "password": { + "type": "string", + "x-secret": true + }, + "emailAddress": { + "type": "string" + } + } + }, + "AdminUserEmptyPwdConfig": { + "type": "object", + "required": [ + "emailAddress", + "id", + "password" + ], + "properties": { + "id": { + "type": "string" + }, + "password": { + "type": "string" + }, + "emailAddress": { + "type": "string" + } + } + }, + "AwsStorageConfig": { + "type": "object", + "required": [ + "bucketName" + ], + "properties": { + "bucketName": { + "type": "string", + "description": "S3 bucket name." + } + } + }, + "AzureStorageConfig": { + "type": "object", + "required": [ + "connectionString", + "containerName" + ], + "properties": { + "connectionString": { + "type": "string", + "description": "Storage account connection string.", + "x-secret": true + }, + "containerName": { + "type": "string", + "description": "Blob container name." 
+ } + } + }, + "BootstappedResult": { + "type": "object", + "required": [ + "isBootstrapped" + ], + "properties": { + "isBootstrapped": { + "type": "boolean" + } + } + }, + "BootstrapConfig": { + "type": "object", + "required": [ + "adminUserEmptyPwdInfo", + "dnsNameservers", + "name", + "nodeConfigs", + "ntpServerConfigs" + ], + "properties": { + "name": { + "type": "string" + }, + "ntpServerConfigs": { + "type": "array", + "items": { + "$ref": "#/definitions/NtpServerConfiguration" + } + }, + "dnsNameservers": { + "type": "array", + "items": { + "type": "string" + } + }, + "dnsSearchDomains": { + "type": "array", + "items": { + "type": "string" + } + }, + "nodeConfigs": { + "$ref": "#/definitions/Map_NodeConfig" + }, + "adminUserEmptyPwdInfo": { + "$ref": "#/definitions/AdminUserEmptyPwdConfig" + }, + "enableSoftwareEncryptionAtRest": { + "type": "boolean" + }, + "installTarball": { + "type": "string" + }, + "isSetupNetworkOnly": { + "type": "boolean", + "description": "Boolean value that determines whether the task is limited to network changes or includes other system changes. Value is yes when the task is limited to network changes and no when the task includes other system changes." + } + } + }, + "BootstrappableNodeInfo": { + "type": "object", + "required": [ + "hostname", + "ipv6", + "version" + ], + "properties": { + "hostname": { + "type": "string", + "description": "Hostname of a Rubrik node." + }, + "ipv6": { + "type": "string", + "description": "IPv6 address of a Rubrik node." + }, + "version": { + "type": "string", + "description": "Software version of Rubrik CDM." + } + } + }, + "BootstrappableNodeInfoListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/BootstrappableNodeInfo" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "Capacity": { + "type": "object", + "required": [ + "bytes" + ], + "properties": { + "bytes": { + "type": "integer", + "format": "int64" + } + } + }, + "CdmPackageInfo": { + "type": "object", + "required": [ + "majorVersion", + "majorVersionReleaseDate", + "releaseDate", + "tarball", + "version" + ], + "properties": { + "tarball": { + "type": "string", + "description": "Name of the Rubrik CDM tarball." + }, + "version": { + "type": "string", + "description": "Rubrik CDM version." + }, + "releaseDate": { + "type": "string", + "description": "Release date of the Rubrik CDM version." + }, + "majorVersion": { + "type": "string", + "description": "The major version of Rubrik CDM." + }, + "majorVersionReleaseDate": { + "type": "string", + "description": "Release date of the major version of Rubrik CDM." + } + } + }, + "CdmPackageInfoListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/CdmPackageInfo" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "ClassificationConfiguration": { + "type": "object", + "properties": { + "classificationColor": { + "type": "string", + "description": "The color code for the cluster security classification." 
+ }, + "classificationMessage": { + "type": "string", + "description": "Text for the cluster security classification." + } + }, + "description": "Cluster security classification object." + }, + "CloudStorageLocationConfig": { + "type": "object", + "properties": { + "awsStorageConfig": { + "$ref": "#/definitions/AwsStorageConfig" + }, + "azureStorageConfig": { + "$ref": "#/definitions/AzureStorageConfig" + } + } + }, + "ClusterConfig": { + "type": "object", + "required": [ + "adminUserInfo", + "dnsNameservers", + "name", + "nodeConfigs", + "ntpServerConfigs" + ], + "properties": { + "name": { + "type": "string" + }, + "ntpServerConfigs": { + "type": "array", + "items": { + "$ref": "#/definitions/NtpServerConfiguration" + } + }, + "dnsNameservers": { + "type": "array", + "items": { + "type": "string" + } + }, + "dnsSearchDomains": { + "type": "array", + "items": { + "type": "string" + } + }, + "nodeConfigs": { + "$ref": "#/definitions/Map_NodeConfig" + }, + "adminUserInfo": { + "$ref": "#/definitions/AdminUserConfig" + }, + "enableSoftwareEncryptionAtRest": { + "type": "boolean" + }, + "encryptionAtRestPassword": { + "type": "string", + "x-secret": true + }, + "cloudStorageLocation": { + "$ref": "#/definitions/CloudStorageLocationConfig" + }, + "installTarball": { + "type": "string" + }, + "isSetupNetworkOnly": { + "type": "boolean", + "description": "Set up network only." + }, + "clusterUuid": { + "type": "string", + "description": "A UUID generated by Polaris and assigned as the Cluster UUID. When a cluster is bootstrapped directly from Polaris, the Cluster UUID is sent to the bootstrap API. If the Cluster UUID is empty, the UUID is generated during the bootstrap operation." + } + } + }, + "ClusterConfigStatus": { + "type": "object", + "required": [ + "clusterInstall", + "configAdminUser", + "createTopLevelFilesystemDirs", + "installSchema", + "ipConfig", + "ipmiConfig", + "message", + "metadataSetup", + "resetNodes", + "setupDisks", + "setupEncryptionAtRest", + "setupLoopDevices", + "setupOsAndMetadataPartitions", + "startServices", + "status" + ], + "properties": { + "status": { + "type": "string" + }, + "message": { + "type": "string" + }, + "ipConfig": { + "type": "string" + }, + "metadataSetup": { + "type": "string" + }, + "installSchema": { + "type": "string" + }, + "startServices": { + "type": "string" + }, + "ipmiConfig": { + "type": "string" + }, + "configAdminUser": { + "type": "string" + }, + "resetNodes": { + "type": "string" + }, + "setupDisks": { + "type": "string" + }, + "setupEncryptionAtRest": { + "type": "string" + }, + "setupOsAndMetadataPartitions": { + "type": "string" + }, + "createTopLevelFilesystemDirs": { + "type": "string" + }, + "setupLoopDevices": { + "type": "string" + }, + "clusterInstall": { + "type": "string" + } + } + }, + "ClusterInstallConfig": { + "type": "object", + "required": [ + "hosts", + "tarball" + ], + "properties": { + "tarball": { + "type": "string", + "description": "Rubrik CDM tarball for installation." + }, + "hosts": { + "type": "array", + "items": { + "type": "string", + "description": "A list of hostnames of Rubrik nodes." 
+ } + } + } + }, + "ClusterUiPreference": { + "type": "object", + "required": [ + "showCloudSource", + "showHypervVm", + "showLinuxFileset", + "showManagedVolume", + "showMssql", + "showNutanixVm", + "showOracleDatabase", + "showShareFileset", + "showStorageArrayVolumeGroup", + "showVcdVapp", + "showVmwareVm", + "showWindowsFileset" + ], + "properties": { + "showVmwareVm": { + "type": "boolean", + "description": "UI preference of Vmware Vm." + }, + "showHypervVm": { + "type": "boolean", + "description": "UI preference of Hyperv Vm." + }, + "showLinuxFileset": { + "type": "boolean", + "description": "UI preference of Linux Fileset." + }, + "showWindowsFileset": { + "type": "boolean", + "description": "UI preference of Windows Fileset." + }, + "showMssql": { + "type": "boolean", + "description": "UI preference of Mssql." + }, + "showShareFileset": { + "type": "boolean", + "description": "UI preference of Share Fileset." + }, + "showNutanixVm": { + "type": "boolean", + "description": "UI preference of Nutanix Vm." + }, + "showManagedVolume": { + "type": "boolean", + "description": "UI preference of Managed Volume." + }, + "showCloudSource": { + "type": "boolean", + "description": "UI preference of Cloud Source." + }, + "showVcdVapp": { + "type": "boolean", + "description": "UI preference of vCD vApp." + }, + "showStorageArrayVolumeGroup": { + "type": "boolean", + "description": "UI preference of Storage Array Volume Group." + }, + "showOracleDatabase": { + "type": "boolean", + "description": "UI preference of Oracle Database." + } + } + }, + "ClusterUiPreferenceUpdate": { + "type": "object", + "properties": { + "showVmwareVm": { + "type": "boolean", + "description": "Updated UI preference of Vmware Vm." + }, + "showHypervVm": { + "type": "boolean", + "description": "Updated UI preference of Hyperv Vm." + }, + "showLinuxFileset": { + "type": "boolean", + "description": "Updated UI preference of Linux Fileset." + }, + "showWindowsFileset": { + "type": "boolean", + "description": "Updated UI preference of Windows Fileset." + }, + "showMssql": { + "type": "boolean", + "description": "Updated UI preference of Mssql." + }, + "showShareFileset": { + "type": "boolean", + "description": "Updated UI preference of Share Fileset." + }, + "showNutanixVm": { + "type": "boolean", + "description": "Updated UI preference of Nutanix Vm." + }, + "showManagedVolume": { + "type": "boolean", + "description": "Updated UI preference of Managed Volume." + }, + "showCloudSource": { + "type": "boolean", + "description": "Updated UI preference of Cloud Source." + }, + "showVcdVapp": { + "type": "boolean", + "description": "Updated UI preference of vCD vApp." + }, + "showStorageArrayVolumeGroup": { + "type": "boolean", + "description": "Updated UI preference of Storage Array Volume Group." + }, + "showOracleDatabase": { + "type": "boolean", + "description": "Updated UI preference of Oracle Database." + } + } + }, + "CommandResult": { + "type": "object", + "required": [ + "success" + ], + "properties": { + "success": { + "type": "boolean" + } + } + }, + "DecommissionNodesConfig": { + "type": "object", + "required": [ + "nodeIds" + ], + "properties": { + "nodeIds": { + "type": "array", + "items": { + "type": "string" + } + }, + "minTolerableNodeFailures": { + "type": "integer", + "format": "int32", + "description": "Specifies the number of remaining nodes that can fail without impact on data availability and consistency, after a specified node is decommissioned and removed. 
For example, setting the value to 1 means that a node can only be decommissioned if, after the decommission, the Rubrik cluster can lose 1 additional node and still continue normal operations." + }, + "shouldSkipPrechecks": { + "type": "boolean", + "description": "Indicates whether to skip decommission prechecks. Current decommission prechecks check for live mounts on the cluster before decommission. If decommission prechecks are skipped, decommission may get stuck or live mounts on the node may be lost. Set to true only if decommission prechecks are run and can be safely ignored." + }, + "shouldBlockOnNegativeFailureTolerance": { + "type": "boolean", + "description": "Indicates whether to block if the failure tolerance is negative. The default behavior is set to false (not to block) since Rubrik CDM includes alerts to indicate if the values is negative, making blocking unnecessary. Set to true only if Rubrik CDM reverts to previous behavior, which is to block on negative failure tolerance." + } + } + }, + "DiskCapacity": { + "allOf": [ + { + "$ref": "#/definitions/Capacity" + }, + { + "type": "object", + "required": [ + "isEncrypted" + ], + "properties": { + "isEncrypted": { + "type": "boolean" + } + } + } + ] + }, + "EdgeTrialStatus": { + "type": "object", + "required": [ + "isEdgeTrial" + ], + "properties": { + "isEdgeTrial": { + "type": "boolean", + "description": "A Boolean value that indicates whether the software is for the Rubrik Edge Trial version. When the value is 'true', the software is for the Rubrik Edge Trial version." + }, + "extensionsLeft": { + "type": "integer", + "format": "int32", + "description": "Number of trial extensions left." + }, + "daysLeft": { + "type": "integer", + "format": "int32", + "description": "Number of days left in the trial period." + } + } + }, + "FindBadDiskResult": { + "type": "object", + "required": [ + "result" + ], + "properties": { + "result": { + "description": "Response of the find_bad_disk script.", + "$ref": "#/definitions/FindBadDiskResultEnum" + } + }, + "description": "Result of running the find_bad_disk script." + }, + "FindBadDiskResultEnum": { + "type": "string", + "description": "Describes the result of running the find_bad_disk script.", + "enum": [ + "Failed", + "Missing", + "Okay", + "TurnedOff" + ] + }, + "FloatingIp": { + "type": "object", + "required": [ + "ip", + "nodeId" + ], + "properties": { + "ip": { + "type": "string" + }, + "nodeId": { + "type": "string" + } + } + }, + "FloatingIpListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/FloatingIp" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "GlobalManagerConnectionInfo": { + "type": "object", + "required": [ + "isConnected" + ], + "properties": { + "isConnected": { + "type": "boolean" + }, + "pubKey": { + "type": "string", + "description": "Public key to validate token signature." + } + } + }, + "GlobalManagerConnectionUpdate": { + "type": "object", + "required": [ + "authToken" + ], + "properties": { + "authToken": { + "type": "string", + "x-secret": true + }, + "pubKey": { + "type": "string", + "description": "Public key to validate token signature." + }, + "restartAgent": { + "type": "boolean", + "description": "Whether to restart the spark agent service." 
+ } + } + }, + "InstallStatus": { + "type": "object", + "required": [ + "message", + "status" + ], + "properties": { + "status": { + "type": "string" + }, + "message": { + "type": "string" + } + } + }, + "IpConfig": { + "type": "object", + "required": [ + "address", + "gateway", + "netmask" + ], + "properties": { + "address": { + "type": "string" + }, + "netmask": { + "type": "string" + }, + "gateway": { + "type": "string" + }, + "vlan": { + "type": "integer", + "format": "int32" + } + } + }, + "IpmiAccess": { + "type": "object", + "required": [ + "https", + "iKvm" + ], + "properties": { + "https": { + "type": "boolean" + }, + "iKvm": { + "type": "boolean" + } + } + }, + "IpmiAccessUpdate": { + "type": "object", + "properties": { + "https": { + "type": "boolean" + }, + "iKvm": { + "type": "boolean" + } + } + }, + "IpmiDetails": { + "type": "object", + "required": [ + "access", + "isAvailable" + ], + "properties": { + "isAvailable": { + "type": "boolean" + }, + "access": { + "$ref": "#/definitions/IpmiAccess" + } + } + }, + "IpmiUpdate": { + "type": "object", + "properties": { + "password": { + "type": "string", + "description": "IPMI password, password should be 5-20 characters.", + "x-secret": true + }, + "access": { + "$ref": "#/definitions/IpmiAccessUpdate" + } + } + }, + "Ipv6": { + "type": "object", + "required": [ + "ip", + "prefix" + ], + "properties": { + "ip": { + "type": "string" + }, + "prefix": { + "type": "integer", + "format": "int32" + } + }, + "description": "IPv6 address." + }, + "Ipv6Configuration": { + "type": "object", + "required": [ + "iface", + "nodeIpv6Addresses" + ], + "properties": { + "iface": { + "type": "string", + "description": "Network interface where IPv6 addresses are configured." + }, + "nodeIpv6Addresses": { + "description": "A map between node ID and IPv6 addresses used by the node on a specific interface.", + "$ref": "#/definitions/Map_Ipv6" + } + } + }, + "Ipv6ConfigurationListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/Ipv6Configuration" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "IscsiAddParams": { + "type": "object", + "required": [ + "portalDetails" + ], + "properties": { + "portalDetails": { + "$ref": "#/definitions/IscsiPortalInfo" + }, + "auth": { + "type": "boolean", + "description": "is authentication required for discovery too, false if not specified. Ignored if target is provided." + }, + "target": { + "type": "string", + "description": "name of the iSCSI target." + } + } + }, + "IscsiPortalInfo": { + "type": "object", + "required": [ + "host", + "port" + ], + "properties": { + "host": { + "type": "string", + "description": "Resolvable hostname or IP address of the iSCSI portal." + }, + "port": { + "type": "string", + "description": "Inbound port on the iSCSI portal, default is 3260." + }, + "username": { + "type": "string", + "description": "Username for CHAP authentication on the iSCSI device. No value for an unauthenticated connection." + }, + "password": { + "type": "string", + "description": "Password for CHAP authentication on the iSCSI device. No value for an unauthenticated connection.", + "x-secret": true + }, + "usernameIn": { + "type": "string", + "description": "Username for CHAP authentication on the Rubrik cluster. 
No value for an unauthenticate or unidirectional authenticated connection." + }, + "passwordIn": { + "type": "string", + "description": "Password for CHAP authentication on the Rubrik cluster. No value for an unauthenticate or unidirectional authenticated connection.", + "x-secret": true + } + } + }, + "LoginBannerConfiguration": { + "type": "object", + "properties": { + "loginBanner": { + "type": "string", + "description": "The banner that displays after a successful login." + } + }, + "description": "Login banner configuration object." + }, + "Map_Ipv6": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/Ipv6" + } + }, + "Map_NodeConfig": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/NodeConfig" + } + }, + "NetworkInterface": { + "type": "object", + "required": [ + "interfaceName", + "interfaceType", + "ipAddresses", + "netmask", + "nodeId" + ], + "properties": { + "interfaceName": { + "type": "string", + "description": "Interface name." + }, + "nodeId": { + "type": "string", + "description": "Node id." + }, + "nodeName": { + "type": "string", + "description": "Hostname of the node." + }, + "netmask": { + "type": "string", + "description": "Netmask for addresses on this interface." + }, + "ipAddresses": { + "type": "array", + "description": "Including both primary Ips and floating Ips.", + "items": { + "type": "string" + } + }, + "interfaceType": { + "description": "Network interface type.", + "$ref": "#/definitions/NetworkInterfaceType" + } + } + }, + "NetworkInterfaceListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/NetworkInterface" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "NetworkInterfaceType": { + "type": "string", + "description": "Type of network interfaces.", + "enum": [ + "Management", + "Data", + "Service", + "Other" + ] + }, + "NodeConfig": { + "type": "object", + "required": [ + "managementIpConfig" + ], + "properties": { + "managementIpConfig": { + "$ref": "#/definitions/IpConfig" + }, + "ipmiIpConfig": { + "$ref": "#/definitions/IpConfig" + }, + "dataIpConfig": { + "$ref": "#/definitions/IpConfig" + }, + "vlanIpConfigs": { + "type": "array", + "description": "VLAN Ids and associated IPs for the node.", + "items": { + "$ref": "#/definitions/VlanIp" + } + } + } + }, + "NodeIp": { + "type": "object", + "required": [ + "ip", + "node" + ], + "properties": { + "node": { + "type": "string", + "description": "Node this interface is configured on." + }, + "ip": { + "type": "string", + "description": "IP of the node." + } + } + }, + "NodeOperation": { + "type": "object", + "required": [ + "id", + "status" + ], + "properties": { + "status": { + "type": "string" + }, + "id": { + "type": "integer", + "format": "int64" + } + } + }, + "NtpServerConfiguration": { + "type": "object", + "required": [ + "server" + ], + "properties": { + "server": { + "type": "string", + "description": "Name or IP address of the NTP server." + }, + "symmetricKey": { + "$ref": "#/definitions/NtpSymmKeyConfiguration" + } + } + }, + "NtpServerConfigurationListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." 
+ }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/NtpServerConfiguration" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "NtpSymmKeyConfiguration": { + "type": "object", + "required": [ + "key", + "keyId", + "keyType" + ], + "properties": { + "keyId": { + "type": "integer", + "format": "int32", + "description": "Symmetric key id." + }, + "key": { + "type": "string", + "description": "Symmetric key (asci or hex format)." + }, + "keyType": { + "type": "string", + "description": "Symmetric key type (e.g., MD5, SHA1)." + } + } + }, + "PlatformInfo": { + "type": "object", + "required": [ + "isPwdEncryptionSupported", + "platform", + "registrationRequirement", + "shouldConfigureIpmi" + ], + "properties": { + "platform": { + "$ref": "#/definitions/PlatformEnum" + }, + "registrationRequirement": { + "$ref": "#/definitions/RegistrationRequirement" + }, + "shouldConfigureIpmi": { + "type": "boolean", + "description": "A boolean that specifies whether IPMI should be configured on this platform.\n" + }, + "isPwdEncryptionSupported": { + "type": "boolean", + "description": "A boolean that indicates whether the platform supports password-based (PWD) encryption at rest.\n" + }, + "platformDetails": { + "type": "string", + "description": "More details about the platform." + }, + "cpuCount": { + "type": "integer", + "format": "int32", + "description": "Number of CPUs." + }, + "ramSize": { + "type": "integer", + "format": "int32", + "description": "Amount of RAM in megabytes." + } + } + }, + "RecommissionNodeStatus": { + "type": "object", + "required": [ + "message", + "metadataSetup", + "startServices", + "status" + ], + "properties": { + "status": { + "type": "string" + }, + "startServices": { + "type": "string" + }, + "metadataSetup": { + "type": "string" + }, + "message": { + "type": "string" + } + } + }, + "RegistrationDetails": { + "type": "object", + "required": [ + "credentialsWrapper", + "credentialsWrapperSignature", + "registrationId" + ], + "properties": { + "registrationId": { + "type": "string", + "description": "String value created by signing the UUID of the specified Rubrik cluster with the private key of the Heartbeat server.\n" + }, + "credentialsWrapper": { + "type": "string", + "description": "Wrapper from the Support tunnel key distribution center containing the 'registrationId' value for the specified Rubrik cluster and a JSON blob with the public key and password hash for the associated rksupport credential.\n" + }, + "credentialsWrapperSignature": { + "type": "string", + "description": "String value created by signing a credentialsWrapper with the private key of the Heartbeat server.\n" + } + } + }, + "RemoveNodeStatus": { + "type": "object", + "required": [ + "message", + "removeMetadatastore", + "status" + ], + "properties": { + "status": { + "type": "string" + }, + "removeMetadatastore": { + "type": "string" + }, + "message": { + "type": "string" + } + } + }, + "SnmpConfiguration": { + "type": "object", + "required": [ + "isEnabled", + "snmpAgentPort" + ], + "properties": { + "isEnabled": { + "type": "boolean", + "description": "Boolean value that specifies whether the SNMP service is enabled. Set the value to true to enable the SNMP service and false to disable the SNMP service.\n" + }, + "communityString": { + "type": "string", + "description": "communityString is a user specified string for authentication to access SNMP statistics. 
Provides access to MIBs using SNMP v2c.\n" + }, + "snmpAgentPort": { + "type": "integer", + "format": "int32", + "description": "The SNMP agent port on the Rubrik cluster node." + }, + "trapReceiverConfigs": { + "type": "array", + "description": "Array of SNMP trap receivers for the SNMP service.", + "items": { + "$ref": "#/definitions/SnmpTrapReceiverConfig" + } + }, + "users": { + "type": "array", + "description": "Array of usernames for the SNMP service. Provides access to MIBs using SNMP v3.\n", + "items": { + "type": "string", + "description": "Username string." + } + } + }, + "description": "SNMP service configuration object summary." + }, + "SnmpConfigurationPatch": { + "type": "object", + "required": [ + "isEnabled", + "snmpAgentPort" + ], + "properties": { + "isEnabled": { + "type": "boolean", + "description": "Boolean value that specifies whether the SNMP service is enabled. Set the value to true to enable the SNMP service and false to disable the SNMP service.\n" + }, + "communityString": { + "type": "string", + "description": "communityString is a user specified string for authentication to access SNMP statistics. Provides access to MIBs using SNMP v2c.\n" + }, + "snmpAgentPort": { + "type": "integer", + "format": "int32", + "description": "The SNMP agent port on the Rubrik cluster node." + }, + "trapReceiverConfigs": { + "type": "array", + "description": "Array of SNMP trap receivers for the SNMP service.", + "items": { + "$ref": "#/definitions/SnmpTrapReceiverConfig" + } + }, + "users": { + "type": "array", + "description": "Array of users for the SNMP service. Provides access to MIBs using SNMP v3.\n", + "items": { + "$ref": "#/definitions/SnmpUserConfig" + } + } + }, + "description": "SNMP service configuration object." + }, + "SnmpSecurityLevelEnum": { + "type": "string", + "description": "Describes the security level for an SNMP trap receiver host.", + "enum": [ + "AuthPriv", + "AuthNoPriv", + "NoAuthNoPriv" + ] + }, + "SnmpTrapReceiverConfig": { + "type": "object", + "required": [ + "address", + "port" + ], + "properties": { + "address": { + "type": "string", + "description": "IPv4 address or FQDN of the SNMP trap receiver host." + }, + "port": { + "type": "integer", + "format": "int32", + "description": "The SNMP trap port on the SNMP trap receiver host." + }, + "user": { + "type": "string", + "description": "Specifies the user for the SNMP trap receiver host. A valid user is required in order to use SNMP v3. The specified user must be a valid user in the users field of the SNMP configuration. When no user is specified, SNMP v2c traps are sent to the SNMP trap receiver host. If a trap receiver user is specified, the trap receiver security level must also be specified.\n" + }, + "securityLevel": { + "description": "Specifies the security level for the SNMP trap receiver host. If a trap receiver user is specified, a securityLevel must be specified.\n", + "$ref": "#/definitions/SnmpSecurityLevelEnum" + } + }, + "description": "SNMP trap receiver configuration object." + }, + "SnmpUserConfig": { + "type": "object", + "required": [ + "authPassword", + "privPassword", + "username" + ], + "properties": { + "username": { + "type": "string", + "description": "Username for SNMP v3 MIB access." 
+ }, + "authPassword": { + "type": "string", + "description": "Authentication password for the SHA hash.", + "x-secret": true + }, + "privPassword": { + "type": "string", + "description": "Password for AES encryption.", + "x-secret": true + } + }, + "description": "SNMP user configuration object." + }, + "SupportCommunityUserCredentials": { + "type": "object", + "required": [ + "password", + "username" + ], + "properties": { + "username": { + "type": "string", + "description": "Username for Rubrik Customer Support Community Portal." + }, + "password": { + "type": "string", + "description": "Password for Rubrik Customer Support Community Portal.", + "x-secret": true + } + } + }, + "SystemStatus": { + "type": "object", + "required": [ + "status" + ], + "properties": { + "status": { + "type": "string", + "description": "System status." + }, + "message": { + "type": "string", + "description": "Human readable explanation for system status." + }, + "affectedNodeIds": { + "type": "array", + "description": "An array of the IDs of each node in an abnormal state from the specified Rubrik CDM cluster.\n", + "items": { + "type": "string" + } + } + } + }, + "VlanConfig": { + "type": "object", + "required": [ + "interfaces", + "netmask", + "vlan" + ], + "properties": { + "vlan": { + "type": "integer", + "format": "int32" + }, + "netmask": { + "type": "string", + "description": "Netmask for addresses on this VLAN." + }, + "interfaces": { + "type": "array", + "description": "Network interfaces for each node.", + "items": { + "$ref": "#/definitions/NodeIp" + } + } + } + }, + "VlanConfigListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/VlanConfig" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "VlanId": { + "type": "object", + "required": [ + "vlan" + ], + "properties": { + "vlan": { + "type": "integer", + "format": "int32", + "description": "VLAN ID." + } + } + }, + "VlanIp": { + "type": "object", + "required": [ + "ip", + "vlan" + ], + "properties": { + "vlan": { + "type": "integer", + "format": "int32", + "description": "VLAN ID for the node." + }, + "ip": { + "type": "string", + "description": "IP for the VLAN interface." + } + } + }, + "Map_Boolean": { + "type": "object", + "additionalProperties": { + "type": "boolean" + } + }, + "Map_String": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "AgentConnectStatus": { + "type": "string", + "description": "The agent connection status.", + "enum": [ + "Unregistered", + "Disconnected", + "Connected", + "SecondaryCluster" + ] + }, + "BooleanResponse": { + "type": "object", + "required": [ + "value" + ], + "properties": { + "value": { + "type": "boolean" + } + } + }, + "CountResponse": { + "type": "object", + "required": [ + "count" + ], + "properties": { + "count": { + "type": "integer", + "format": "int64" + } + } + }, + "HierarchyViewType": { + "type": "string", + "description": "View type for a particular managed hierarchy. For example, we can traverse the vCenter managed hierarchy from a folder-oriented view. 
Multiple hierarchies can use this enum, not just VMware.", + "enum": [ + "VmwareClusterHostView", + "VmwareFolderView", + "VmwareTagView" + ] + }, + "IdNamePair": { + "type": "object", + "required": [ + "id", + "name" + ], + "properties": { + "id": { + "type": "string" + }, + "name": { + "type": "string" + } + } + }, + "IdNamePairListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/IdNamePair" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "JobScheduledResponse": { + "type": "object", + "required": [ + "jobInstanceId" + ], + "properties": { + "jobInstanceId": { + "type": "string" + } + } + }, + "MountStatus": { + "type": "string", + "description": "The current status of a specified mount. When the status is **Available**, the mount is ready to use.", + "enum": [ + "Available", + "Unavailable", + "Mounting", + "Unmounting" + ] + }, + "OperatingSystemType": { + "type": "string", + "description": "Operating system of a specified machine.", + "enum": [ + "AIX", + "SunOS", + "HPUX", + "Linux", + "Windows", + "Unknown" + ] + }, + "SnapshotLocation": { + "type": "string", + "description": "Specifies the location of a snapshot.", + "enum": [ + "all", + "local" + ] + }, + "CommunityUserCredentials": { + "type": "object", + "required": [ + "password", + "username" + ], + "properties": { + "username": { + "type": "string", + "description": "Username for the account used to login to the Rubrik community / self-serve website." + }, + "password": { + "type": "string", + "description": "Password for the account used to login to the Rubrik community / self-serve website.", + "x-secret": true + } + } + }, + "BackupTaskDiagnosticInfo": { + "type": "object", + "required": [ + "taskStatus" + ], + "properties": { + "taskStatus": { + "description": "Status of the task.", + "$ref": "#/definitions/DiagnosticTaskStatus" + }, + "queueTime": { + "type": "string", + "format": "date-time", + "description": "The scheduled start time of the task." + }, + "expectedEndTime": { + "type": "string", + "format": "date-time", + "description": "The expected completion time of the task." + } + } + }, + "DiagnosticTaskStatus": { + "type": "string", + "description": "Status of the task.", + "enum": [ + "Queued", + "Started", + "Retried", + "Succeeded", + "Failed", + "Canceled" + ] + }, + "TaskDiagnosticInfo": { + "type": "object", + "required": [ + "endedTimes", + "isLate", + "queueTime", + "startedTimes", + "taskStatus", + "taskType" + ], + "properties": { + "taskType": { + "description": "Task type.", + "$ref": "#/definitions/ReportableTaskType" + }, + "taskStatus": { + "description": "Status of the task.", + "$ref": "#/definitions/DiagnosticTaskStatus" + }, + "eventSeriesId": { + "type": "string", + "description": "The event series id of the task." + }, + "queueTime": { + "type": "string", + "format": "date-time", + "description": "The scheduled start time of the task." + }, + "expectedEndTime": { + "type": "string", + "format": "date-time", + "description": "The time by which the task need to finish." 
+ }, + "startedTimes": { + "type": "array", + "description": "The start times of the task for each retry.", + "items": { + "type": "string", + "format": "date-time" + } + }, + "endedTimes": { + "type": "array", + "description": "The end times of the task for each retry.", + "items": { + "type": "string", + "format": "date-time" + } + }, + "isLate": { + "type": "boolean", + "description": "Whether the task is running late." + } + } + }, + "EnvoyBulkUpdate": { + "type": "object", + "required": [ + "id", + "updateProperties" + ], + "properties": { + "id": { + "type": "string", + "description": "ID assigned to a Rubrik Envoy object." + }, + "updateProperties": { + "$ref": "#/definitions/EnvoyUpdate" + } + } + }, + "EnvoyCreate": { + "type": "object", + "required": [ + "ipAddress", + "port" + ], + "properties": { + "ipAddress": { + "type": "string", + "description": "IPv4 address of a specified Rubrik Envoy object that can be accessed by the specified Rubrik cluster." + }, + "port": { + "type": "integer", + "format": "int32", + "description": "Rubrik cluster port assigned to the specified organization for tunnel connections to the Rubrik web UI." + } + } + }, + "EnvoyDetail": { + "allOf": [ + { + "$ref": "#/definitions/EnvoySummary" + } + ] + }, + "EnvoyDetailList": { + "type": "object", + "required": [ + "envoyDetails" + ], + "properties": { + "envoyDetails": { + "type": "array", + "description": "A list of detailed view of Rubrik Envoy objects.", + "items": { + "$ref": "#/definitions/EnvoyDetail" + } + } + } + }, + "EnvoyIdList": { + "type": "object", + "required": [ + "envoyIds" + ], + "properties": { + "envoyIds": { + "type": "array", + "description": "A list of Rubrik Envoy objects IDs.", + "items": { + "type": "string" + } + } + } + }, + "EnvoySummary": { + "type": "object", + "required": [ + "id", + "ipAddress", + "organizationId", + "port", + "status" + ], + "properties": { + "id": { + "type": "string", + "description": "ID assigned to a Rubrik Envoy object." + }, + "ipAddress": { + "type": "string", + "description": "IPv4 address of a specified Rubrik Envoy object that can be accessed by the specified Rubrik cluster." + }, + "port": { + "type": "integer", + "format": "int32", + "description": "Rubrik cluster port assigned to the specified organization for tunnel connections to the Rubrik web UI." + }, + "organizationId": { + "type": "string", + "description": "ID assigned to the organization object that is associated with the specified Rubrik Envoy object." + }, + "status": { + "type": "string", + "description": "Connection state of the specified Rubrik Envoy object." + } + } + }, + "EnvoySummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/EnvoySummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "EnvoyUpdate": { + "type": "object", + "properties": { + "ipAddress": { + "type": "string", + "description": "IPv4 address of a specified Rubrik Envoy object that can be accessed by the specified Rubrik cluster." + }, + "port": { + "type": "integer", + "format": "int32", + "description": "Rubrik cluster port assigned to the specified organization for tunnel connections to the Rubrik web UI." 
+ } + } + }, + "EnvoyUserInfo": { + "type": "object", + "required": [ + "password", + "username" + ], + "properties": { + "username": { + "type": "string", + "description": "Username of a specified Rubrik Envoy object that can be used to access envoy vm." + }, + "password": { + "type": "string", + "description": "Password of a specified Rubrik Envoy object that can be used to access envoy vm." + } + } + }, + "EventMonitoredJobCountByJobType": { + "type": "object", + "required": [ + "after_date", + "archivalCount", + "backupCount", + "before_date", + "eventType", + "instantiateCount", + "recoveryCount", + "replicationCount", + "status", + "totalCount" + ], + "properties": { + "eventType": { + "type": "array", + "description": "Type of event series.", + "items": { + "$ref": "#/definitions/MonitoringSummaryEventType" + } + }, + "status": { + "type": "array", + "description": "The status of the most recent event in the event series.", + "items": { + "$ref": "#/definitions/JobEventSeriesStatus" + } + }, + "before_date": { + "type": "string", + "format": "date-time", + "description": "A filter that returns all job-related events prior to a specified date." + }, + "after_date": { + "type": "string", + "format": "date-time", + "description": "A filter that returns all job-related events after a specified date." + }, + "totalCount": { + "type": "integer", + "format": "int64", + "description": "The total count of all jobs." + }, + "archivalCount": { + "type": "integer", + "format": "int64", + "description": "The total count of all Archival jobs." + }, + "backupCount": { + "type": "integer", + "format": "int64", + "description": "The total count of all Backup jobs." + }, + "instantiateCount": { + "type": "integer", + "format": "int64", + "description": "The total count of all Instantiated jobs." + }, + "recoveryCount": { + "type": "integer", + "format": "int64", + "description": "The total count of all Recovery jobs." + }, + "replicationCount": { + "type": "integer", + "format": "int64", + "description": "The total count of all Replication jobs." + } + } + }, + "EventMonitoredJobCountByStatus": { + "type": "object", + "required": [ + "after_date", + "before_date", + "canceledCount", + "eventType", + "failureCount", + "queuedCount", + "runningCount", + "status", + "successCount", + "totalCount" + ], + "properties": { + "eventType": { + "type": "array", + "description": "Type of event series.", + "items": { + "$ref": "#/definitions/MonitoringSummaryEventType" + } + }, + "status": { + "type": "array", + "description": "The status of the most recent event in the event series.", + "items": { + "$ref": "#/definitions/JobEventSeriesStatus" + } + }, + "before_date": { + "type": "string", + "format": "date-time", + "description": "A filter that returns all job-related events prior to a specified date." + }, + "after_date": { + "type": "string", + "format": "date-time", + "description": "A filter that returns all job-related events after a specified date." + }, + "totalCount": { + "type": "integer", + "format": "int32", + "description": "The total count of all jobs." + }, + "failureCount": { + "type": "integer", + "format": "int32", + "description": "The total count of all jobs with the 'Failed' status." + }, + "runningCount": { + "type": "integer", + "format": "int32", + "description": "The total count of all jobs with the 'Running' status." + }, + "queuedCount": { + "type": "integer", + "format": "int32", + "description": "The total count of all jobs with the 'Queued' status." 
+ }, + "successCount": { + "type": "integer", + "format": "int32", + "description": "The total count of all jobs with the 'Succeeded' status." + }, + "canceledCount": { + "type": "integer", + "format": "int32", + "description": "The total count of all jobs with the 'Canceled' status." + } + } + }, + "EventSeriesDetail": { + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "type": "string" + }, + "jobInstanceId": { + "type": "string" + } + } + }, + "EventSeriesMonitoredJobSummary": { + "type": "object", + "required": [ + "eventDate", + "eventId", + "eventSeriesId", + "hasJob", + "startTime", + "status", + "taskType" + ], + "properties": { + "taskType": { + "description": "Type of Event Series: One of Backup, Instantantiate, Archive, Replication, Recovery.\n", + "$ref": "#/definitions/MonitoringSummaryEventType" + }, + "objectInfo": { + "description": "The information of the object associated with this job.", + "$ref": "#/definitions/ObjectInfo" + }, + "location": { + "type": "string" + }, + "username": { + "type": "string", + "description": "The username of the user that initiated the job." + }, + "eventSeriesId": { + "type": "string", + "description": "The ID of the event series." + }, + "eventId": { + "type": "string", + "description": "The ID of the event." + }, + "jobInstanceId": { + "type": "string", + "description": "The ID of the associated job instance." + }, + "status": { + "type": "string", + "description": "The status of the event series." + }, + "slaInfo": { + "description": "The information of the SLA associated with this job.", + "$ref": "#/definitions/SlaInfo" + }, + "startTime": { + "type": "string", + "format": "date-time", + "description": "The timestamp when the job started. Only used when the event corresponds to a job." + }, + "endTime": { + "type": "string", + "format": "date-time", + "description": "The time of the last event in a completed event series. When the event corresponds to a job, the time that the job finished." + }, + "duration": { + "type": "string", + "description": "The current run time of the job that corresponds to the events. For completed jobs, this is equal to the total job run time." + }, + "archiveTargetName": { + "type": "string", + "description": "The name of the archive target." + }, + "replicationSourceCluterName": { + "type": "string", + "description": "The name of the source cluster related to the replication task." + }, + "replicationTargetLocationName": { + "type": "string", + "description": "The name of the target location related to the replication task." + }, + "objectLogicalSize": { + "type": "integer", + "format": "int64", + "description": "The logical size of the snappable that is related to the event." + }, + "logicalSize": { + "type": "integer", + "format": "int64", + "description": "The logical size protected by the job that is related to the event." + }, + "dataTransferred": { + "type": "integer", + "format": "int64", + "description": "The amount of data transferred by the event or job to date." + }, + "throughput": { + "type": "integer", + "format": "int64", + "description": "The average rate of data transfer, measured in bytes per second. This rate is the total amount of data transferred divided by the total time required by the transfer." + }, + "hasJob": { + "type": "boolean", + "description": "A Boolean value that determines whether the event series is linked to a backend job. When 'true' the event series is linked to a backend job. When 'false' the event series is not linked to a backend job." 
+ }, + "dataToTransfer": { + "type": "integer", + "format": "int64", + "description": "The amount of data that needs to be transferred by the job that corresponds to the event." + }, + "numberOfRetries": { + "type": "integer", + "format": "int64", + "description": "The number of times the job has been retried." + }, + "estimatedTimeRemaining": { + "type": "string", + "format": "date-time", + "description": "Estimated amount of time remaining for the job corresponding to the event." + }, + "nextJobStartTime": { + "type": "string", + "format": "date-time", + "description": "If the event corresponds to a job,the start time of the next job." + }, + "previousSuccessfulJobTime": { + "type": "string", + "format": "date-time", + "description": "If the event corresponds to a job, the time of the last successful job." + }, + "nodeIds": { + "type": "array", + "description": "The IDs of the node.", + "items": { + "type": "string" + } + }, + "isFirstFullSnapshot": { + "type": "boolean", + "description": "A Boolean value that determines whether the job assocaited with the event is a first full snapshot backup." + }, + "eventDate": { + "type": "string", + "format": "date-time", + "description": "Date of the event." + } + } + }, + "EventSeverity": { + "type": "string", + "description": "Severity of an event.", + "enum": [ + "Informational", + "Warning", + "Critical" + ] + }, + "EventStatusSummary": { + "type": "object", + "required": [ + "attemptNumber", + "id", + "isCancelRequested", + "isCancelable", + "progress" + ], + "properties": { + "id": { + "type": "string" + }, + "jobInstanceId": { + "type": "string" + }, + "progress": { + "type": "string" + }, + "isCancelable": { + "type": "boolean" + }, + "isCancelRequested": { + "type": "boolean" + }, + "attemptNumber": { + "type": "integer", + "format": "int32" + } + } + }, + "EventStatusSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/EventStatusSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." 
+ } + } + }, + "EventType": { + "type": "string", + "description": "Type of an event.", + "enum": [ + "Archive", + "Audit", + "AuthDomain", + "AwsEvent", + "Backup", + "Classification", + "CloudNativeSource", + "CloudNativeVm", + "Configuration", + "Connection", + "Conversion", + "Diagnostic", + "Discovery", + "Failover", + "Fileset", + "Hardware", + "HostEvent", + "HypervScvmm", + "HypervServer", + "Index", + "Instantiate", + "LegalHold", + "Maintenance", + "NutanixCluster", + "Recovery", + "Replication", + "Storage", + "StorageArray", + "StormResource", + "Support", + "System", + "TestFailover", + "Upgrade", + "User", + "VCenter", + "Vcd", + "VolumeGroup" + ] + }, + "JobEventSeriesStatus": { + "type": "string", + "description": "Status of the job.", + "enum": [ + "Failure", + "Queued", + "Success", + "Active" + ] + }, + "MonitoringSummaryEventType": { + "type": "string", + "description": "Event types used in event summary.", + "enum": [ + "Archive", + "Backup", + "Instantiate", + "Recovery", + "Replication" + ] + }, + "NotificationSettingCreate": { + "type": "object", + "required": [ + "emailAddresses", + "eventTypes", + "objectTypes", + "severity", + "shouldSendToSyslog", + "snmpAddresses" + ], + "properties": { + "eventTypes": { + "type": "array", + "description": "The event types associated with this Notification Setting.", + "items": { + "type": "string" + } + }, + "emailAddresses": { + "type": "array", + "description": "The email addresses where notifications are sent.", + "items": { + "type": "string" + } + }, + "snmpAddresses": { + "type": "array", + "description": "The SNMP traps where notifications are sent.", + "items": { + "type": "string" + } + }, + "shouldSendToSyslog": { + "type": "boolean", + "description": "A Boolean value that specifies whether notifications are sent to syslog. When 'true', notifications are sent to syslog. When 'false' notifications are not sent to syslog." + }, + "organizationId": { + "type": "string", + "description": "The ID of the organization this Notification Setting should belong to." + }, + "severity": { + "type": "array", + "description": "The severity associated with this Notification Setting.", + "items": { + "type": "string" + } + }, + "objectTypes": { + "type": "array", + "description": "The objectTypes associated with this Notification Setting.", + "items": { + "type": "string" + } + } + } + }, + "NotificationSettingSummary": { + "type": "object", + "required": [ + "emailAddresses", + "eventTypes", + "id", + "objectTypes", + "severity", + "shouldSendToSyslog", + "snmpAddresses" + ], + "properties": { + "id": { + "type": "string", + "description": "ID assigned to a Notification Setting object." + }, + "eventTypes": { + "type": "array", + "description": "The event types associated with this Notification Setting.", + "items": { + "type": "string" + } + }, + "emailAddresses": { + "type": "array", + "description": "The email addresses where notifications are sent.", + "items": { + "type": "string" + } + }, + "snmpAddresses": { + "type": "array", + "description": "The SNMP traps where notifications are sent.", + "items": { + "type": "string" + } + }, + "shouldSendToSyslog": { + "type": "boolean", + "description": "A Boolean value that specifies whether notifications are sent to syslog. When 'true', notifications are sent to syslog. When 'false' notifications are not sent to syslog." 
+ }, + "severity": { + "type": "array", + "description": "The severity level(s) of this Notification Setting.", + "items": { + "type": "string" + } + }, + "objectTypes": { + "type": "array", + "description": "The object type(s) of this Notification Setting.", + "items": { + "type": "string" + } + } + } + }, + "NotificationSettingSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/NotificationSettingSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "ObjectInfo": { + "type": "object", + "required": [ + "objectId", + "objectName", + "objectType" + ], + "properties": { + "objectId": { + "type": "string", + "description": "The ID of the object." + }, + "objectName": { + "type": "string", + "description": "The name of the object." + }, + "objectType": { + "type": "string", + "description": "The type of the associated object." + } + } + }, + "SlaInfo": { + "type": "object", + "required": [ + "slaId", + "slaName" + ], + "properties": { + "slaId": { + "type": "string", + "description": "The SLA ID." + }, + "slaName": { + "type": "string", + "description": "The SLA name." + }, + "isOnDemand": { + "type": "boolean", + "description": "Boolean value indicates on demand job when the value is true." + }, + "isRetentionLocked": { + "type": "boolean", + "description": "Boolean value that identifies a Retention Lock SLA Domain. Value is true when an SLA Domain is Retention Locked." + } + } + }, + "ChildSnappableFailoverInfo": { + "type": "object", + "required": [ + "childSnappableId", + "recoveryPlanId" + ], + "properties": { + "childSnappableId": { + "type": "string", + "description": "Child snappable of the Blueprint." + }, + "recoveryPlanId": { + "type": "string", + "description": "Recovery plan ID of the child snappable." + }, + "pitRecoveryInfo": { + "description": "If defined, provides point in time recovery details for this child protected object.", + "$ref": "#/definitions/VmPitRecoveryInfo" + } + } + }, + "DataStoreStatusDetails": { + "type": "object", + "required": [ + "accessible", + "freeSpaceInBytes", + "id", + "vcenterId" + ], + "properties": { + "id": { + "type": "string", + "description": "The ID of the DataStore." + }, + "vcenterId": { + "type": "string", + "description": "The ID of the Vcenter which this DataStore belongs to." + }, + "accessible": { + "type": "boolean", + "description": "True if the DataStore specified is accessible." + }, + "freeSpaceInBytes": { + "type": "integer", + "format": "int64", + "description": "Amount of free space left on this DataStore in bytes." + } + } + }, + "DataStoreStatusQueryRequest": { + "type": "object", + "required": [ + "dataStoreStatusQueryRequestDetails" + ], + "properties": { + "dataStoreStatusQueryRequestDetails": { + "type": "array", + "items": { + "$ref": "#/definitions/DataStoreStatusQueryRequestDetails" + } + } + } + }, + "DataStoreStatusQueryRequestDetails": { + "type": "object", + "required": [ + "dataStoreIds", + "vcenterId" + ], + "properties": { + "dataStoreIds": { + "type": "array", + "description": "The list of DataStore IDs to query status.", + "items": { + "type": "string" + } + }, + "vcenterId": { + "type": "string", + "description": "The ID of the Vcenter which the list of DataStores belong to." 
+ } + } + }, + "DataStoreStatusQueryResponse": { + "type": "object", + "required": [ + "dataStoreStatusDetails" + ], + "properties": { + "dataStoreStatusDetails": { + "type": "array", + "description": "The DataStore status details.", + "items": { + "$ref": "#/definitions/DataStoreStatusDetails" + } + } + } + }, + "FailoverFailureAction": { + "type": "string", + "description": "FailoverFailureAction defines what action should be taken if the failover job fails.", + "enum": [ + "Pause", + "Cleanup" + ] + }, + "FailoverStatus": { + "type": "string", + "description": "FailoverStatus defines what is the status of the failover. For example, DoesNotExist means that such a failover is not recognized by CDM.", + "enum": [ + "DoesNotExist", + "Ongoing", + "Paused", + "FailoverJobSucceeded", + "FailoverJobFailed", + "FailoverCleanupStarted", + "TestFailoverSucceeded", + "FailoverFailed" + ] + }, + "FailoverStatusResponse": { + "type": "object", + "required": [ + "failoverStatus" + ], + "properties": { + "failoverStatus": { + "description": "The status of the failover.", + "$ref": "#/definitions/FailoverStatus" + } + } + }, + "FailoverType": { + "type": "string", + "description": "FailoverType defines what kind of failover it is, production or test.", + "enum": [ + "Production", + "Test" + ] + }, + "RecoverySpecSupportedSnappableType": { + "type": "string", + "description": "RecoverySpecSupportedSnappableType defines the snappable types which supports upserting of snappable recovery specs.", + "enum": [ + "VmwareVirtualMachine" + ] + }, + "RecoverySpecValidationResponse": { + "type": "object", + "required": [ + "recoverySpecValidityStatus" + ], + "properties": { + "recoverySpecValidityStatus": { + "description": "Status defining validity of the recovery spec.", + "$ref": "#/definitions/RecoverySpecValidityStatus" + }, + "errorMessages": { + "type": "array", + "description": "List of errors found with the recovery spec of the children of this Blueprint.", + "items": { + "type": "string" + } + } + } + }, + "RecoverySpecValidityStatus": { + "type": "string", + "description": "RecoverySpecValidityStatus defines the validity of the current recovery spec of the children of the App Blueprint being failed over.", + "enum": [ + "Valid", + "Invalid" + ] + }, + "SnappableRecoverySpec": { + "type": "object", + "required": [ + "serializedSnappableRecoverySpec", + "snappableId", + "snappableType" + ], + "properties": { + "snappableId": { + "type": "string", + "description": "The ID of the snappable. Note that the ID is CDM ID not Polaris ID." + }, + "snappableType": { + "description": "The snappable type of the snappable.", + "$ref": "#/definitions/RecoverySpecSupportedSnappableType" + }, + "serializedSnappableRecoverySpec": { + "type": "string", + "description": "The serialized recovery spec of the snappable." + } + } + }, + "SnappableRecoverySpecDetails": { + "type": "object", + "required": [ + "platformId", + "polarisBlueprintId", + "snappableRecoverySpecs" + ], + "properties": { + "polarisBlueprintId": { + "type": "string", + "description": "Polaris ID of the Blueprint. We only allow to update recovery specs for snappables which belong to a Blueprint. This Blueprint ID is used to validate if snappables specifed are indeed the children of this Blueprint." + }, + "platformId": { + "type": "string", + "description": "The CDM cluster UUID or Polaris account ID where the blueprint was created." 
+ }, + "snappableRecoverySpecs": { + "type": "array", + "description": "List of snappable recovery spec details.", + "items": { + "$ref": "#/definitions/SnappableRecoverySpec" + } + } + } + }, + "TriggerFailoverOnSourceDefinition": { + "type": "object", + "required": [ + "failoverId", + "failoverType", + "isZeroRpo" + ], + "properties": { + "failoverId": { + "type": "string", + "description": "ID of the failover." + }, + "failoverType": { + "description": "The type of failover. A test failover requires a snapshot ID and does not shut down or unprotect the App Bluerint. A production failover either takes a snapshot ID or is zero data loss, and does shut down and unprotect the Blueprint. In zero data loss production failover, the snapshot is taken after the Blueprint is shut down.", + "$ref": "#/definitions/FailoverType" + }, + "isZeroRpo": { + "type": "boolean", + "description": "A boolean that specifies whether or not the failover is zero data loss. When the value is 'true', the failover is zero data loss failover." + }, + "snapshotId": { + "type": "string", + "description": "ID of a snapshot of the Blueprint to fail over." + } + } + }, + "TriggerFailoverOnTargetDefinition": { + "type": "object", + "required": [ + "appSnapshotInfo", + "childSnappableFailoverInfos", + "failoverFailureAction", + "failoverType", + "replicationSourceId" + ], + "properties": { + "newPolarisAppBlueprintId": { + "type": "string", + "description": "Polaris ID of the Blueprint being created by the failover job. This field is required for production failovers." + }, + "replicationSourceId": { + "type": "string", + "description": "ID of the source from which failover is being done." + }, + "failoverType": { + "description": "Type of the failover.", + "$ref": "#/definitions/FailoverType" + }, + "failoverFailureAction": { + "description": "Action to be taken if the failover job fails.", + "$ref": "#/definitions/FailoverFailureAction" + }, + "appSnapshotInfo": { + "description": "Information required to replicate the App snapshot along with it's child snapshots from Polaris.", + "$ref": "#/definitions/ReplicationSnapshotInfo" + }, + "childSnappableFailoverInfos": { + "type": "array", + "description": "Information required to fail over the child snappables according to the user requirements.", + "items": { + "$ref": "#/definitions/ChildSnappableFailoverInfo" + } + } + } + }, + "UpsertSnappableRecoverySpecResponse": { + "type": "object", + "required": [ + "snappableRecoverySpecDetails" + ], + "properties": { + "snappableRecoverySpecDetails": { + "description": "The consumed snappable recovery specs details.", + "$ref": "#/definitions/SnappableRecoverySpecDetails" + } + } + }, + "VmPitRecoveryInfo": { + "type": "object", + "required": [ + "recoveryPoint" + ], + "properties": { + "recoveryPoint": { + "type": "integer", + "format": "int64", + "description": "Point in time, in the number of milliseconds since the UNIX epoch, to which the protected object is recovering." + } + } + }, + "AdaptiveThrottlingSettingsFilesetFields": { + "type": "object", + "properties": { + "filesetThrottlingSettings": { + "$ref": "#/definitions/FilesetAdaptiveThrottlingSettings" + } + } + }, + "FilesetAdaptiveThrottlingSettings": { + "type": "object", + "properties": { + "hostIoLatencyThreshold": { + "type": "integer", + "format": "int32", + "description": "Threshold host latency value that determines whether to postpone a scheduled backup of a fileset on the host. Specify the threshold value in milliseconds (ms)." 
+ }, + "cpuUtilizationThreshold": { + "type": "integer", + "format": "int32", + "description": "Threshold host CPU utilization value that determines whether to postpone a scheduled backup of a fileset on the host. Specify the threshold value as a percentage." + } + } + }, + "FilesetArraySpec": { + "type": "object", + "properties": { + "proxyHostId": { + "type": "string", + "description": "ID assigned to a proxy host for array-enabled backups. This property is only required for array-enabled backups." + } + } + }, + "FilesetCreate": { + "allOf": [ + { + "type": "object", + "required": [ + "templateId" + ], + "properties": { + "hostId": { + "type": "string" + }, + "shareId": { + "type": "string" + }, + "templateId": { + "type": "string" + }, + "arraySpec": { + "$ref": "#/definitions/FilesetArraySpec" + }, + "isPassthrough": { + "type": "boolean", + "description": "A Boolean value that determines whether to take a direct archive backup. When 'true,' performs a direct archive backup. When 'false,' performs a normal backup. If not specified, this defaults to false." + }, + "enableSymlinkResolution": { + "type": "boolean", + "description": "A Boolean value that determines whether to resolve symlink in a fileset. When 'true,' performs a symlink resolution. When 'false,' performs no symlink resolution. If not specified, this defaults to false." + }, + "enableHardlinkSupport": { + "type": "boolean", + "description": "A Boolean value that determines whether to recognize and dedupe hardlinks in a fileset. When 'true,' performs a hardlink deduplication. When 'false,' performs a normal backup that treats hardlinks as normal files. If not specified, this defaults to false." + }, + "failoverClusterAppId": { + "type": "string", + "description": "ID of the failover cluster app." + }, + "snapMirrorLabelForFullBackup": { + "type": "string", + "description": "Rubrik CDM uses a prefix match to select the latest SnapMirror snapshot that matches this value during a full backup of a SnapMirror destination share." + }, + "snapMirrorLabelForIncrementalBackup": { + "type": "string", + "description": "Rubrik CDM selects the latest SnapMirror snapshot that matches this value using a prefix match during an incremental backup of a SnapMirror destination share." + } + } + } + ] + }, + "FilesetDetail": { + "allOf": [ + { + "$ref": "#/definitions/FilesetUpdate" + }, + { + "$ref": "#/definitions/FilesetSummary" + }, + { + "type": "object", + "required": [ + "snapshotCount" + ], + "properties": { + "protectionDate": { + "type": "string", + "format": "date-time" + }, + "snapshotCount": { + "type": "integer", + "format": "int32" + }, + "archivedSnapshotCount": { + "type": "integer", + "format": "int32" + }, + "snapshots": { + "type": "array", + "items": { + "$ref": "#/definitions/FilesetSnapshotSummary" + } + }, + "localStorage": { + "type": "integer", + "format": "int64" + }, + "archiveStorage": { + "type": "integer", + "format": "int64" + }, + "preBackupScript": { + "type": "string", + "description": "Script to run before backup of this Fileset starts." + }, + "postBackupScript": { + "type": "string", + "description": "Script to run after backup of this Fileset ends." + }, + "backupScriptTimeout": { + "type": "integer", + "format": "int64", + "description": "Number of seconds after which the script is killed if it has not completed execution." + }, + "backupScriptErrorHandling": { + "type": "string", + "description": "Action taken if script fails. Options are \"abort\", \"continue\"." 
+ } + } + } + ] + }, + "FilesetDetailListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/FilesetDetail" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "FilesetDownloadFilesJobConfig": { + "type": "object", + "required": [ + "sourceDirs" + ], + "properties": { + "sourceDirs": { + "type": "array", + "description": "An array containing the full source path of each file and folder that is part of the download job. The array must contain at least one path.", + "items": { + "type": "string" + } + }, + "legalHoldDownloadConfig": { + "description": "An optional argument containing a Boolean parameter to depict if the download is being triggered for Legal Hold use case.", + "$ref": "#/definitions/LegalHoldDownloadConfig" + } + } + }, + "FilesetExportFilesJobConfig": { + "type": "object", + "required": [ + "exportPathPairs", + "hostId" + ], + "properties": { + "hostId": { + "type": "string", + "description": "Host ID." + }, + "shareId": { + "type": "string", + "description": "Network share ID." + }, + "exportPathPairs": { + "type": "array", + "description": "Pairs of source path and destination path.", + "items": { + "$ref": "#/definitions/FilesetExportPathPair" + } + }, + "ignoreErrors": { + "type": "boolean", + "description": "Optional Boolean value that determines whether or not to ignore errors during an export. By default, this value is set to False. Set this value to True to ignore errors." + } + } + }, + "FilesetExportPathPair": { + "allOf": [ + { + "$ref": "#/definitions/ExportPathPair" + } + ] + }, + "FilesetOptions": { + "type": "object", + "properties": { + "allowBackupNetworkMounts": { + "type": "boolean", + "description": "Include or exclude locally-mounted remote file systems from backups." + }, + "allowBackupHiddenFoldersInNetworkMounts": { + "type": "boolean", + "description": "Include or exclude hidden folders inside locally-mounted remote file systems from backups." + }, + "useWindowsVss": { + "type": "boolean", + "description": "Use VSS during Windows backups." + } + } + }, + "FilesetRestoreFilesJobConfig": { + "type": "object", + "required": [ + "restoreConfig" + ], + "properties": { + "restoreConfig": { + "type": "array", + "description": "Absolute file path.. and restore path if not restored back to itself.", + "items": { + "$ref": "#/definitions/FilesetRestorePathPair" + } + }, + "ignoreErrors": { + "type": "boolean", + "description": "Optional field to determine if we should ignore single error during restore. Default value is false." 
+ } + } + }, + "FilesetRestorePathPair": { + "allOf": [ + { + "$ref": "#/definitions/RestorePathPair" + } + ] + }, + "FilesetSnapshotDetail": { + "allOf": [ + { + "$ref": "#/definitions/FilesetSnapshotSummary" + }, + { + "type": "object", + "required": [ + "lastModified", + "size" + ], + "properties": { + "lastModified": { + "type": "string" + }, + "size": { + "type": "integer", + "format": "int64" + }, + "verbose": { + "$ref": "#/definitions/FilesetSnapshotVerbose" + } + } + } + ] + }, + "FilesetSnapshotSummary": { + "allOf": [ + { + "$ref": "#/definitions/BaseSnapshotSummary" + }, + { + "type": "object", + "required": [ + "filesetName" + ], + "properties": { + "filesetName": { + "type": "string" + }, + "fileCount": { + "type": "integer", + "format": "int64" + }, + "snapdiffUsed": { + "type": "boolean" + } + } + } + ] + }, + "FilesetSnapshotVerbose": { + "type": "object", + "required": [ + "hasFingerprint", + "partitionPaths" + ], + "properties": { + "hasFingerprint": { + "type": "boolean", + "description": "Whether or not the fileset snapshot has fingerprint info." + }, + "partitionPaths": { + "type": "array", + "description": "List a partition paths for the fileset snapshot.", + "items": { + "type": "string" + } + } + } + }, + "FilesetSummary": { + "allOf": [ + { + "$ref": "#/definitions/FilesetOptions" + }, + { + "$ref": "#/definitions/SlaAssignable" + }, + { + "type": "object", + "required": [ + "hostName", + "includes", + "isRelic", + "templateId", + "templateName" + ], + "properties": { + "hostId": { + "type": "string" + }, + "shareId": { + "type": "string" + }, + "hostName": { + "type": "string" + }, + "templateId": { + "type": "string" + }, + "templateName": { + "type": "string" + }, + "operatingSystemType": { + "type": "string" + }, + "effectiveSlaDomainId": { + "type": "string", + "description": "The ID of the effective SLA Domain for this fileset." + }, + "isEffectiveSlaDomainRetentionLocked": { + "type": "boolean", + "description": "An optional Boolean value that specifies whether the effective SLA Domain of a fileset is Retention Locked. When this value is 'true,' the SLA Domain is retention locked. When this value is 'false,' the SLA Domain is not Retention Locked." + }, + "effectiveSlaDomainName": { + "type": "string", + "description": "The name of the effective SLA Domain for this fileset." + }, + "effectiveSlaDomainPolarisManagedId": { + "type": "string", + "description": "Optional field containing Polaris managed ID of the effective SLA domain if it is Polaris managed." + }, + "includes": { + "type": "array", + "items": { + "type": "string" + } + }, + "excludes": { + "type": "array", + "items": { + "type": "string" + } + }, + "exceptions": { + "type": "array", + "items": { + "type": "string" + } + }, + "isRelic": { + "type": "boolean" + }, + "arraySpec": { + "$ref": "#/definitions/FilesetArraySpec" + }, + "isPassthrough": { + "type": "boolean", + "description": "A Boolean value that determines whether to take a direct archive backup. When 'true,' performs a direct archive backup. When 'false,' performs a normal backup. If not specified, this defaults to false." + }, + "enableSymlinkResolution": { + "type": "boolean", + "description": "A Boolean value that determines whether to resolve symlink in a fileset. When 'true,' performs a symlink resolution. When 'false,' performs no symlink resolution. If not specified, this defaults to false." 
+ }, + "enableHardlinkSupport": { + "type": "boolean", + "description": "A Boolean value that determines whether to recognize and dedupe hardlinks in a fileset. When 'true,' performs a hardlink deduplication. When 'false,' performs a normal backup that treats hardlinks as normal files. If not specified, this defaults to false." + }, + "failoverClusterAppId": { + "type": "string", + "description": "ID of the failover cluster app." + }, + "failoverClusterAppName": { + "type": "string", + "description": "The name of the failover cluster app." + }, + "pendingSlaDomain": { + "description": "Describes any pending SLA Domain assignment on this object.", + "$ref": "#/definitions/ManagedObjectPendingSlaInfo" + }, + "snapMirrorLabelForFullBackup": { + "type": "string", + "description": "Rubrik CDM uses a prefix match to select the latest SnapMirror snapshot that matches this value during a full backup of a SnapMirror destination share." + }, + "snapMirrorLabelForIncrementalBackup": { + "type": "string", + "description": "Rubrik CDM selects the latest SnapMirror snapshot that matches this value using a prefix match during an incremental backup of a SnapMirror destination share." + } + } + } + ] + }, + "FilesetTemplateCreate": { + "allOf": [ + { + "$ref": "#/definitions/FilesetOptions" + }, + { + "type": "object", + "required": [ + "includes", + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "includes": { + "type": "array", + "items": { + "type": "string" + } + }, + "excludes": { + "type": "array", + "items": { + "type": "string" + } + }, + "exceptions": { + "type": "array", + "items": { + "type": "string" + } + }, + "operatingSystemType": { + "type": "string", + "description": "Operating system type of filesets created by template.", + "enum": [ + "UnixLike", + "Windows" + ] + }, + "shareType": { + "type": "string", + "enum": [ + "NFS", + "SMB" + ] + }, + "preBackupScript": { + "type": "string", + "description": "Script to run before backup of this fileset starts." + }, + "postBackupScript": { + "type": "string", + "description": "Script to run after backup of this fileset ends." + }, + "backupScriptTimeout": { + "type": "integer", + "format": "int64", + "description": "Number of seconds after which the script is killed if it has not completed execution." + }, + "backupScriptErrorHandling": { + "type": "string", + "description": "Action taken if script fails. Options are \"abort\", \"continue\"." + }, + "isArrayEnabled": { + "type": "boolean", + "description": "Boolean value that determines whether the fileset is array-enabled. Set to true to indicate that the fileset is array-enabled. Set to false to indicate that the fileset is not array-enabled. When a fileset is array-enabled, the includes must be top-level LVM logical volume mount points." + } + } + } + ] + }, + "FilesetTemplateDetail": { + "allOf": [ + { + "$ref": "#/definitions/FilesetTemplateCreate" + }, + { + "type": "object", + "required": [ + "id", + "primaryClusterId" + ], + "properties": { + "id": { + "type": "string" + }, + "primaryClusterId": { + "type": "string" + }, + "isArchived": { + "type": "boolean" + }, + "hostCount": { + "type": "integer", + "format": "int32", + "description": "Number of hosts where this template has been applied." + }, + "shareCount": { + "type": "integer", + "format": "int32", + "description": "Number of shares where this template has been applied." + }, + "isCreatedByKupr": { + "type": "boolean", + "description": "Specifies whether this is created by a Kupr Host." 
+ } + } + } + ] + }, + "FilesetTemplateDetailListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/FilesetTemplateDetail" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "FilesetTemplatePatch": { + "allOf": [ + { + "$ref": "#/definitions/FilesetOptions" + }, + { + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "includes": { + "type": "array", + "items": { + "type": "string" + } + }, + "excludes": { + "type": "array", + "items": { + "type": "string" + } + }, + "exceptions": { + "type": "array", + "items": { + "type": "string" + } + }, + "operatingSystemType": { + "type": "string", + "description": "Operating system type of filesets created by template.", + "enum": [ + "UnixLike", + "Windows" + ] + }, + "shareType": { + "type": "string", + "enum": [ + "NFS", + "SMB" + ] + }, + "preBackupScript": { + "type": "string", + "description": "Script to run before backup of this Fileset starts." + }, + "postBackupScript": { + "type": "string", + "description": "Script to run after backup of this Fileset ends." + }, + "backupScriptTimeout": { + "type": "integer", + "format": "int64", + "description": "Number of seconds after which the script is killed if it has not completed execution." + }, + "backupScriptErrorHandling": { + "type": "string", + "description": "Action taken if script fails. Options are \"abort\", \"continue\"." + } + } + } + ] + }, + "FilesetUpdate": { + "type": "object", + "properties": { + "configuredSlaDomainId": { + "type": "string", + "description": "Assign Fileset to SLA domain. Existing snapshots of the object will be retained with the configuration of specified SLA Domain." + }, + "forceFull": { + "type": "boolean", + "description": "Whether to force a full on the whole fileset or certain partitions of the fileset. If this is set to true and no partitionIds are provided, then a full will be forced on the whole fileset. If set to true and partitionIds are provided, then we will force a full on only those partitions." + }, + "forceFullPartitionIds": { + "type": "array", + "description": "Assign partition ids to set the force full. In order for this to be valid input, forceFull must be set to true.", + "items": { + "type": "integer", + "format": "int32" + } + }, + "snapMirrorLabelForFullBackup": { + "type": "string", + "description": "Rubrik CDM uses a prefix match to select the latest SnapMirror snapshot that matches this value during a full backup of a SnapMirror destination share." + }, + "snapMirrorLabelForIncrementalBackup": { + "type": "string", + "description": "Rubrik CDM selects the latest SnapMirror snapshot that matches this value using a prefix match during an incremental backup of a SnapMirror destination share." + } + } + }, + "ManagedObjectDescendantCountFilesetFields": { + "type": "object", + "properties": { + "fileset": { + "type": "integer", + "format": "int32", + "description": "Number of physical Linux/Windows filesets." + }, + "shareFileset": { + "type": "integer", + "format": "int32", + "description": "Number of share filesets." 
+ } + } + }, + "FolderDetail": { + "allOf": [ + { + "$ref": "#/definitions/SlaAssignable" + }, + { + "type": "object", + "required": [ + "descendants", + "entities", + "folderType" + ], + "properties": { + "folderType": { + "type": "string" + }, + "entities": { + "type": "array", + "items": { + "$ref": "#/definitions/ManagedEntitySummary" + } + }, + "descendants": { + "type": "array", + "items": { + "$ref": "#/definitions/ManagedEntitySummary" + } + } + } + } + ] + }, + "FolderHierarchy": { + "allOf": [ + { + "$ref": "#/definitions/EffectiveSlaHolder" + }, + { + "type": "object", + "required": [ + "configuredSlaDomainId", + "folderType", + "id", + "name", + "otherContent", + "subFolderHierarchies" + ], + "properties": { + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "folderType": { + "type": "string" + }, + "configuredSlaDomainId": { + "type": "string" + }, + "configuredSlaDomainType": { + "type": "string", + "description": "Specifies whether the SLA Domain is used for protection or retention." + }, + "subFolderHierarchies": { + "type": "array", + "items": { + "$ref": "#/definitions/FolderHierarchy" + } + }, + "otherContent": { + "type": "array", + "items": { + "$ref": "#/definitions/ManagedEntitySummary" + } + }, + "primaryClusterId": { + "type": "string" + } + } + } + ] + }, + "ManagedEntitySummary": { + "type": "object", + "required": [ + "entityType", + "id", + "name" + ], + "properties": { + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "entityType": { + "type": "string" + } + } + }, + "GraphQlQuery": { + "type": "object", + "required": [ + "query" + ], + "properties": { + "query": { + "type": "string" + }, + "variables": { + "type": "object" + }, + "operationName": { + "type": "string" + } + } + }, + "GraphQlResponse": { + "type": "object", + "required": [ + "data" + ], + "properties": { + "data": { + "type": "object" + }, + "error": { + "type": "string" + } + } + }, + "BaseGuestCredential": { + "type": "object", + "required": [ + "password", + "username" + ], + "properties": { + "username": { + "type": "string", + "description": "Username for the account used to login to the VM guest OS. To include a domain, use the format \\." + }, + "password": { + "type": "string", + "description": "Password for the account used to login to the VM guest OS.", + "x-secret": true + } + } + }, + "BaseGuestCredentialDetail": { + "type": "object", + "required": [ + "username" + ], + "properties": { + "username": { + "type": "string" + } + } + }, + "GuestCredentialDefinition": { + "allOf": [ + { + "$ref": "#/definitions/BaseGuestCredential" + }, + { + "type": "object", + "properties": { + "domain": { + "type": "string" + } + } + } + ] + }, + "GuestCredentialDetail": { + "allOf": [ + { + "$ref": "#/definitions/BaseGuestCredentialDetail" + }, + { + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "type": "string" + }, + "domain": { + "type": "string" + } + } + } + ] + }, + "GuestCredentialDetailListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/GuestCredentialDetail" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." 
+ } + } + }, + "HostFilesetDetail": { + "type": "object", + "required": [ + "hostname", + "id" + ], + "properties": { + "id": { + "type": "string" + }, + "hostname": { + "type": "string" + }, + "primaryClusterId": { + "type": "string" + }, + "agentId": { + "type": "string" + }, + "operatingSystem": { + "type": "string" + }, + "operatingSystemType": { + "type": "string" + }, + "status": { + "type": "string" + }, + "filesets": { + "type": "array", + "items": { + "$ref": "#/definitions/FilesetDetail" + } + } + } + }, + "HostFilesetShareDetail": { + "type": "object", + "required": [ + "exportPoint", + "filesets", + "hostname", + "id", + "primaryClusterId", + "shareType", + "status" + ], + "properties": { + "id": { + "type": "string" + }, + "hostId": { + "type": "string" + }, + "hostname": { + "type": "string" + }, + "vendorType": { + "$ref": "#/definitions/NasVendorType" + }, + "shareType": { + "type": "string", + "enum": [ + "NFS", + "SMB" + ] + }, + "exportPoint": { + "type": "string" + }, + "status": { + "type": "string" + }, + "primaryClusterId": { + "type": "string" + }, + "filesets": { + "type": "array", + "items": { + "$ref": "#/definitions/FilesetDetail" + } + }, + "username": { + "type": "string" + }, + "domain": { + "type": "string" + }, + "hostShareParameters": { + "$ref": "#/definitions/HostShareParameters" + } + } + }, + "HostFilesetShareSummary": { + "type": "object", + "required": [ + "exportPoint", + "filesets", + "hostname", + "id", + "primaryClusterId", + "shareType", + "status" + ], + "properties": { + "id": { + "type": "string" + }, + "hostId": { + "type": "string" + }, + "hostname": { + "type": "string" + }, + "shareType": { + "type": "string", + "enum": [ + "NFS", + "SMB" + ] + }, + "exportPoint": { + "type": "string" + }, + "status": { + "type": "string" + }, + "primaryClusterId": { + "type": "string" + }, + "filesets": { + "type": "array", + "items": { + "$ref": "#/definitions/FilesetSummary" + } + }, + "username": { + "type": "string" + }, + "domain": { + "type": "string" + }, + "hostShareParameters": { + "$ref": "#/definitions/HostShareParameters" + } + } + }, + "HostFilesetShareSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/HostFilesetShareSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "HostFilesetSummary": { + "type": "object", + "required": [ + "hostname", + "id" + ], + "properties": { + "id": { + "type": "string" + }, + "hostname": { + "type": "string" + }, + "primaryClusterId": { + "type": "string" + }, + "operatingSystem": { + "type": "string" + }, + "operatingSystemType": { + "type": "string" + }, + "status": { + "type": "string" + }, + "filesets": { + "type": "array", + "items": { + "$ref": "#/definitions/FilesetSummary" + } + } + } + }, + "HostFilesetSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/HostFilesetSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." 
+ } + } + }, + "HdfsBaseConfig": { + "type": "object", + "required": [ + "hosts" + ], + "properties": { + "hosts": { + "type": "array", + "description": "List of Hdfs Namenode Hosts.", + "items": { + "$ref": "#/definitions/HdfsHost" + } + }, + "username": { + "type": "string", + "description": "Username to access Hdfs API." + }, + "apiToken": { + "type": "string", + "description": "API token to access Hdfs." + }, + "kerberosTicket": { + "type": "string", + "description": "Ticket Cache Path of Kerberos Ticket." + }, + "nameservices": { + "type": "string", + "description": "Logical name for nameservice for Hdfs HA." + } + } + }, + "HdfsConfig": { + "allOf": [ + { + "$ref": "#/definitions/HdfsBaseConfig" + } + ] + }, + "HdfsHost": { + "type": "object", + "required": [ + "hostname", + "port" + ], + "properties": { + "hostname": { + "type": "string", + "description": "Hostname or Ip of Namenode." + }, + "port": { + "type": "integer", + "format": "int32", + "description": "Port number of Namenode." + } + } + }, + "HostConnectivitySummary": { + "type": "object", + "required": [ + "action", + "status" + ], + "properties": { + "action": { + "type": "string" + }, + "status": { + "type": "string" + } + } + }, + "HostDetail": { + "allOf": [ + { + "$ref": "#/definitions/HostSummary" + }, + { + "type": "object", + "required": [ + "hostVfdDriverState", + "isRelic", + "mssqlCbtDriverInstalled" + ], + "properties": { + "agentId": { + "type": "string", + "description": "ID of the Rubrik Backup Service (RBS) installed on the host." + }, + "compressionEnabled": { + "type": "boolean", + "description": "Indicates if compression is enabled while transferring data between the host and the Rubrik cluster." + }, + "isRelic": { + "type": "boolean", + "description": "A relic host is deleted, but still may have snapshots associated with its children (e.g. Fileset)." + }, + "mssqlCbtDriverInstalled": { + "type": "boolean", + "description": "Indicates if the CBT driver is installed for SQL Server instances on the specified Windows host. Set to true when the CBT driver is installed. Set to false when the CBT driver is not installed." + }, + "hostVfdEnabled": { + "description": "Specifies the status of VFD-based volume backups on Windows hosts. The value is 'Enabled' when VFD-based volume backups are enabled. The value is 'Disabled' when VFD-based volume backups are disabled.", + "$ref": "#/definitions/HostVfdInstallConfig" + }, + "hostVfdDriverState": { + "description": "Specifies the installation status of the VFD driver on a Windows host. The value is 'NotInstalled' when the driver is absent. The value is 'Installed' when the driver is present. The value is 'RestartRequred' when the driver is present but requires a restart of the Windows host in order to function.", + "$ref": "#/definitions/HostVfdState" + }, + "oracleSysDbaUser": { + "type": "string", + "description": "Specifies the Oracle username for an account with sysdba privileges.\n" + }, + "oracleQueryUser": { + "type": "string", + "description": "Specifies the Oracle username for an account with query privileges." + }, + "isOracleHost": { + "type": "boolean", + "description": "Specifies whether this is an Oracle host. This indicates whether to show Oracle discovery fields in the UI.\n" + } + } + } + ] + }, + "HostDetailListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." 
+ }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/HostDetail" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "HostDiagnosisSummary": { + "type": "object", + "properties": { + "connectivity": { + "type": "array", + "items": { + "$ref": "#/definitions/HostConnectivitySummary" + } + } + } + }, + "HostFlag": { + "type": "object", + "required": [ + "flag", + "value" + ], + "properties": { + "flag": { + "type": "string" + }, + "value": { + "type": "string" + } + } + }, + "HostMakePrimaryRequest": { + "type": "object", + "properties": { + "ids": { + "type": "array", + "description": "IDs of hosts to migrate.", + "items": { + "type": "string" + } + }, + "oldPrimaryClusterUuid": { + "type": "string", + "description": "For all hosts and virtual machines registered with this cluster, make itself the primary if the current primary is oldPrimaryClusterUuid." + } + } + }, + "HostRegister": { + "type": "object", + "required": [ + "hostname" + ], + "properties": { + "hostname": { + "type": "string" + }, + "alias": { + "type": "string", + "description": "A user-specified string that returns this host in searches." + }, + "hasAgent": { + "type": "boolean" + }, + "organizationId": { + "type": "string", + "description": "The ID of the organization to which the host is assigned." + }, + "nasConfig": { + "$ref": "#/definitions/NasConfig" + }, + "hdfsConfig": { + "$ref": "#/definitions/HdfsConfig" + }, + "oracleSysDbaUser": { + "type": "string", + "description": "Specifies the Oracle username for an account with sysdba privileges. The account must have sysdba privileges for a specified Oracle installation to enable Oracle discovery queries for that installation. This field overrides the configured global sysdba user for the specified Oracle installation." + }, + "oracleQueryUser": { + "type": "string", + "description": "Specifies the Oracle username for an account with query privileges. The account must have query privileges for a specified Oracle installation to enable Oracle discovery queries for that installation.\n" + }, + "isOracleHost": { + "type": "boolean", + "description": "A Boolean that specifies whether to discover Oracle information at registration. 
A value of 'true' discovers Oracle information at registration.\n" + } + } + }, + "HostShareCreate": { + "type": "object", + "required": [ + "exportPoint", + "hostId", + "shareType" + ], + "properties": { + "hostId": { + "type": "string" + }, + "shareType": { + "type": "string", + "enum": [ + "NFS", + "SMB" + ] + }, + "exportPoint": { + "type": "string" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string", + "x-secret": true + }, + "domain": { + "type": "string" + }, + "hostShareParameters": { + "$ref": "#/definitions/HostShareParameters" + } + } + }, + "HostShareCredentialDefinition": { + "type": "object", + "required": [ + "hostId", + "username" + ], + "properties": { + "hostId": { + "type": "string" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string", + "x-secret": true + }, + "domain": { + "type": "string" + } + } + }, + "HostShareCredentialDetail": { + "type": "object", + "required": [ + "hostId", + "username" + ], + "properties": { + "hostId": { + "type": "string" + }, + "username": { + "type": "string" + }, + "domain": { + "type": "string" + } + } + }, + "HostShareCredentialDetailListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/HostShareCredentialDetail" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "HostShareDetail": { + "type": "object", + "required": [ + "exportPoint", + "hostname", + "id", + "primaryClusterId", + "shareType", + "status" + ], + "properties": { + "id": { + "type": "string", + "description": "The unique ID of the NAS Share." + }, + "hostId": { + "type": "string", + "description": "The host ID of the NAS Share host." + }, + "hostname": { + "type": "string", + "description": "The hostname of the NAS host." + }, + "vendorType": { + "description": "The vendor type of the NAS host the share belongs to.", + "$ref": "#/definitions/NasVendorType" + }, + "shareType": { + "type": "string", + "description": "The type of NAS share.", + "enum": [ + "NFS", + "SMB" + ] + }, + "exportPoint": { + "type": "string", + "description": "The NFS export point or SMB share name for the NAS share." + }, + "status": { + "type": "string", + "description": "The status of connection between the Rubrik cluster and the NAS Share. Possible responses are Connected and Disconnected." + }, + "primaryClusterId": { + "type": "string", + "description": "The ID of the primary Rubrik cluster." + }, + "username": { + "type": "string", + "description": "The username to access the NAS share." + }, + "domain": { + "type": "string", + "description": "The domain of the SMB share." + }, + "hostShareParameters": { + "description": "Additional share parameters.", + "$ref": "#/definitions/HostShareParameters" + } + } + }, + "HostShareDetailListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/HostShareDetail" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." 
+ } + } + }, + "HostShareParameters": { + "type": "object", + "properties": { + "isNetAppSnapDiffEnabled": { + "type": "boolean", + "description": "Indicates if SnapDiff is enabled on NetApp NAS share. When this value is 'true', metadata fetches during backup operations use the NetApp SnapDiff feature. The SnapDiff feature improves incremental backup performance by tracking the difference between two snapshots, reducing the metadata scanning time during a backup job." + }, + "isIsilonChangelistEnabled": { + "type": "boolean", + "description": "Indicates if Changelist is enabled on Isilon NAS share. When this value is 'true', metadata fetches during backup operations use the Isilon Changelist feature. The Changelist feature improves incremental backup performance by tracking the difference between two snapshots, reducing the metadata scanning time during a backup job." + }, + "isOnNetAppSnapMirrorDestVolume": { + "type": "boolean", + "description": "Indicates whether the share is on a SnapMirror destination volume on a NetApp NAS share. When this value is 'true', fileset backup operations pick the latest Netapp snapshot on the volume, subject to the configured label matching. During share registration, Rubrik checks with NetApp NAS to find out whether the share is on SnapMirror destination and sets this parameter." + } + } + }, + "HostSharePatch": { + "type": "object", + "properties": { + "exportPoint": { + "type": "string" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string", + "x-secret": true + }, + "domain": { + "type": "string" + }, + "hostShareParameters": { + "$ref": "#/definitions/HostShareParameters" + } + } + }, + "HostShareUpdate": { + "type": "object", + "required": [ + "shareId", + "updateProperties" + ], + "properties": { + "shareId": { + "type": "string", + "description": "ID of the network share." + }, + "updateProperties": { + "$ref": "#/definitions/HostSharePatch" + } + } + }, + "HostSummary": { + "type": "object", + "required": [ + "hostname", + "id" + ], + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for host." + }, + "name": { + "type": "string", + "description": "IP address or hostname of the host." + }, + "hostname": { + "type": "string", + "description": "Deprecated. Please use 'name' instead." + }, + "alias": { + "type": "string", + "description": "A user-specified string that returns this host in searches." + }, + "primaryClusterId": { + "type": "string", + "description": "ID of the Rubrik cluster to which the host belongs." + }, + "operatingSystem": { + "type": "string", + "description": "Operating system of the host. One of Windows, Linux, AIX, HPUX, and SunOS." + }, + "operatingSystemType": { + "type": "string", + "description": "The operating system of the host. Possible choices are Windows, Linux, AIX, HPUX, SunOS." + }, + "status": { + "type": "string", + "description": "Specifies the connect status for the host. Status is Refreshing while discovery is running or Connected once discovery was successful and the host is available." + }, + "nasBaseConfig": { + "$ref": "#/definitions/NasBaseConfig" + }, + "hdfsBaseConfig": { + "$ref": "#/definitions/HdfsBaseConfig" + }, + "mssqlCbtEnabled": { + "description": "Property that indicates whether CBT is enabled for backups of SQL Server databases on a Windows host. Set to Enabled when CBT based backups of SQL Server databases for the specified Windows host is enabled. 
Set to Disabled when CBT based backups of SQL Server databases for the specified Windows host is turned off. Set to Default when the Windows host inherits the global CBT setting.", + "$ref": "#/definitions/MssqlCbtStatusType" + }, + "mssqlCbtEffectiveStatus": { + "description": "Property that indicates whether CBT is enabled for backups of SQL Server databases on a Windows host. When the value of mssqlCbtEnabled is Default, this property has the same value as the global CBT setting. In all other cases, this property has the same value as mssqlCbtEnabled. To change the global CBT setting, use the SQL Server default property update endpoint.", + "$ref": "#/definitions/MssqlCbtEffectiveStatusType" + }, + "organizationId": { + "type": "string", + "description": "The ID of the organization to which the host is assigned (set by envoy)." + }, + "organizationName": { + "type": "string", + "description": "The name of the organization to which the host is assigned (set by envoy)." + } + } + }, + "HostSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/HostSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "HostUpdate": { + "type": "object", + "properties": { + "hostname": { + "type": "string" + }, + "alias": { + "type": "string", + "description": "A user-specified string that returns this host in searches." + }, + "compressionEnabled": { + "type": "boolean" + }, + "nasConfig": { + "$ref": "#/definitions/NasConfig" + }, + "hdfsConfig": { + "$ref": "#/definitions/HdfsConfig" + }, + "mssqlCbtEnabled": { + "$ref": "#/definitions/MssqlCbtStatusType" + }, + "mssqlCbtDriverInstalled": { + "type": "boolean", + "description": "When CBT is disabled on the specified Windows host, set this property to false to instruct the Rubrik cluster to remove the CBT driver from a specified Windows host. Before using this property, disable CBT on the specified Windows host by setting the value of mssqlCbtEnabled to Disabled." + }, + "hostVfdEnabled": { + "$ref": "#/definitions/HostVfdInstallConfig" + }, + "hostVfdDriverInstalled": { + "type": "boolean", + "description": "When VFD is disabled on the specified Windows host, set this property to false to instruct the Rubrik cluster to remove the VFD driver from a specified Windows host. Before using this property, disable VFD on the specified Windows host by setting the value of HostVfdEnabled to Disabled." + }, + "oracleSysDbaUser": { + "type": "string", + "description": "Specifies the Oracle username for an account with sysdba privileges. The account must have sysdba privileges for a specified Oracle installation to enable Oracle discovery queries for that installation. This field overrides the configured global sysdba user for the specified Oracle installation.\n" + }, + "oracleQueryUser": { + "type": "string", + "description": "Specifies the Oracle username for an account with query privileges. The account must have query privileges for a specified Oracle installation to enable Oracle discovery queries for that installation.\n" + }, + "isOracleHost": { + "type": "boolean", + "description": "A Boolean that specifies whether to discover Oracle information during host refresh. 
A value of 'true' discovers Oracle information during host refresh.\n" + } + } + }, + "HostUpdateId": { + "type": "object", + "required": [ + "hostId", + "updateProperties" + ], + "properties": { + "hostId": { + "type": "string", + "description": "ID of the host." + }, + "updateProperties": { + "$ref": "#/definitions/HostUpdate" + } + } + }, + "HostVfdInstallConfig": { + "type": "string", + "description": "VFD host support status.", + "enum": [ + "Enabled", + "Disabled" + ] + }, + "HostVfdInstallRequest": { + "type": "object", + "required": [ + "hostIds", + "install" + ], + "properties": { + "hostIds": { + "type": "array", + "description": "Ids of host on which to install/uninstall VFD.", + "items": { + "type": "string" + } + }, + "install": { + "type": "boolean" + } + } + }, + "HostVfdInstallResponse": { + "type": "object", + "required": [ + "hostId", + "hostVfdDriverState" + ], + "properties": { + "hostId": { + "type": "string" + }, + "errorInfo": { + "type": "string" + }, + "hostVfdDriverState": { + "description": "Specifies the installation status of the VFD driver on a Windows host. The value is 'NotInstalled' when the driver is absent. The value is 'Installed' when the driver is present. The value is 'RestartRequred' when the driver is present but requires a restart of the Windows host in order to function.", + "$ref": "#/definitions/HostVfdState" + } + } + }, + "HostVfdState": { + "type": "string", + "description": "VFD host install state.", + "enum": [ + "NotInstalled", + "Installed", + "InstalledButRestartRequired", + "UninstalledButRestartRequired" + ] + }, + "KuprHostRegister": { + "type": "object", + "required": [ + "agentCertificate", + "agentId", + "hostname", + "operatingSystemInfo", + "operatingSystemType" + ], + "properties": { + "hostname": { + "type": "string" + }, + "agentId": { + "type": "string", + "description": "The agent ID of the registered host." + }, + "agentCertificate": { + "type": "string", + "description": "The agent certificate of the registered host. X.509 certificates in Base64 encoded DER format. Each certificate must start with -----BEGIN CERTIFICATE----- and end with -----END CERTIFICATE-----.\n" + }, + "ipv4Addresses": { + "type": "array", + "items": { + "type": "string" + } + }, + "operatingSystemType": { + "type": "string", + "description": "Operating system of a specified kupr host.", + "enum": [ + "Linux" + ] + }, + "operatingSystemInfo": { + "type": "string", + "description": "Operating system information of a specified kupr host.", + "enum": [ + "Linux", + "Rhel", + "Ubuntu", + "Suse", + "Centos" + ] + }, + "operatingSystemVersion": { + "type": "string", + "description": "Operating system version of a specified kupr host." + } + } + }, + "KuprHostUpdate": { + "type": "object", + "properties": { + "hostname": { + "type": "string" + }, + "agentId": { + "type": "string", + "description": "The agent ID of the registered host." + }, + "agentCertificate": { + "type": "string", + "description": "The agent certificate of the registered host. X.509 certificates in Base64 encoded DER format. 
Each certificate must start with -----BEGIN CERTIFICATE----- and end with -----END CERTIFICATE-----.\n" + }, + "ipv4Addresses": { + "type": "array", + "description": "An array containing the IPv4 address to Kupr host.\n", + "items": { + "type": "string" + } + }, + "operatingSystemType": { + "type": "string", + "description": "Operating system of a specified kupr host.", + "enum": [ + "Linux" + ] + }, + "operatingSystemInfo": { + "type": "string", + "description": "Operating system information of a specified kupr host.", + "enum": [ + "Linux", + "Rhel", + "Ubuntu", + "Suse", + "Centos" + ] + }, + "operatingSystemVersion": { + "type": "string", + "description": "Operating system version of a specified kupr host." + } + } + }, + "MssqlCbtEffectiveStatusType": { + "type": "string", + "description": "Effective CBT host status.", + "enum": [ + "On", + "Off", + "OnDefault", + "OffDefault" + ] + }, + "MssqlCbtStatusType": { + "type": "string", + "description": "CBT host support status.", + "enum": [ + "Enabled", + "Disabled", + "Default" + ] + }, + "NasBaseConfig": { + "type": "object", + "required": [ + "vendorType" + ], + "properties": { + "vendorType": { + "type": "string", + "description": "Type of NAS vendor 'ISILON/NETAPP/FLASHBLADE'." + }, + "apiUsername": { + "type": "string", + "description": "Username to access NAS API." + }, + "apiCertificate": { + "type": "string", + "description": "TLS certification to validate NAS server." + }, + "apiHostname": { + "type": "string", + "description": "Hostname or IP used in the NAS API calls." + }, + "apiEndpoint": { + "type": "string", + "description": "API endpoint to access NAS API 'FLASHBLADE'." + }, + "zoneName": { + "type": "string", + "description": "Name of the Isilon zone that data IP belongs to." + }, + "isNetAppSnapDiffEnabled": { + "type": "boolean", + "description": "Indicates if SnapDiff is enabled on NetApp NAS share. When this value is 'true', metadata fetches during backup operations use the NetApp SnapDiff feature. The SnapDiff feature improves incremental backup performance by tracking the difference between two snapshots, reducing the metadata scanning time during a backup job." + }, + "isIsilonChangelistEnabled": { + "type": "boolean", + "description": "Indicates if Changelist is enabled on Isilon NAS share. When this value is 'true', metadata fetches during backup operations use the Isilon Changelist feature. The Changelist feature improves incremental backup performance by tracking the difference between two snapshots, reducing the metadata scanning time during a backup job." + }, + "isShareAutoDiscoveryEnabled": { + "type": "boolean", + "description": "Specifies whether shares on the NAS host are automatically discovered. When this value is 'true', Rubrik periodically (every 30 minutes by default) connects to the NAS host to discover NFS and SMB shares." + } + } + }, + "NasConfig": { + "type": "object", + "required": [ + "vendorType" + ], + "properties": { + "vendorType": { + "type": "string", + "description": "Type of NAS vendor 'ISILON/NETAPP/FLASHBLADE'." + }, + "apiUsername": { + "type": "string", + "description": "Username to access NAS API 'ISILON/NETAPP'." + }, + "apiPassword": { + "type": "string", + "description": "Password to access NAS API 'ISILON/NETAPP'.", + "x-secret": true + }, + "apiToken": { + "type": "string", + "description": "API token to access NAS API 'FLASHBLADE'.", + "x-secret": true + }, + "apiEndpoint": { + "type": "string", + "description": "API endpoint to access NAS API 'FLASHBLADE'." 
+ }, + "apiCertificate": { + "type": "string", + "description": "TLS certification to validate NAS server." + }, + "apiHostname": { + "type": "string", + "description": "Hostname or IP used in the NAS API calls." + }, + "zoneName": { + "type": "string", + "description": "Name of the Isilon zone that data IP belongs to." + }, + "isNetAppSnapDiffEnabled": { + "type": "boolean", + "description": "Indicates if SnapDiff is enabled on NetApp NAS share. When this value is 'true', metadata fetches during backup operations use the NetApp SnapDiff feature. The SnapDiff feature improves incremental backup performance by tracking the difference between two snapshots, reducing the metadata scanning time during a backup job." + }, + "isIsilonChangelistEnabled": { + "type": "boolean", + "description": "Indicates if Changelist is enabled on Isilon NAS share. When this value is 'true', metadata fetches during backup operations use the Isilon Changelist feature. The Changelist feature improves incremental backup performance by tracking the difference between two snapshots, reducing the metadata scanning time during a backup job." + }, + "isShareAutoDiscoveryEnabled": { + "type": "boolean", + "description": "Specifies whether shares on the NAS host are automatically discovered. When this value is 'true', Rubrik periodically (every 30 minutes by default) connects to the NAS host to discover NFS and SMB shares." + } + } + }, + "NasVendorType": { + "type": "string", + "description": "NAS Vendor Type.", + "enum": [ + "NETAPP", + "ISILON", + "FLASHBLADE" + ] + }, + "HypervClusterDetail": { + "allOf": [ + { + "$ref": "#/definitions/HypervClusterUpdate" + }, + { + "$ref": "#/definitions/HypervClusterSummary" + } + ] + }, + "HypervClusterSummary": { + "allOf": [ + { + "$ref": "#/definitions/SlaAssignable" + }, + { + "type": "object", + "required": [ + "id", + "name", + "primaryClusterId" + ], + "properties": { + "id": { + "type": "string", + "description": "The ID of the Hyper-V cluster." + }, + "name": { + "type": "string", + "description": "The display name of the Hyper-V cluster." + }, + "primaryClusterId": { + "type": "string" + } + } + } + ] + }, + "HypervClusterSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/HypervClusterSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "HypervClusterUpdate": { + "type": "object", + "properties": { + "configuredSlaDomainId": { + "type": "string", + "description": "Assign this VM to the given SLA domain. Existing snapshots of the object will be retained with the configuration of specified SLA Domain." 
+ } + } + }, + "HypervHierarchyObjectDescendentCount": { + "type": "object", + "properties": { + "scvmm": { + "type": "integer", + "format": "int32" + }, + "cluster": { + "type": "integer", + "format": "int32" + }, + "host": { + "type": "integer", + "format": "int32" + }, + "vm": { + "type": "integer", + "format": "int32" + } + } + }, + "HypervHierarchyObjectSummary": { + "allOf": [ + { + "$ref": "#/definitions/ManagedHierarchyObjectSummary" + }, + { + "type": "object", + "required": [ + "descendentCount", + "isDeleted", + "objectType" + ], + "properties": { + "objectType": { + "type": "string", + "description": "Type of object.", + "enum": [ + "scvmm", + "cluster", + "host", + "vm" + ] + }, + "connectionStatus": { + "type": "string", + "description": "Connection status of hyperv host. This is defined only for hyperv host.", + "enum": [ + "Connected", + "Connecting", + "Disconnected" + ] + }, + "descendentCount": { + "$ref": "#/definitions/HypervHierarchyObjectDescendentCount" + }, + "isDeleted": { + "type": "boolean", + "description": "Indicates whether the hyperv hierarchy object is deleted." + }, + "pendingSlaDomain": { + "description": "Describes any pending SLA Domain assignment on this object.", + "$ref": "#/definitions/ManagedObjectPendingSlaInfo" + } + } + } + ] + }, + "HypervHierarchyObjectSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/HypervHierarchyObjectSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "HypervHostDetail": { + "allOf": [ + { + "$ref": "#/definitions/HypervHostUpdate" + }, + { + "$ref": "#/definitions/HypervHostSummary" + } + ] + }, + "HypervHostSummary": { + "allOf": [ + { + "$ref": "#/definitions/SlaAssignable" + }, + { + "type": "object", + "required": [ + "hostname", + "id", + "primaryClusterId" + ], + "properties": { + "id": { + "type": "string", + "description": "The ID of the Hyper-V host." + }, + "hostname": { + "type": "string", + "description": "IP Address or the hostname using which the host was added." + }, + "primaryClusterId": { + "type": "string" + } + } + } + ] + }, + "HypervHostSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/HypervHostSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "HypervHostUpdate": { + "type": "object", + "properties": { + "configuredSlaDomainId": { + "type": "string", + "description": "Assign this hyperv host to the given SLA domain. Existing snapshots of the object will be retained with the configuration of specified SLA Domain." + } + } + }, + "HypervScvmmDetail": { + "allOf": [ + { + "$ref": "#/definitions/HypervScvmmUpdate" + }, + { + "$ref": "#/definitions/HypervScvmmSummary" + } + ] + }, + "HypervScvmmRegister": { + "type": "object", + "required": [ + "hostname", + "runAsAccount", + "shouldDeployAgent" + ], + "properties": { + "hostname": { + "type": "string", + "description": "Name of the SCVMM host." + }, + "runAsAccount": { + "type": "string", + "description": "The RunAs account which will be used to install connector on hosts." 
+ }, + "shouldDeployAgent": { + "type": "boolean", + "description": "Flag to specify if Rubrik can deploy connector to hosts. If true, Rubrik tries to deploy connector to the hyperv hosts. If false, Rubrik deployment of connector will be handled by the client." + } + } + }, + "HypervScvmmSummary": { + "allOf": [ + { + "$ref": "#/definitions/SlaAssignable" + }, + { + "type": "object", + "required": [ + "id", + "primaryClusterId", + "runAsAccount", + "shouldDeployAgent", + "status" + ], + "properties": { + "id": { + "type": "string", + "description": "The ID of the Hyper-V SCVMM." + }, + "primaryClusterId": { + "type": "string" + }, + "runAsAccount": { + "type": "string", + "description": "The RunAs account which will be used to install connector on hosts." + }, + "status": { + "type": "string", + "description": "Connection status of the SCVMM server." + }, + "shouldDeployAgent": { + "type": "boolean", + "description": "Flag to specify if Rubrik can deploy connector to hosts. If true, Rubrik tries to deploy connector to the hyperv hosts. If false, Rubrik deployment of connector will be handled by the client." + } + } + } + ] + }, + "HypervScvmmSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/HypervScvmmSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "HypervScvmmUpdate": { + "type": "object", + "properties": { + "hostname": { + "type": "string", + "description": "Name of the SCVMM host." + }, + "configuredSlaDomainId": { + "type": "string", + "description": "Assign this SCVMM to the given SLA domain. Existing snapshots of the object will be retained with the configuration of specified SLA Domain." + }, + "runAsAccount": { + "type": "string", + "description": "The RunAs account which will be used to install connector on hosts." + }, + "shouldDeployAgent": { + "type": "boolean", + "description": "Flag to specify if Rubrik can deploy connector to hosts. If true, Rubrik tries to deploy connector to the hyperv hosts. If false, Rubrik deployment of connector will be handled by the client." + } + } + }, + "AdaptiveThrottlingSettingsHypervFields": { + "type": "object", + "properties": { + "hypervThrottlingSettings": { + "$ref": "#/definitions/HypervAdaptiveThrottlingSettings" + } + } + }, + "HypervAdaptiveThrottlingSettings": { + "type": "object", + "properties": { + "hostIoLatencyThreshold": { + "type": "integer", + "format": "int32", + "description": "Threshold Hyper-V host latency value that determines whether to postpone a scheduled snapshot of a virtual machine on the host. Specify the threshold value in milliseconds (ms)." + }, + "hostCpuUtilizationThreshold": { + "type": "integer", + "format": "int32", + "description": "Threshold Hyper-V host CPU utilization value that determines whether to postpone a scheduled snapshot of a virtual machine on the host. Specify the threshold value as a percentage." + }, + "vmCpuUtilizationThreshold": { + "type": "integer", + "format": "int32", + "description": "Threshold virtual machine CPU utilization value that determines whether to postpone a scheduled snapshot of a virtual machine on a Hyper-V host. Specify the threshold value as a percentage." 
+ } + } + }, + "HypervDataLocationUsage": { + "type": "object", + "required": [ + "numHypervVmsArchived" + ], + "properties": { + "numHypervVmsArchived": { + "type": "integer", + "format": "int32" + } + } + }, + "HypervDownloadFileJobConfig": { + "type": "object", + "required": [ + "path" + ], + "properties": { + "path": { + "type": "string", + "description": "Absolute file path." + }, + "legalHoldDownloadConfig": { + "description": "An optional argument containing a Boolean parameter to depict if the download is being triggered for Legal Hold use case.", + "$ref": "#/definitions/LegalHoldDownloadConfig" + } + } + }, + "HypervDownloadFilesJobConfig": { + "type": "object", + "required": [ + "paths" + ], + "properties": { + "paths": { + "type": "array", + "description": "An array containing the full source path of each file and folder that is part of the download job. The array must contain at least one path. When the source is a Windows virtual machine, the paths must all be on the same disk.", + "items": { + "type": "string" + } + }, + "legalHoldDownloadConfig": { + "description": "An optional argument containing a Boolean parameter to depict if the download is being triggered for Legal Hold use case.", + "$ref": "#/definitions/LegalHoldDownloadConfig" + } + } + }, + "HypervExportSnapshotJobConfig": { + "type": "object", + "required": [ + "path" + ], + "properties": { + "vmName": { + "type": "string", + "description": "name of the new VM for export." + }, + "hostId": { + "type": "string", + "description": "ID of the host to export to." + }, + "path": { + "type": "string", + "description": "Destination path for the new VM virtual disks." + }, + "disableNetwork": { + "type": "boolean", + "description": "Sets the state of the network interfaces when the virtual machine is mounted. Use 'false' to enable the network interfaces. Use 'true' to disable the network interfaces. Disabling the interfaces can prevent IP conflicts. Default value is 'true'.", + "default": false + }, + "removeNetworkDevices": { + "type": "boolean", + "description": "Determines whether to remove the network interfaces from the mounted virtual machine. Set to 'true' to remove all network interfaces. The default value is 'false'.", + "default": false + }, + "powerOn": { + "type": "boolean", + "description": "Whether the VM should be powered on after export. Default value is true." + } + } + }, + "HypervInstantRecoveryJobConfig": { + "type": "object", + "properties": { + "vmName": { + "type": "string", + "description": "name of the new VM to instantly recover." + }, + "hostId": { + "type": "string", + "description": "ID of the host to instantly recover to." + } + } + }, + "HypervMountSnapshotJobConfig": { + "type": "object", + "properties": { + "hostId": { + "type": "string", + "description": "ID of host for the mount to use." + }, + "vmName": { + "type": "string", + "description": "Name of the mounted VM." + }, + "disableNetwork": { + "type": "boolean", + "description": "Sets the state of the network interfaces when the virtual machine is mounted. Use 'false' to enable the network interfaces. Use 'true' to disable the network interfaces. Disabling the interfaces can prevent IP conflicts. Default value is 'true'.", + "default": false + }, + "removeNetworkDevices": { + "type": "boolean", + "description": "Determines whether to remove the network interfaces from the mounted virtual machine. Set to 'true' to remove all network interfaces. 
The default value is 'false'.", + "default": false + }, + "powerOn": { + "type": "boolean", + "description": "Whether the VM should be powered on after mount. Default value is true." + } + } + }, + "HypervRestoreFileConfig": { + "type": "object", + "required": [ + "path", + "restorePath" + ], + "properties": { + "path": { + "type": "string", + "description": "Absolute file path." + }, + "restorePath": { + "type": "string", + "description": "Directory of folder to copy files into." + } + } + }, + "HypervRestoreFilesConfig": { + "type": "object", + "required": [ + "restoreConfig" + ], + "properties": { + "restoreConfig": { + "type": "array", + "description": "Absolute path of the target location for the copied files.", + "items": { + "$ref": "#/definitions/HypervRestoreFileConfig" + } + } + } + }, + "HypervUpdateMountConfig": { + "type": "object", + "required": [ + "powerStatus" + ], + "properties": { + "powerStatus": { + "type": "boolean", + "description": "True to power on, false to power off." + } + } + }, + "HypervVirtualDiskInfo": { + "type": "object", + "required": [ + "id", + "name", + "path" + ], + "properties": { + "id": { + "type": "string", + "description": "The ID of the Hyper-V virtual disk." + }, + "name": { + "type": "string", + "description": "The name of the Hyper-V virtual disk." + }, + "path": { + "type": "string", + "description": "The path of the Hyper-V virtual disk." + } + } + }, + "HypervVirtualMachineDetail": { + "allOf": [ + { + "$ref": "#/definitions/HypervVirtualMachineUpdate" + }, + { + "$ref": "#/definitions/HypervVirtualMachineSummary" + }, + { + "type": "object", + "required": [ + "guestOsType", + "virtualDiskInfo" + ], + "properties": { + "operatingSystemType": { + "type": "string", + "enum": [ + "Linux", + "Windows" + ] + }, + "isAgentRegistered": { + "type": "boolean", + "description": "Returns whether the Rubrik connector is installed and service is registered." 
+ }, + "guestOsType": { + "type": "string", + "description": "Type of operating system used by the Hyper-V virtual machine.", + "enum": [ + "Linux", + "Windows", + "Unknown" + ] + }, + "virtualDiskInfo": { + "type": "array", + "description": "Brief information about all virtual disks of the selected virtual machine.", + "items": { + "$ref": "#/definitions/HypervVirtualDiskInfo" + } + } + } + } + ] + }, + "HypervVirtualMachineMountDetail": { + "allOf": [ + { + "$ref": "#/definitions/HypervVirtualMachineMountSummary" + } + ] + }, + "HypervVirtualMachineMountListSortAttribute": { + "type": "string", + "description": "Attributes that are available to use when sorting query results for Hyper-V snapshot mounts.", + "enum": [ + "SnapshotDate", + "VmName", + "MountedVmName", + "PowerStatus" + ] + }, + "HypervVirtualMachineMountSummary": { + "type": "object", + "required": [ + "id", + "isReady", + "powerStatus", + "snapshotDate", + "vmId", + "vmName" + ], + "properties": { + "id": { + "type": "string" + }, + "snapshotDate": { + "type": "string", + "format": "date-time" + }, + "vmId": { + "type": "string" + }, + "vmName": { + "type": "string" + }, + "mountedVmId": { + "type": "string" + }, + "mountedVmName": { + "type": "string" + }, + "hostId": { + "type": "string" + }, + "hostName": { + "type": "string" + }, + "isReady": { + "type": "boolean" + }, + "powerStatus": { + "type": "string", + "description": "The power status of the mounted VM(ON,OFF,SLEEP etc.).", + "enum": [ + "PoweringOn", + "PoweredOn", + "PoweringOff", + "PoweredOff" + ] + }, + "mountRequestId": { + "type": "string" + }, + "unmountRequestId": { + "type": "string" + } + } + }, + "HypervVirtualMachineMountSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/HypervVirtualMachineMountSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "HypervVirtualMachineSnapshotDetail": { + "allOf": [ + { + "$ref": "#/definitions/HypervVirtualMachineSnapshotSummary" + } + ] + }, + "HypervVirtualMachineSnapshotSummary": { + "allOf": [ + { + "$ref": "#/definitions/BaseSnapshotSummary" + }, + { + "type": "object", + "required": [ + "vmName" + ], + "properties": { + "vmName": { + "type": "string" + }, + "usedFastVhdx": { + "type": "boolean", + "description": "Indicates if the snapshot was taken using the fast VHDX builder." + }, + "fileSizeInBytes": { + "type": "integer", + "format": "int64", + "description": "Logical size, in bytes, of the Hyper-V virtual machine snapshot." + } + } + } + ] + }, + "HypervVirtualMachineSnapshotSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/HypervVirtualMachineSnapshotSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." 
+ } + } + }, + "HypervVirtualMachineSummary": { + "allOf": [ + { + "$ref": "#/definitions/Snappable" + }, + { + "type": "object", + "required": [ + "hostId", + "id", + "infraPath", + "isRelic", + "name" + ], + "properties": { + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "hostId": { + "type": "string", + "description": "The ID of the Hyper-V host." + }, + "isRelic": { + "type": "boolean" + }, + "infraPath": { + "type": "array", + "description": "Brief info of all the objects in the infrastructure path to this VM.", + "items": { + "$ref": "#/definitions/ManagedHierarchyObjectAncestor" + } + }, + "cloudInstantiationSpec": { + "description": "Cloud instantiation specification for the selected virtual machine.", + "$ref": "#/definitions/CloudInstantiationSpec" + }, + "pendingSlaDomain": { + "description": "Describes any pending SLA Domain assignment on this object.", + "$ref": "#/definitions/ManagedObjectPendingSlaInfo" + }, + "forceFull": { + "type": "boolean", + "description": "Indicates if the Hyper-V virtual machine is configured to perform a full snapshot for the next backup." + } + } + } + ] + }, + "HypervVirtualMachineSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/HypervVirtualMachineSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "HypervVirtualMachineUpdate": { + "type": "object", + "properties": { + "configuredSlaDomainId": { + "type": "string", + "description": "Assign this VM to the given SLA domain. Existing snapshots of the object will be retained with the configuration of specified SLA Domain." + }, + "cloudInstantiationSpec": { + "description": "Cloud instantiation specification for the selected virtual machine.", + "$ref": "#/definitions/CloudInstantiationSpec" + }, + "virtualDiskIdsExcludedFromSnapshot": { + "type": "array", + "description": "A comma-separated list of Hyper-V virtual disk IDs that are excluded from backup.", + "items": { + "type": "string" + } + } + } + }, + "HypervVmSlaObjectCount": { + "type": "object", + "properties": { + "numHypervVms": { + "type": "integer", + "format": "int32", + "description": "The number of Hyper-V virtual machines protected under this SLA domain." + } + } + }, + "GlobalAkkaConfig": { + "type": "object", + "properties": { + "disableHttp2": { + "type": "boolean", + "description": "Disable HTTP2 negotiation and always pick HTTP1.1." + }, + "apiThreadPoolSizeMin": { + "type": "integer", + "format": "int32", + "description": "Minimum thread pool size." + }, + "apiThreadPoolSizeMax": { + "type": "integer", + "format": "int32", + "description": "Maximum thread pool size." + }, + "apiThreadPoolSizeStepPerCore": { + "type": "integer", + "format": "int32", + "description": "Maximum thread pool size." + }, + "entityMaxSizeKiB": { + "type": "integer", + "format": "int32", + "description": "Maximum size for the HTTP body/entity." + }, + "entityWaitDelaySecs": { + "type": "integer", + "format": "int32", + "description": "Time to wait for HTTP message body to arrive." + }, + "idleTimeoutSecs": { + "type": "integer", + "format": "int32", + "description": "Time to close a connection when idle." + }, + "requestTimeoutSecs": { + "type": "integer", + "format": "int32", + "description": "Time to service an API request. 
Could be increased to support increased 2FA auth duration with Duo LDAP proxy." + }, + "bindTimeoutSecs": { + "type": "integer", + "format": "int32", + "description": "The time period within which the TCP binding process must be completed. Default is 1 second." + }, + "apiResultsCacheSecs": { + "type": "integer", + "format": "int32", + "description": "Duration to cache an API response. Cache interval complies with request Cache control headers." + }, + "apiResultsCacheCount": { + "type": "integer", + "format": "int32", + "description": "Number of entries in API response cache. Set this to 0 to disable." + }, + "staticPageCacheMins": { + "type": "integer", + "format": "int32", + "description": "Duration to cache static pages. This cache does not comply with request Cache control headers. Will be refreshed on upgrades." + }, + "staticPageCount": { + "type": "integer", + "format": "int32", + "description": "Number of entries in static page cache. Set this to 0 to disable." + }, + "apiServerIdentificationString": { + "type": "string", + "description": "Set it to empty string to disable server identification." + }, + "http2SslCipherSuites": { + "type": "string", + "description": "colon(:) separated SSL cipher suites by order of preference. Takes precedence over defaultSslCipherSuites. HTTP/2.0 requires ECDHE-RSA-AES128-GCM-SHA256. Google prefers AES128-GCM." + }, + "unbindGraceIntervalSecs": { + "type": "integer", + "format": "int32", + "description": "API server unbinds the prebootstrap server with the real server. This interval allows Akka to gracefully cleanup pending requests." + }, + "unbindFatalIntervalSecs": { + "type": "integer", + "format": "int32", + "description": "API server unbinds the prebootstrap server with the real server. This interval allows Akka server to abort if graceful exit failed." + }, + "retryAfterSeconds": { + "type": "integer", + "format": "int32", + "description": "When sending a HTTP 503 error, specify the duration for the client to wait before retrying the request. Since the time to wait is unknown, use a short interval." + }, + "opentracingSamplingStrategy": { + "type": "string", + "description": "Jaeger Opentracing strategy. The default strategy samples at 0.000001 probability for a maximum of 100 samples a second. At the operation level, specific operations can be overridden with different probability." + }, + "opentracingSamplingDurationMsec": { + "type": "integer", + "format": "int32", + "description": "Frequency by which new Sampling strategies are polled by Jaeger." + }, + "apiBodyLogMsgSizeLimitKiB": { + "type": "integer", + "format": "int32", + "description": "Maximum message size (in KiB) to log for API body." + } + } + }, + "GlobalAppBlueprintConfig": { + "type": "object", + "properties": { + "distributedBarrierTimeoutDurationInSeconds": { + "type": "integer", + "format": "int32", + "description": "The duration after which the distibuted barrier will stop waiting for all children register and await jobs." + }, + "appBlueprintSnapshotJobSpawnPollingDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. The AppBlueprint snapshot job polls for completion of the child snapshot jobs at the specified interval." + }, + "appBlueprintSnapshotJobSpawnTimeoutDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. The AppBlueprint snapshot job cancels child snapshot jobs after the interval elapses." 
+ }, + "numRetriesForAggressiveResourceContention": { + "type": "integer", + "format": "int32", + "description": "An integer that specifies the number of attempts made to acquire all throttles for taking snapshots of the child snappables for the Blueprint." + }, + "startFailoverOnSourceThrottleDelayInMillis": { + "type": "integer", + "format": "int32", + "description": "The failover job on source waits for the specified interval (in milliseconds) when throttling is in effect." + }, + "appBlueprintCloudImageConversionJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Time interval between successive runs of the AppBlueprintCloudImageConversion job." + }, + "appBlueprintImageConversionThrottleDelayInMillis": { + "type": "integer", + "format": "int32", + "description": "Number of milliseconds to delay AppBlueprint image conversion job if it is unable to acquire throttle." + }, + "expectedPercentageOfAppBlueprintsInAggressiveContentionState": { + "type": "integer", + "format": "int32", + "description": "The percentage probability that a Blueprint will be aggressively contending for resources before it gets a single synchronous snapshot." + }, + "numNonAggressiveContentionAfterSynchronizedSnapshot": { + "type": "integer", + "format": "int32", + "description": "An integer that specifies the number of non-aggressive resource contentions after taking a snapshot." + }, + "numContinuousAggressiveContentions": { + "type": "integer", + "format": "int32", + "description": "An integer that specifies the number of consecutive times that resources are aggressively contended." + }, + "numNonSynchronizedSnapshotsBeforeNoContention": { + "type": "integer", + "format": "int32", + "description": "An integer that specifies a threshold number of unsynchronized snapshots to stop contention. When the number of unsynchronized snapshots has reached this threshold, the current set of child snappables do not contend for resources at the parent level." + }, + "internalVmwarePowerOnJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Number of internal vmware power on jobs for mounted/exported vm's that we can run at one time." + }, + "appBlueprintExportJobSpawnPollingDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. The AppBlueprint export job polls for completion of the child export jobs at the specified interval." + }, + "appBlueprintExportJobSpawnTimeoutDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. The AppBlueprint export job cancels child export jobs after the interval elapses." + }, + "exportAppBlueprintSnapshotJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent Blueprint Export jobs per node." + }, + "appBlueprintMountJobSpawnPollingDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. The AppBlueprint mount job polls for completion of the child export jobs at the specified interval." + }, + "appBlueprintMountJobSpawnTimeoutDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. The AppBlueprint mount job cancels child mount jobs after the interval elapses." 
+ }, + "appBlueprintCloudImageDeleteJobSpawnPollingDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. The AppBlueprint delete cloud image job polls for completion of the child image deletion jobs at the specified interval." + }, + "appBlueprintCloudImageDeleteJobSpawnTimeoutDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. The AppBlueprint delete cloud image job cancels child image deletion jobs after the interval elapses." + }, + "mountAppBlueprintSnapshotJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent Blueprint Export jobs per node." + }, + "startFailoverOnSourceJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent Blueprint StartFailoverOnSource jobs per node." + }, + "appBlueprintSnapshotForFailoverJobSpawnTimeoutDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. The Blueprint StartFailoverOnSource job cancels the app snapshot job spawn task after the interval elapses." + }, + "appBlueprintCloudConversionForFailoverJobSpawnTimeoutDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. The Blueprint StartFailoverOnSource job cancels the app cloud conversion job spawn task after the interval elapses." + }, + "appBlueprintCloudConversionJobSpawnPollingDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. The AppBlueprint cloud conversion job polls for completion of the child conversion jobs at the specified interval." + }, + "appBlueprintCloudConversionJobSpawnTimeoutDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. The AppBlueprint cloud conversion job cancels child conversion jobs after the interval elapses." + }, + "appBlueprintSnapshotCloudImageConversionJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent Blueprint cloud conversion jobs per node." + }, + "appBlueprintPowerOnJobSpawnTimeoutDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. The AppBlueprint export conversion job cancels child export jobs after the interval elapses." + }, + "blueprintPolarisPath": { + "type": "string", + "description": "The path to app blueprint detail page on Polaris. The complete url would look like 'https:////'." + }, + "pollFrequencyForConversionInMs": { + "type": "integer", + "format": "int32", + "description": "This represents the polling interval time for conversion in Ms." + }, + "totalWaitTimeForConversionInMs": { + "type": "integer", + "format": "int32", + "description": "This represents the total wait time for conversion to succeed." + }, + "ebsVolumeTypeForConversion": { + "type": "string", + "description": "This represents the volume type to be used for launching volume for conversion." + }, + "internalVmwareReconfigureAndPowerOnJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Number of internal jobs to reconfigure and power on vmware vms according to their recovery specs that we can run at one time." 
+ }, + "pollFrequencyForFingerprintCreationInMs": { + "type": "integer", + "format": "int32", + "description": "This represents the poll frequency time for polling the create fingerprint status." + }, + "maxWaitTimeForFingerprintCreationInMsForAGB": { + "type": "integer", + "format": "int32", + "description": "This represents the total wait time for fingerprint creation to succeed for 1 GB." + }, + "failoverToBrikChildJobSpawnPollingDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. The FailoverToBrik job polls for completion of the child jobs at the specified interval." + }, + "failoverToBrikPullReplicateJobSpawnTimeoutDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. The FailoverToBrik job cancels child pull replicate jobs after the interval elapses." + }, + "failoverToBrikHydrationJobSpawnTimeoutDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. The FailoverToBrik job cancels child hydration jobs after the interval elapses." + }, + "failoverToBrikReconfigurePowerOnJobSpawnTimeoutDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. The FailoverToBrik job cancels child reconfigure and power on jobs after the interval elapses." + }, + "failoverToBrikGuestConfigurationLogDir": { + "type": "string", + "description": "The path to local storage for guest configuration logs during failover to CDM." + }, + "failoverToBrikNetworkConfigurationTimeoutInMillis": { + "type": "integer", + "format": "int32", + "description": "This represents the total wait time for network configuration script running inside guest VM during failover to Brik." + }, + "failoverToBrikNetworkConfigurationCheckIntervalInMillis": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. The failoverToBrik job polls for completion of the network configuration script at the specified interval." + }, + "failoverToBrikWaitForGuestNetworkReadyTimeoutInMillis": { + "type": "integer", + "format": "int32", + "description": "This represents the total wait time for guest network is ready during failover to Brik." + }, + "failoverToBrikWaitForGuestNetworkReadyCheckIntervalInMillis": { + "type": "integer", + "format": "int32", + "description": "This represents the time interval when checking guest network is ready during failover to Brik." + }, + "failoverToBrikPostScriptExecutionTimeoutInMillis": { + "type": "integer", + "format": "int32", + "description": "This represents the total wait time to execute post script in milliseconds." + }, + "failoverToBrikDownloadLogFromGuestTimeoutInMillis": { + "type": "integer", + "format": "int32", + "description": "This represents the total wait time to download log from guest VM in milliseconds." + }, + "failoverRefExpirationTimeInMillis": { + "type": "integer", + "format": "int64", + "description": "Time in millis after which a failover reference on an app blueprint is considered to have expired." + } + } + }, + "GlobalAppFrameworkConfig": { + "type": "object", + "properties": { + "replicationOrchestrationJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent replication orchestration jobs per node." 
+ }, + "replicationOrchestrationFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes between replication orchestration jobs." + }, + "replicationToCloudOrchestrationJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent replication to cloud orchestration jobs per node." + }, + "replicationToCloudOrchestrationFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes between replication to cloud orchestration jobs." + }, + "uploadOrchestrationJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent upload orchestration jobs per node." + }, + "uploadOrchestrationFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes between upload orchestration jobs." + }, + "aggressiveContentionSleepTimeBetweenRetriesInMs": { + "type": "integer", + "format": "int32", + "description": "Amount of sleep time for the thread before retrying resource acquisition while in AggressiveContention State." + }, + "appDownloadSnapshotJobSpawnPollingDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "The periodicity at which the app snappable download snapshot job will poll for completion of child snapshot download jobs." + }, + "appDownloadSnapshotJobSpawnTimeoutDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "The duration after which the app snappable snapshot download job will cancel child snapshot download jobs." + } + } + }, + "GlobalAtlasConfig": { + "type": "object", + "properties": { + "default_snapshot_encoding_type": { + "type": "integer", + "format": "int32", + "description": "Default snapshot encoding type." + }, + "default_snapshot_replication_factor": { + "type": "integer", + "format": "int32", + "description": "Default snapshot replication factor." + }, + "default_journal_replication_factor": { + "type": "integer", + "format": "int32", + "description": "Default journal replication factor." + }, + "compacted_patch_dir_replication_factor": { + "type": "integer", + "format": "int32", + "description": "Compacted patch file replication factor for merged files." + }, + "default_journal_media_type": { + "type": "integer", + "format": "int32", + "description": "Media type to use for journal files (1 = FLASH, 2 = HDD)." + }, + "default_scratch_media_type": { + "type": "integer", + "format": "int32", + "description": "Media type to use for scratch files (1 = FLASH, 2 = HDD)." + }, + "brik_failure_tolerance": { + "type": "integer", + "format": "int32", + "description": "Number of brik failures to tolerate." + }, + "metadata_scan_enabled": { + "type": "boolean", + "description": "Is metadata scan enabled." + }, + "maintenance_manager_nthreads": { + "type": "integer", + "format": "int32", + "description": "Number of threads to use for running maintenance manager operations." + }, + "metadata_scan_delay_per_row_ms": { + "type": "integer", + "format": "int32", + "description": "Pace the scan at most one row per this many ms." + }, + "maintenance_manager_transcode_nthreads": { + "type": "integer", + "format": "int32", + "description": "Number of threads to use for running maintenance manager transcode operations." + }, + "maintenance_manager_transcode_limit": { + "type": "integer", + "format": "int32", + "description": "Maximum number of stripes to schedule for transcoding with maintenance manager." 
+ }, + "reserve_flash_nodes": { + "type": "boolean", + "description": "Reserve flash-heavy nodes for only running flash-heavy jobs, other regular jobs do not run on flash-heavy nodes." + }, + "flash_heavy_node_threshold_bytes": { + "type": "integer", + "format": "int64", + "description": "Minimum total flash capacity for a node to be considered as a flash-heavy node." + }, + "short_mjf_conversions_size_limit_mbs": { + "type": "integer", + "format": "int32", + "description": "If ingested bytes in the mjf are less than this threshold the PFC job is considered to be a SHORT_RUNNING_JOB." + }, + "flash_heavy_job_types": { + "type": "string", + "description": "Types of jobs that require extensive flash, and will be scheduled on flash-heavy nodes." + }, + "maintenance_manager_weights": { + "type": "string", + "description": "Weights for each queue for the maintenance manager." + }, + "drain_node_failure_tolerance": { + "type": "integer", + "format": "int32", + "description": "Number of OK node failures that each stripe with chunks on a node in PRE_MAINTENANCE needs to be able to tolerate." + }, + "mjf_read_ahead_limits": { + "type": "string", + "description": "Read Ahead Limits for each MJF read ahead type." + }, + "sdfs_split_mjf_set_job_context_timeout_millis": { + "type": "integer", + "format": "int32", + "description": "Timeout for setting job context for Split MJF. We keep this value higher than the other set job context calls because we need to set job context for all the open underlying MJFs." + }, + "sdfs_remove_all_timeout_millis": { + "type": "integer", + "format": "int32", + "description": "Timeout for removeAll RPC to sdfs. We keep this value high since recursive deletion may take a while if there are lots of files and directories under the directory for which the RPC is called." + }, + "range_cache_usage_ratio": { + "type": "number", + "format": "double", + "description": "Max disk space that can be used for a MergedFile RangeCache as fraction of the physical size consumed by all snapshots." + }, + "journal_stack_max_journal_disk_space_bytes": { + "type": "integer", + "format": "int32", + "description": "Journal size after which to roll and create a new journal." + }, + "journaled_range_cache_num_files_limit": { + "type": "integer", + "format": "int32", + "description": "Maximum number of files allowed within a JournaledRangeCache." + }, + "rdfs_enabled": { + "type": "boolean", + "description": "Is RDFS enabled." + }, + "enable_router_file_metadata_bin": { + "type": "boolean", + "description": "If enabled, we will use the RouterFileMetadataBin to store a copy of all RouterFile metadata and use it while initializing the RouterFile." + }, + "use_hdd_as_staging_area": { + "type": "boolean", + "description": "If enabled, we will use the disks marked as HDD for staging area in cloud clusters. Temporary flag used for E2E, until we have first hand support for Staging Area in bootstrap." + }, + "default_data_location_id": { + "type": "string", + "description": "Default cloud data location ID. If empty, this indicates that cloud storage is not in use." + } + } + }, + "GlobalAwsConfig": { + "type": "object", + "properties": { + "s3UploadStreamSizeInMB": { + "type": "integer", + "format": "int32", + "description": "When resumable multipart upload is enabled for s3 and s3Compatible, we upload files in chunks. This specifies the size of that chunk." 
+ }, + "maxAwsVmImportExportTasksThatCanRunOnCluster": { + "type": "integer", + "format": "int32", + "description": "Max number of AWS VM import export jobs that can run on a cluster. The limit of 20 is a default limit defined by AWS." + }, + "maxS3FileSizeInMB": { + "type": "integer", + "format": "int32", + "description": "Max File size that S3 supports." + }, + "maxDiskSizeForS3UploadInGB": { + "type": "integer", + "format": "int32", + "description": "Max Disk Space that should be used by multi part upload s3 api." + }, + "partSizeForS3UploadInMB": { + "type": "integer", + "format": "int32", + "description": "Default part size used for cloud upload." + }, + "maxPartSizeForS3UploadInMB": { + "type": "integer", + "format": "int32", + "description": "Maximum part Size used by multi part upload s3 api." + }, + "maxNumPartsForS3Upload": { + "type": "integer", + "format": "int32", + "description": "Maximum number of parts for a single upload to S3." + }, + "awsMaxConnections": { + "type": "integer", + "format": "int32", + "description": "The maximum number of allowed open HTTP connections to AWS that a client may have. Used for both S3 and Glacier." + }, + "awsSocketTimeoutInMillis": { + "type": "integer", + "format": "int32", + "description": "Timeout for reading from a connected socket. default 50 seconds." + }, + "awsTcpKeepAlive": { + "type": "boolean", + "description": "Sets whether or not to enable TCP KeepAlive support at the socket level for connections to AWS." + }, + "awsConnectionTimeoutInMillis": { + "type": "integer", + "format": "int32", + "description": "Timeout for creating new connections. Deafult 10 seconds." + }, + "awsConnectionMaxIdleMillis": { + "type": "integer", + "format": "int32", + "description": "The maximum idle time (in milliseconds) for a connection in the connection pool. Default 60 seconds." + }, + "awsMaxRetriesForRetryableErrors": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retry attempts for failed retryable requests." + }, + "multiPartUploadDirectory": { + "type": "string", + "description": "Temporary directory where chunks are written to before being uploaded by AWS SDK." + }, + "s3ResumableMultipartUploadEnabled": { + "type": "boolean", + "description": "Flag whether to use resumable multipart upload for S3. This flag is now deprecated. Please use s3EnableResumableMultipartUpload instead." + }, + "s3EnableResumableMultipartUpload": { + "type": "boolean", + "description": "Flag whether to use resumable multipart upload for S3." + }, + "isPrivateObjectStoreOutOfSpace": { + "type": "boolean", + "description": "This flag can be set to True if we discover that private object store is out of space to do metadata operations." + }, + "awsS3ApnUserAgentString": { + "type": "string", + "description": "AWS User agent string used to track data uploaded by Rubrik on AWS' side. Used for S3 and Glacier." + }, + "allowedAgeAsyncDownloadJobIdInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes after which an asynchronous retrieval job id should not be used further and a new retrieval request should be initiated." + }, + "awsSessionCredentialsMinRefreshThresholdInSeconds": { + "type": "integer", + "format": "int32", + "description": "Minimum time (in seconds) before expiration that we will refresh our cached AWS session credentials. Consequently, returned session credentials are always guaranteed to be valid for at least this long." 
+ }, + "awsSessionCredentialsMaxRefreshThresholdInSeconds": { + "type": "integer", + "format": "int32", + "description": "Maximum time (in seconds) before expiration that we will refresh our cached AWS session credentials." + }, + "awsSessionCredentialsRetryDelayMillis": { + "type": "integer", + "format": "int32", + "description": "Time in milliseconds to wait between attempts to refresh temporary session credentials." + }, + "awsSessionCredentialsDefaultTokenDurationInMinutes": { + "type": "integer", + "format": "int32", + "description": "Default AWS session credential lifetime to be passed to the token server. This value is used if the user doesn't override it when enabling session credentials for an archival location. Default 1 hour." + }, + "dcaCapServerProtocol": { + "type": "string", + "description": "Protocol to use when connecting to the CAP server." + }, + "dcaCapServerPort": { + "type": "integer", + "format": "int32", + "description": "Port to use when connecting to the CAP server." + }, + "osDiskDevicePath": { + "type": "string", + "description": "The device path at which root volume should be attached to an AWS instance." + }, + "dcaCapServerFilePath": { + "type": "string", + "description": "File path/API endpoint to use for getting credentials from the CAP server." + }, + "dcaCapServerTlsProtocol": { + "type": "string", + "description": "TLS protocol and version used to connect to the CAP server." + }, + "dcaCapServerShouldVerifyHostname": { + "type": "boolean", + "description": "If true, we will verify the hostname of the CAP server matches the certificate it provides. If false, all hostnames are allowed." + }, + "dcaCapServerShouldUseTofu": { + "type": "boolean", + "description": "If true, we trust only the certificates given by the CAP server when first used, so any subsequent connection must use the same cert or a cert from the same CA (if shouldIncludeCaCertsInTofu is also enabled)." + }, + "dcaAwsRegion": { + "type": "string", + "description": "The AWS region to use for DCA archival location." + }, + "waitTimeForAwsImportServiceThrottleInSecs": { + "type": "integer", + "format": "int32", + "description": "Time to wait for acquiring throttle for AWS Vm Imp." + }, + "useProxyForEc2": { + "type": "boolean", + "description": "Whether usage of proxy server is allowed for Ec2AccountManager." + }, + "s3EnableCaseInsensitiveHeaders": { + "type": "boolean", + "description": "Boolean value that indicates whether to use a case-insensitive comparison when checking for S3 encryption key headers. Set to true for S3-compatible locations that may use non-standard capitalization for object metadata headers. This flag currently only affects the x-amz-key header." + }, + "glacierUploadDefaultPartSizeInMB": { + "type": "integer", + "format": "int32", + "description": "Default part size used for Glacier upload. This must be a power of two." + }, + "glacierUploadMaxNumParts": { + "type": "integer", + "format": "int32", + "description": "Maximum number of parts for a single upload to Glacier. Set to 9990 rather than 10000 (the max allowed by AWS), so the size increase during an encryption will not cause an upload failure." + }, + "glacierArchiveExpeditedRetrievalPollDelayInSeconds": { + "type": "integer", + "format": "int32", + "description": "Delay in between requests when polling for the status of an Expedited tier Glacier archive retrieval job." 
+ }, + "glacierArchiveStandardRetrievalPollDelayInSeconds": { + "type": "integer", + "format": "int32", + "description": "Delay in between requests when polling for the status of a Standard tier Glacier archive retrieval job." + }, + "glacierArchiveBulkRetrievalPollDelayInSeconds": { + "type": "integer", + "format": "int32", + "description": "Delay in between requests when polling for the status of a Bulk tier Glacier archive retrieval job." + }, + "glacierArchiveRetrievalTimeoutInHours": { + "type": "integer", + "format": "int32", + "description": "Maximum number of hours to wait for a Glacier archive retrieval job to complete." + }, + "glacierDownloadJobProgressUpdateThresholdInBytes": { + "type": "integer", + "format": "int32", + "description": "Minimum number of bytes that must be downloaded from a Glacier archive before updating download job progress." + }, + "glacierInventoryRequestItemLimit": { + "type": "integer", + "format": "int32", + "description": "The number of inventory items returned by one single Glacier inventory request." + }, + "glacierKeyDownloadBatchSize": { + "type": "integer", + "format": "int32", + "description": "Size of the batch while downloading Glacier keys." + }, + "glacierKeyBatchDownloadRetryCount": { + "type": "integer", + "format": "int32", + "description": "Number of retries to batch download glacier keys." + }, + "glacierKeyDownloadInitiateRequestRetryCount": { + "type": "integer", + "format": "int32", + "description": "Number of retries to initiate download request for glacier keys." + }, + "glacierVaultLockIdValidDurationInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes after which a new Glacier vault lock will expire after it is initiated, unless it is either confirmed or aborted. This value should not exceed 1440, as Glacier automatically expires any in-progress vault lock 24 hours after it is initiated." + }, + "s3CompatibleForceV4Signing": { + "type": "boolean", + "description": "Force V4 signing when communicating with an S3 Compatible object store. By default, V2 signing is used. This will affect all S3 Compatible archival locations on the cluster." + }, + "s3UsePathStyleAccess": { + "type": "boolean", + "description": "Boolean value that indicates whether to use path-based access when communicating with AWS S3 buckets. This is needed to support legacy bucket names that contain periods, as they are not compatible with S3's SSL certificates when using virtual hosted-style buckets." + }, + "costOfBoltPerHour": { + "type": "number", + "format": "double", + "description": "This is the cost per hour in dollars for running bolt on ec2. Bolt comes up with one OS disk - EBS gp2 of 400 GB + maximum of 3 data disks for consolidation three each of 1TB st1, m5.2xlarge instance. Here we take average disk size of 1.5TB." + }, + "costOfOneGBInS3PerMonth": { + "type": "number", + "format": "double", + "description": "This is the cost per month in dollars for storing one GB in S3 Standard storage class." + }, + "costOfOneGBInS3StandardIAPerMonth": { + "type": "number", + "format": "double", + "description": "This is the cost per month in dollars for storing one GB in S3 Standard Infrequent Access storage class." + }, + "costOfOneGBInS3GlacierPerMonth": { + "type": "number", + "format": "double", + "description": "This is the cost per month in dollars for storing one GB in S3 Glacier storage class." 
+ }, + "costOfOneGBInS3GlacierDeepArchivePerMonth": { + "type": "number", + "format": "double", + "description": "This is the cost per month in dollars for storing one GB in S3 Glacier Deep Archive storage class." + }, + "consolidationBandwidthOnBoltInMBps": { + "type": "integer", + "format": "int32", + "description": "This is the bandwidth of consolidation operation in megabytes per second runnning on aws in rubrik bolt." + }, + "trustAwsServerTlsCertificateOnFirstUse": { + "type": "boolean", + "description": "Sets whether or not we should trust self signed certificates from aws server with custom endpoint." + }, + "checkForStaleUploadFilesFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Duration at which the check and cleanup of stale temporary upload files left behind by previous upload jobs should occur." + }, + "minAgeToConsiderStaleFileInMinutes": { + "type": "integer", + "format": "int32", + "description": "When checking for stale upload files for cleanup, select a file only if it was last modified more than this duration ago." + }, + "maxWaitTimeForInstanceToReachOkSystemStatusInSecs": { + "type": "integer", + "format": "int32", + "description": "This is the maximum amount of time in seconds that we wait in order for the AWS instance to reach to OK system status." + }, + "s3CompatibleEnableResumableMultipartUpload": { + "type": "boolean", + "description": "Flag whether to use resumable multipart upload for S3Compatible archival targets." + }, + "s3CompatibleUploadStreamSizeInMB": { + "type": "integer", + "format": "int32", + "description": "When resumable multipart upload is enabled for s3 and s3Compatible, we upload files in chunks. This specifies the size of that chunk." + }, + "maxCallsToListMultipartUploadsRequest": { + "type": "integer", + "format": "int32", + "description": "The maximum number of times to call ListMultiPartUploadsRequest when querying for active multipart uploads." + }, + "forceS3CompatibleInstructionFileMode": { + "type": "boolean", + "description": "Force Instruction File encryption storage mode when creating an S3Compatible archival location." + }, + "enableS3CompatibleInstructionFileFallbackMode": { + "type": "boolean", + "description": "Enable fallback to Instruction File encryption storage mode if the default (Object Metadata) mode fails when creating an S3Compatible archival location." + }, + "maxAgeStaleMultiPartUploadsInHours": { + "type": "integer", + "format": "int32", + "description": "The maximum number of hours a MultiPartUpload can exist before being considered stale and being cleaned up." + }, + "uploadProgressUpdateSizeInMB": { + "type": "integer", + "format": "int32", + "description": "When using multipart upload, update the job progress after uploading this amount of data to reduce the frequency of updates to job_status table." + }, + "uploadProgressDurationInMinutes": { + "type": "integer", + "format": "int32", + "description": "When using multipart upload, update the job progress after this amount of time irrespective of the update size." + }, + "enableMultiPartUploadCleanupTaskOnS3CompatibleObjectStore": { + "type": "boolean", + "description": "When true, enables aborting multipart uploads from the AbortMultiPartUpload task of the Archival Maintenance job for S3 compatible object stores. If false, the task is a no-op for S3 compatible object stores." 
+ }, + "waitTimeBeforeReArchivalAfterRehydrationInHours": { + "type": "integer", + "format": "int32", + "description": "Number of hours to wait after rehydration before moving blobs back to archive." + }, + "sleepTimeWhileWaitingForRehydrationInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes to wait in between rehydration status checks. for Standard and Bulk retrieval." + }, + "sleepTimeWhileWaitingForExpeditedRehydrationInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes to wait in between rehydration status checks. for Expedited retrieval." + }, + "maxWaitTimeForExpeditedRehydrationInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes to wait for rehydration to complete with Expedited retrieval (before failing)." + }, + "maxWaitTimeForStandardRehydrationInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes to wait for rehydration to complete with Standard retrieval (before failing)." + }, + "maxWaitTimeForBulkRehydrationInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes to wait for rehydration to complete with Bulk retrieval (before failing)." + }, + "minDurationBeforeTieringToStandardIAInMinutes": { + "type": "integer", + "format": "int32", + "description": "Minimum duration after a snapshot is uploaded before it can be considered for tiering to S3 Standard Infrequent Access storage class (Cool Tier)." + }, + "minDurationInStandardIAInMinutes": { + "type": "integer", + "format": "int32", + "description": "Minimum duration that a snapshot must remain in the S3 Standard Infrequent Access storage class (Cool Tier), before it should be considered for tiering to S3 Glacier or Glacier Deep Archive storage class (Cold Tier). In other words, this threshold determines if a snapshot should be tiered to the intermediate Cool tier from the Hot tier before tiering it to the Cold tier. This threshold is determined as a guideline so that any early deletion charges that may be incurred do not exceed the cost of storage in the S3 Standard storage class (Hot Tier)." + }, + "minDurationInGlacierFromStandardIAInMinutes": { + "type": "integer", + "format": "int32", + "description": "Minimum remaining retention period a snapshot may have for it to be considered for tiering to S3 Glacier storage class (Cold Tier) from S3 Standard Infrequent Access storage class (Cool Tier). This threshold is determined as a guideline so that any early deletion charges that may be incurred do not exceed the cost of storage in S3 Standard Infrequent Access storage class (Cool Tier)." + }, + "minDurationInGlacierDeepArchiveFromStandardIAInMinutes": { + "type": "integer", + "format": "int32", + "description": "Minimum remaining retention period a snapshot may have for it to be considered for tiering to S3 Glacier Deep Archive storage class (Cold Tier) from S3 Standard Infrequent Access storage class (Cool Tier). This threshold is determined as a guideline so that any early deletion charges that may be incurred do not exceed the cost of storage in S3 Standard Infrequent Access storage class (Cool Tier)." + }, + "minDurationInGlacierFromStandardInMinutes": { + "type": "integer", + "format": "int32", + "description": "Minimum remaining retention period a snapshot may have for it to be considered for tiering to S3 Glacier storage class (Cold Tier) from S3 Standard storage class (Hot Tier). 
This threshold is determined as a guideline so that any early deletion charges that may be incurred do not exceed the cost of storage in S3 Standard storage class (Hot Tier)." + }, + "minDurationInGlacierDeepArchiveFromStandardInMinutes": { + "type": "integer", + "format": "int32", + "description": "Minimum remaining retention period a snapshot may have for it to be considered for tiering to S3 Glacier Deep Archive storage class (Cold Tier) from S3 Standard storage class (Hot Tier). This threshold is determined as a guideline so that any early deletion charges that may be incurred do not exceed the cost of storage in the Standard storage class (Hot Tier)." + }, + "disallowSmartTieringSlaForAws": { + "type": "boolean", + "description": "Whether to allow creation or edits of SLA Domains containing Smart Tiering for AWS. Smart Tiering to AWS was implemented in 5.3, but until further testing is done internally we would like to disable customer creation of such SLAs, without Rubrik involvement." + }, + "disableNewGlacierArchivalLocations": { + "type": "boolean", + "description": "Whether to disable creation of new Glacier archival locations. As of 5.3 release the Glacier location type is deprecated, and the recommended alternative is to use an S3 archival location with tiering to Glacier enabled." + }, + "suppressErrorsForUnsupportedStorageClasses": { + "type": "boolean", + "description": "When set to true, allows unsupported storage tier configurations, mapping them internally to a storage tier of Hot." + }, + "s3ColdStorageTierToUseForTieringExistingSnapshots": { + "type": "string", + "description": "Cold storage tier used by the tier existing snapshots job when tiering snapshots for unprotected objects are archived to S3 archival locations. The value of this configuration must be Glacier or GlacierDeepArchive. For protected objects, the Rubrik cluster uses the cold storage tier selected by the user on the SLA Domain." + }, + "s3MaxFileSizeForCopyFileApiInMb": { + "type": "integer", + "format": "int32", + "description": "The max file size to use the Copy File API on. If a file exceeds this file size we will skip copying the file to the target tier." + } + } + }, + "GlobalAzureConfig": { + "type": "object", + "properties": { + "azureUploadStreamSizeInMB": { + "type": "integer", + "format": "int32", + "description": "For Azure we upload files in chunks. This specifies the size of that chunk. For prod it is set to 5 Gigs." + }, + "azureStreamWriteSizeForBlockBlobsInMB": { + "type": "integer", + "format": "int32", + "description": "This is the config used by Azure SDK to determine the size of the block that it uses for upload. Azure SDK can support this value from 16KB to 100MB." + }, + "azureStreamWriteSizeForPageBlobsInMB": { + "type": "integer", + "format": "int32", + "description": "This is the config used by Azure SDK to determine the size of the page blob block that it uses for upload. This can have a max value of 1 MB." + }, + "azureChunkSizeForUploadingPageBlobsInBytes": { + "type": "integer", + "format": "int32", + "description": "This is the chunk length size to upload a page blob using Azure SDK from a given offset." + }, + "azureProviderTag": { + "type": "string", + "description": "This value is given by MSFT team. MSFT looks at all the resources with this provider tag and see how much revenue it is generating from it. It has nothing to do with customer point of view." 
+ }, + "uploadZerosWhileUploadingPageBlob": { + "type": "boolean", + "description": "Boolean flag to decide whether to skip zero chunks while uploading page blobs." + }, + "azureResumableMultipartUploadEnabled": { + "type": "boolean", + "description": "Flag whether to use resumable multipart upload for azure." + }, + "azureApnUserAgentString": { + "type": "string", + "description": "Azure User agent string used to track data uploaded by Rubrik on Azure's side." + }, + "useProxyForAzure": { + "type": "boolean", + "description": "Whether usage of proxy server is allowed for AzureComputeAccountManager." + }, + "shouldEnableAzureCloudOnForLinuxVms": { + "type": "boolean", + "description": "If set to true, cloud on of Linux VMs onto Azure will be allowed(only from rest end point, it will still be disbled from UI). If set to false, cloud on of Linux VMs onto Azure willn't be allowed." + }, + "shouldUseDefaultParamTemplateForAzureCloudOn": { + "type": "boolean", + "description": "If set to true, specialized deployment template will be used with default arguments. Further, this template file will also be stored in customer's account. This helps customers to deploy their VMs from VHDs directly without involving Rubrik cluster in case of disaster." + }, + "azureSingleDiskVmDeploymentTemplate": { + "type": "string", + "description": "Azure deployment template used to instantiate a VM with a single disk with VHD being the source to create the disk." + }, + "azureMultiDiskVmDeploymentTemplate": { + "type": "string", + "description": "Azure deployment template used to instantiate a VM with multiple disks with VHDs being the source to create the disks." + }, + "azureSingleDiskVmFromSnapshotDeploymentTemplate": { + "type": "string", + "description": "Azure deployment template used to instantiate a VM with a single disk with snapshot being the source to create the disk." + }, + "azureMultiDiskVmFromSnapshotDeploymentTemplate": { + "type": "string", + "description": "Azure deployment template used to instantiate a VM with multiple disks with snapshots being the source to create the disks." + }, + "azureMultiDiskGeneralizedVmDeploymentTemplate": { + "type": "string", + "description": "Azure deployment template used to instantiate a VM with a single disk." + }, + "azureSingleDiskGeneralizedVmDeploymentTemplate": { + "type": "string", + "description": "Azure deployment template used to instantiate a VM from generalized image having os disk and no data disk." + }, + "azureConnectivityCheckVmSize": { + "type": "string", + "description": "Size of the azure instance for connectivity check. Standard_B1s is the least expensive one among all." + }, + "azureBoltNumDataDisks": { + "type": "integer", + "format": "int32", + "description": "Number of data disks to attach to Azure bolt instance." + }, + "azureBoltDataDiskSizeInGb": { + "type": "integer", + "format": "int32", + "description": "Size of each data disk in GB." + }, + "azureStormUsername": { + "type": "string", + "description": "Deafult Unix username for storm." + }, + "azureStormPasswordSuffix": { + "type": "string", + "description": "Password suffix to use for the user account on storm." + }, + "maxVmDiskSizeForAzureInstantiationInGBForImageConversion": { + "type": "integer", + "format": "int32", + "description": "Maximum vm disk size allowed for instantiation of vm snapshot on Azure for CLOUD_IMAGE_CONVERSION Job." 
+ }, + "maxVmDiskSizeForAzureInstantiationInGBForInstantiation": { + "type": "integer", + "format": "int32", + "description": "Maximum vm disk size allowed for instantiation of vm snapshot on Azure for INSTANTIATE_ON_CLOUD job." + }, + "shouldGcImagesOnAzure": { + "type": "boolean", + "description": "should allow gc to run on images that are created from generalized template to launch bolt on Azure." + }, + "costOfBoltPerHour": { + "type": "number", + "format": "double", + "description": "This is the cost per hour in dollars for running bolt on Azure. Bolt comes up with three data disks Standard_LRS each of 1TB each, one Premimum LRS OS Disk of 400GB and with Standard_DS3_v2 instance. For consolidation we assume average of 1.5TB data disks. This value is calculated for US West 2 region." + }, + "costOfOneGBInBlockBlobPerMonth": { + "type": "number", + "format": "double", + "description": "This is the cost per month in US dollars for storing one GB in azure block blob. This value is for US West 2 region." + }, + "costOfOneGbInCoolBlockBlobPerMonth": { + "type": "number", + "format": "double", + "description": "Cost per GB per month for Azure Cool Tier block blob storage in US dollars. This value is for US West 2 region." + }, + "costOfOneGbInColdBlockBlobPerMonth": { + "type": "number", + "format": "double", + "description": "Cost per GB per month for Azure Archive Tier block blob storage in US dollars. This value is for US West 2 region." + }, + "consolidationBandwidthOnBoltInMBps": { + "type": "integer", + "format": "int32", + "description": "This is the bandwidth of consolidation operation in megabytes per second runnning on azure in rubrik bolt." + }, + "retryCountToWaitForFuture": { + "type": "integer", + "format": "int32", + "description": "Number of times to retry before waiting for a future to finish." + }, + "sleepTimeWhileWaitingForFutureInSecs": { + "type": "integer", + "format": "int32", + "description": "Sleep time in seconds before checking for a future." + }, + "sleepTimeWhileWaitingForRehydrationInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes to wait in between rehydration status checks." + }, + "maxWaitTimeForRehydrationInHours": { + "type": "integer", + "format": "int32", + "description": "Number of hours to wait for rehydration to complete (before failing)." + }, + "waitTimeBeforeReArchivalAfterRehydrationInHours": { + "type": "integer", + "format": "int32", + "description": "Number of hours to wait after rehydration before moving blobs back to archive." + }, + "useAzureUploadInBlocks": { + "type": "boolean", + "description": "This is a feature toggle for Azure block uploading. When false, Azure will upload files in a single part. When true, Azure will use block uploading." + }, + "minDurationBeforeTieringToCoolInMinutes": { + "type": "integer", + "format": "int32", + "description": "Minimum duration after a snapshot is uploaded before it can be considered for tiering to Azure Cool tier." + }, + "minDurationInCoolTierInMinutes": { + "type": "integer", + "format": "int32", + "description": "Minimum duration that a snapshot must remain in the Azure Cool tier, before it should be considered for tiering to Azure Archive tier. This threshold is determined as a guideline so that any early deletion charges that may be incurred do not exceed the cost of storage in the Hot tier." 
+ }, + "minDurationInColdTierFromCoolInMinutes": { + "type": "integer", + "format": "int32", + "description": "Minimum remaining retention period a snapshot may have for it to be considered for tiering to Azure Archive tier from Cool tier. This threshold is determined as a guideline so that any early deletion charges that may be incurred do not exceed the cost of storage in the Cool tier." + }, + "minDurationInColdTierFromHotInMinutes": { + "type": "integer", + "format": "int32", + "description": "Minimum remaining retention period a snapshot may have for it to be considered for tiering to Azure Archive tier from Hot tier. This threshold is determined as a guideline so that any early deletion charges that may be incurred do not exceed the cost of storage in the Hot tier." + }, + "httpShortClientTimeoutInMillis": { + "type": "integer", + "format": "int32", + "description": "Timeout in milliseconds for the short Azure HTTP client." + }, + "httpLongClientTimeoutInMillis": { + "type": "integer", + "format": "int32", + "description": "Timeout in milliseconds for the long Azure HTTP client." + }, + "enableMultiPartDownloadInAppendMode": { + "type": "boolean", + "description": "Flag to enable multi-part file download in append mode. When enabled, it does not download parts first into temporary files and then append to the destination file. Instead, the parts are directly downloaded as append to the destination file." + }, + "maxFileSizeForStreamBasedMigrationInMB": { + "type": "integer", + "format": "int32", + "description": "Specifies the maximum file size for which streaming based archival migration will be performed. Files larger than this size are migrated by downloading the file to the cluster first." + }, + "lsPrefixCacheEntryTtlInSeconds": { + "type": "integer", + "format": "int32", + "description": "TTL in seconds for Azure manager lsprefix caching." + }, + "lsPrefixCacheMaxSize": { + "type": "integer", + "format": "int32", + "description": "The maximum cache size for Azure manager lsprefix caching." + }, + "lsPrefixCacheThreadCount": { + "type": "integer", + "format": "int32", + "description": "The max thread count for the Azure manager lsprefix cache." + } + } + }, + "GlobalBlobstoreConfig": { + "type": "object", + "properties": { + "backupMetadataDuringGcReleaseLock": { + "type": "boolean", + "description": "This is set to true for unit tests." + }, + "backupMetadataDuringBlobStoreOpen": { + "type": "boolean", + "description": "This is set to true for unit tests." + }, + "backupMetadataDuringBlobStoreClose": { + "type": "boolean", + "description": "This is set to true for unit tests." + }, + "backupMetadataDuringBlobStoreCreate": { + "type": "boolean", + "description": "This is set to true for unit tests." + }, + "defaultShardSizeForShardedBlobStoreInGB": { + "type": "integer", + "format": "int32", + "description": "Default size of a shard if not provided to beginCreate method." + }, + "consolidateJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for consolidate jobs in minutes." + }, + "crossJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for cross increment jobs in minutes." + }, + "gcJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for blob GC jobs in minutes." + }, + "reverseJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Reverse job frequency." 
+ }, + "crossRebaseJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for cross rebase jobs in minutes." + }, + "consolidatePercentDiskSpaceNeeded": { + "type": "number", + "format": "double", + "description": "This is the percent of disk space reserved by consolidate job. The percent is calculated on the total space occupied by the blobs in the spec." + }, + "optimizeRepresentationPercentDiskSpaceNeeded": { + "type": "number", + "format": "double", + "description": "This is the percent (multiple) of disk space reserved by optimize representation job. The percent is calculated on the space occupied by the blobs in the specs." + }, + "localConsolidateSpecsCapInGB": { + "type": "integer", + "format": "int32", + "description": "cap on the consolidate specs per group in GB on local cluster." + }, + "minDataPointsForConsolidationNotification": { + "type": "integer", + "format": "int32", + "description": "Minimum number of data points of consolidation bandwidth to be collected in deciding if a low bandwidth notification alert should be sent. This is currently only used in Archival Consolidation." + }, + "timePeriodInDaysToCollectConsolidationData": { + "type": "integer", + "format": "int32", + "description": "Time period in days for aggregating the data of bandwidth used in consolidation if a low notification bandwsith alert should be sent. This is currently only used in Archival Consolidation." + }, + "archivalConsolidateSpecsCapInGB": { + "type": "integer", + "format": "int32", + "description": "cap on the consolidate specs per group in GB on archival store." + }, + "maxWaitTimeForCloudConsolidationInDays": { + "type": "integer", + "format": "int32", + "description": "This is the max time we wait after the previous execution of consolidation on public cloud. During this time window, we only consolidate if the cost based heuristics are met but after this time window, we ignore the cost based heuristics." + }, + "gcDeletionGroupSize": { + "type": "integer", + "format": "int32", + "description": "Number of blobs to delete at a time so gc can make incremental progress." + }, + "reverseMinChangeRate": { + "type": "integer", + "format": "int32", + "description": "Minimum change rate to run reverse." + }, + "reverseMaxChainLength": { + "type": "integer", + "format": "int32", + "description": "If the chain length is at least this value, reverse is enforced even if the change rate does not reach reverseMinChangeRate. A negative value means that we never enforce reverse based on chain length." + }, + "reverseForkScoreThreshold": { + "type": "number", + "format": "double", + "description": "Threshold of score assigned to a fork in a diff tree topology when picking the chain to cross. The score is a number between 0.0 and 1.0 indicating how \"good\" a fork is - forks farther away from the base are better. Setting this to 1.0 disables the check for good forks and falls back to the earlier chain selection algorithm of picking the one with the largest content sequence number." + }, + "relativeChainLengthThresholdForReverse": { + "type": "number", + "format": "double", + "description": "Threshold used to select the set of long-ish chains, from the set of all eligible chains for reverse. This threshold is a number between 0.0 and 1.0 and allows us to decide how aggressive we want to be in picking the longest chain to reverse. Setting this to 1.0 disables this check, and falls back to previous algorithm of picking the chain with largest head content sequence number." 
+ }, + "reverseMinNumberOfDisks": { + "type": "integer", + "format": "int32", + "description": "Minimum number of snapshots required to run reverse This is a knob that can be useful to force a reverse in the field but in general we do not want reverse operations to happen if it is not absolutely needed and hence setting this to a very high value in production." + }, + "blobStoreMetadataBackupWindowSize": { + "type": "integer", + "format": "int32", + "description": "The number of metadata backup files to keep." + }, + "enableLocalSdfsBlobStoreMetadataBackup": { + "type": "boolean", + "description": "Determines whether metadata should be backed up in sdfs when blob store is operating on blobs in the local cluster." + }, + "enableBlobStoreHeavyWeightChecks": { + "type": "boolean", + "description": "Whether to enable heavy-weight tests in blob store." + }, + "enableCascadingCross": { + "type": "boolean", + "description": "If true, enables the cross job to also perform cascading crosses, where a cross increment can later be used as a base, and a base can further cross against any full or cross increment." + }, + "crossTreeDepthWeightParam": { + "type": "number", + "format": "double", + "description": "This parameter only matters when cascading cross is enabled. It is used in the cross base selection heuristic for cascading cross as the weight for the cross tree depth - w1 * crossTreeDepth + w2 * numCrossRefs This parameter is w1 in the above expression." + }, + "numCrossReferencesWeightParam": { + "type": "number", + "format": "double", + "description": "This parameter only matters when cascading cross is enabled. It is used in the cross base selection heuristic for cascading cross as the weight for the number of cross references - w1 * crossTreeDepth + w2 * numCrossRefs This parameter is w2 in the above expression." + }, + "similarityThresholdForCross": { + "type": "number", + "format": "double", + "description": "Threshold of similarity required to choose a cross base while deduping a full snapshot." + }, + "maxNumberOfSimilarGroupsToConsiderForCross": { + "type": "integer", + "format": "int32", + "description": "In large clusters cross can get expensive because there are O(n^2) possible pairs of groups to compare, which leads to increases in the number of queries we make per-node. To avoid an extreme case, limit the number of similar groups we'll check to find a valid cross base." + }, + "maxNumCrossRefsSoftCap": { + "type": "integer", + "format": "int32", + "description": "DiffChainBlobStore's soft cap on the number of crosses to permit that are based on a single base. The cap is soft in the sense that we do a precheck and exclude candidate bases with more cross refs prior to choosing a cross base, but do not enforce anything after the fact. There could be races that allow the number to exceed this cap, and if the cap is lowered, we will not enter a failure loop." + }, + "maxCrossTreeHeight": { + "type": "integer", + "format": "int32", + "description": "DiffChainBlobStore's limit on the height of the cross tree for cascading crosses. This limit is enforced by the cross job before performing a cross, but we do not enfore this in blobstore transaction invariants." + }, + "similarityToleranceForCrossBase": { + "type": "number", + "format": "double", + "description": "During a cross operation, we pick the cross base based on the similarity score. 
This implies that in a scenario where there are multiple contents that are quite similar (e.g., during a PoC) there is no guarantee that all contents will get crossed against the same base (which is the most optimal outcome). By tweaking this parameter we can control this behaviour, and enable the cross job to pick the base with the most number of existing cross refs. E.g., if this config is set to 0.05, and we observe the following similarity scores - 0.4, 0.38, 0.37, 0.34, 0.32 (after filtering with the usual similarity threshold); then, we will pick the base with the most refs from among 0.4, 0.38, 0.37 (since they are all within a tolerance of 0.05 relative to the max, 0.4)." + }, + "simHashCacheFileRefreshFrequencySeconds": { + "type": "integer", + "format": "int32", + "description": "Period of the global job to rebuild the in sim hash cache file." + }, + "simHashCacheStalenessThresholdMillis": { + "type": "integer", + "format": "int32", + "description": "Threshold to reload the in memory sim hash cache from the cached file. A value of 0 implies we always reload, assuming time is monotonic." + }, + "maxBlobsToGcPerJob": { + "type": "integer", + "format": "int32", + "description": "Maximum number of blobs to be GCed per job run." + }, + "transactionUtilNumRetries": { + "type": "integer", + "format": "int32", + "description": "Number of times to try before failing a transaction." + }, + "metadataBackupNumRetries": { + "type": "integer", + "format": "int32", + "description": "Number of times to try before failing a metadata backup procedure." + }, + "maxSleepUpperBoundInMs": { + "type": "integer", + "format": "int32", + "description": "Maximum amount of time to sleep before the next retry." + }, + "consolidationWindowForFullUploadInHrs": { + "type": "integer", + "format": "int32", + "description": "This is the time window for which we relax our constraints on the chain length so that consolidation would clean up the expired snapshots within the window." + }, + "maxArchivalChainLengthWithConsolidationEnabled": { + "type": "integer", + "format": "int32", + "description": "This is the max chain length that should be present on the archival location with consolidation enabled after the time window consolidationWindowForFullUploadInHrs." + }, + "maxArchivalChainLengthWithConsolidationDisabled": { + "type": "integer", + "format": "int32", + "description": "Upload the next snapshot as a full if uploading it as an incremental will causse the chain to exceed this length with archival consolidation disabled." + }, + "maxLocalConcurrentConsolidateSpecExecutions": { + "type": "integer", + "format": "int32", + "description": "Max number of possible concurrent executions for the given sequence of consolidation specs on a local cluster." + }, + "throttleWaitTimeInSeconds": { + "type": "integer", + "format": "int32", + "description": "Wait time in seconds to acquire throttle for bg jobs." + }, + "throttleSleepTimeInSeconds": { + "type": "integer", + "format": "int32", + "description": "Sleep time in seconds for acquiring throttle for bg jobs." + }, + "maxBranchedIncrementsPerBlob": { + "type": "integer", + "format": "int32", + "description": "Maximum number of branched increments present per blob." + }, + "reverseProgressPollIntervalSeconds": { + "type": "integer", + "format": "int32", + "description": "Polling interval for tracking reverse-job-progress. This is used to free up disk-semaphore permits as reverse makes progress." 
+ }, + "reverseDiskSemaphoreIncrementalReleaseThresholdInGB": { + "type": "integer", + "format": "int32", + "description": "Decides how much unnecessary disk-semaphore permits reverse job should let accumulate before releasing them incrementally." + }, + "maxPatchSizeForArchivalReverseInGb": { + "type": "integer", + "format": "int32", + "description": "Maximum size of a patch file when performing archival reverse in GB." + }, + "runUnsetGiganticShardSizeJob": { + "type": "boolean", + "description": "If true, runs a job which unsets shard size for all sharded groups, if it's greater than gigantic shard size (100 PB)." + }, + "unsetGiganticShardSizeJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Time interval between unset gigantic shard size job instances." + }, + "isPromoteManagedVolumeExportHandleJobEnabled": { + "type": "boolean", + "description": "If true, runs a job that promotes non-rebasing handles on MV exports to rebasing handles." + }, + "promoteManagedVolumeExportHandleJobFreqInMin": { + "type": "integer", + "format": "int32", + "description": "Time interval between MV export handle promotion job." + }, + "leakedHandleLifeInHours": { + "type": "integer", + "format": "int32", + "description": "Maximum threshold to consider a potentially leaked handle as in-progress." + }, + "leakedCreateSpecLifeInHours": { + "type": "integer", + "format": "int32", + "description": "Maximum threshold to consider a potentially leaked create spec as in-progress." + }, + "abandonedGroupPurgeJobFreqInMinutes": { + "type": "integer", + "format": "int32", + "description": "Time interval between abandoned-group-purge job runs." + }, + "reportFailuresBeingRetried": { + "type": "boolean", + "description": "Prints details of exception that lead to retry." + }, + "dataVerificationExpiryInMins": { + "type": "integer", + "format": "int32", + "description": "Time after which a full which has been scanned for corruption will be considered invalid again in minutes. Also the minimum time after which a blob can be rescanned if earlier attempt had failed with a re-triable error." + }, + "testServerPort": { + "type": "integer", + "format": "int32", + "description": "Listener port for blobstore test-service. This service is only used by blobstore-functional-tests and is never used in production." + }, + "testSvcSocketTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Read/connect/write timeout in ms for test-svc sockets." + }, + "testServerDebugPort": { + "type": "integer", + "format": "int32", + "description": "JDWP listener port for blobstore test-service. This service is only used by blobstore-functional-tests and is never used in production." + }, + "testDataServerPort": { + "type": "integer", + "format": "int32", + "description": "Listener port for blobstore data-test-service. This service is only used by blobstore-functional-tests and is never used in production." + }, + "optimizeRepresentationJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Optimize representation job frequency." + }, + "maxUnoptimizedContentsPerGroup": { + "type": "integer", + "format": "int32", + "description": "The maximum number of unoptimzed contents allowed in a diff-chain group, after which beginCreate of any new blob would fail." + }, + "enableRepresentationOptimizer": { + "type": "boolean", + "description": "Determines whether the representation of unoptimized vpfs (e.g. journals) should be optimized to patch files. 
This should ideally be used only for testing purposes (for e.g. halt pfc and test export journals) and not in production environments." + }, + "alertPercentageThresholdForMaxUnoptimizedContents": { + "type": "integer", + "format": "int32", + "description": "When the percentage of current unoptimized contents in a diff-group compared to the maximum allowed unoptimized contents exceeds the threshold, alert is sent." + }, + "timeToLiveForCsv": { + "type": "integer", + "format": "int32", + "description": "Time to live for result csv for backup verification." + }, + "isBackupVerificationEnabled": { + "type": "boolean", + "description": "A boolean that determines if backup verification is enabled." + }, + "maxBackupVerificationJobPerNode": { + "type": "integer", + "format": "int32", + "description": "The maximum number of backup verification job allowed per node in running state at a given time." + }, + "percentFingerprintsToVerify": { + "type": "integer", + "format": "int32", + "description": "Percent to fingerprints to verify in MJF verification." + }, + "maxMjfVerificationTaskThread": { + "type": "integer", + "format": "int32", + "description": "The maximum number of thread used for backup verification Mjf Task." + }, + "maxSnapshotsVerifiedPerJob": { + "type": "integer", + "format": "int32", + "description": "The maximum number of snapshots that can be verified per instance of backup verification job." + }, + "blobstoreServerPort": { + "type": "integer", + "format": "int32", + "description": "Port on which BlobStore GRPC server runs." + }, + "archivalChainLengthAbsoluteLimit": { + "type": "integer", + "format": "int32", + "description": "This is the absolute max chain length that should be present on the archival location, superseding all other limits." + }, + "enableVpfAsFinalFormat": { + "type": "boolean", + "description": "If set to true, the VPF is not converted to another format using transcode jobs. The current format of a VPF is maintained, unless blobstore background jobs modify the contents of the chain associated with this VPF." + } + } + }, + "GlobalCallistoConfig": { + "type": "object", + "properties": { + "gcExpireTtlJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency of TTL job in minutes." + }, + "cockroachBackupJobPeriodMinutes": { + "type": "integer", + "format": "int32", + "description": "Period of the cockroach backup job, in minutes." + }, + "cockroachCompactJobPeriodMinutes": { + "type": "integer", + "format": "int32", + "description": "Period of the cockroach compact job, in minutes." + } + } + }, + "GlobalCdpConfig": { + "type": "object", + "properties": { + "logToFinalizeOnSourceSleepTimeInMs": { + "type": "integer", + "format": "int32", + "description": "Sleep time while waiting for the log to be finalized on the source." + }, + "logToFinalizeOnSourceRetryAttempts": { + "type": "integer", + "format": "int32", + "description": "Max retry attempts while waiting for the log to be finalized on the source." + }, + "logToFinalizeOnSourceLiveReplicatorClosedRetryAttempts": { + "type": "integer", + "format": "int32", + "description": "Max retry attempts while waiting for a log which was live replicated and was marked closed by the LRS (log-receiver) in the data path." + }, + "logReplicationSenderLoopSleepTimeInMs": { + "type": "integer", + "format": "int32", + "description": "Sleep time after every sharded loop on virtual machines to start log replication, update replication state and cleanup if required." 
+ }, + "retriesForPersistenceFailuresInStreamHandle": { + "type": "integer", + "format": "int32", + "description": "The number of retries for persistence failures in the stream handle in metadata." + }, + "initialSleepTimeInMsForStreamHandleRetry": { + "type": "integer", + "format": "int32", + "description": "Sleep time between subsequent retries on persistent failure in stream handle." + }, + "replicationRequestsRetryInitialSleepTimeInMs": { + "type": "integer", + "format": "int32", + "description": "Sleep time betweeen subsequent requests to replication service." + }, + "replicationRequestsRetryAttempts": { + "type": "integer", + "format": "int32", + "description": "Max retry attempts for requests to replication server." + }, + "replicationFailureRetryAttempts": { + "type": "integer", + "format": "int32", + "description": "Max retry attempts for replication failures." + }, + "replicationFailureRetrySleepTimeInMs": { + "type": "integer", + "format": "int32", + "description": "Sleep time betweeen replication failure retries." + }, + "replicationFailureAttemptsBeforeSendingEvent": { + "type": "integer", + "format": "int32", + "description": "Max retry attempts before sending replication failure event." + }, + "logCleanupReplicationRetryAttempts": { + "type": "integer", + "format": "int32", + "description": "Max cleanUp attempts." + }, + "replicationExecutorThreadPoolSize": { + "type": "integer", + "format": "int32", + "description": "Max size of the thread pool in the cdp replication executor." + }, + "replicationCompleterThreadPoolSize": { + "type": "integer", + "format": "int32", + "description": "Max size of the thread pool in the cdp replication completer." + }, + "duplicateUuidCheckDiskBatchSize": { + "type": "integer", + "format": "int32", + "description": "Number of disks to check when pruning disk UUID duplicates." + }, + "replicationOrchestratorPersistSleepInSeconds": { + "type": "integer", + "format": "int32", + "description": "Max sleep time while persist the orchestrator." + }, + "logReplicationStatusPollerSleepTimeInMs": { + "type": "integer", + "format": "int32", + "description": "Sleep time after every loop of polling status for all the stream logs currently replicating on a node." + }, + "logReplicationStaleThresholdSeconds": { + "type": "integer", + "format": "int32", + "description": "For log replicating from current or any other node, if lastUpdateTime in stream replicating metadata column of stream source common is not updated for more than above threshold, then clean up." + }, + "bypassCdpBrokenEvent": { + "type": "boolean", + "description": "If true, we will not throw any events regarding vms with broken cdp status during vmware refresh." + }, + "prioritizeCdpRequiredSnapshots": { + "type": "boolean", + "description": "If set to True, all the cdp-must-have snapshots would be replicated first even if this requires skipping some snapshots in between." + }, + "replicateLatestSnapshotsForCdp": { + "type": "boolean", + "description": "If set to True, pull replicate would always the latest snapshot to the target for cdp enabled VMs." + }, + "maxVdiskHandleHistorySize": { + "type": "integer", + "format": "int32", + "description": "Number of handles for which to preserve history in the virtual disk table." + }, + "sleepTimeBeforeRetryingNextLogToReplicateInSec": { + "type": "integer", + "format": "int32", + "description": "This is the sleep time after which we retry getting the next log to replicate in cdp-replication. 
This retry is to handle various races in the orchestrator." + }, + "logReplicationSleepTimeBeforeCleanupInMs": { + "type": "integer", + "format": "int32", + "description": "Sleep time before cleanup in case of client error." + }, + "replicationOrchestratorGcJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of ReplicationOrchestrator GC jobs per node." + }, + "replicationOrchestratorGcJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency at which ReplicationOrchestratorGc job is run." + }, + "enableSDFSProfilingForCdpReplication": { + "type": "boolean", + "description": "Flag to enable sdfs IO profiling for cdp replication." + }, + "enableJavaSdkForCdpOperations": { + "type": "boolean", + "description": "Enable java sdk for cdp operations." + }, + "inMemoryActiveHandlesHolderMaintainenceTaskFrequencyInSecs": { + "type": "integer", + "format": "int32", + "description": "Frequency of running the task for in memory active handles holder in seconds." + }, + "cdpStateMaintainerTaskFrequencyInSecs": { + "type": "integer", + "format": "int32", + "description": "Frequency of running the task for in memory active handles holder in seconds." + }, + "vmBrokenDetectorGracePeriodAfterHandleRemoveInSeconds": { + "type": "integer", + "format": "int32", + "description": "Duration after which we mark a CDP VM Broken from the time the last handle was removed." + }, + "vmBrokenDetectorGracePeriodAfterPowerOnInSeconds": { + "type": "integer", + "format": "int32", + "description": "Duration after which we mark a CDP VM Broken from the time the VM was powered on." + }, + "inMemoryActiveHandlesHolderLimit": { + "type": "integer", + "format": "int32", + "description": "Max number of active handles that could be stored in the in-memory holder. This limit is set to make sure that we don't end up leaking memory indefinitely. Around 10 VMs per node is expected for CDP. Assuming 4 disks, there could be 40 active handles. The limit is set at 20x of this to take care of transient edges cases." + }, + "lastUsedIoFilterFqdn": { + "type": "string", + "description": "This is the last IO filter fqdn used during install. Its set to \"\" by default and it will be set in the code, whenever a filter is installed." + }, + "maxLogsToKeepInOrchestratorChain": { + "type": "integer", + "format": "int32", + "description": "The max number of logs to keep in the orchestrator chain. The limit exists to ensure that the orchestrator metadata does not grow too big." + }, + "maxNumberOfOrchestratorChains": { + "type": "integer", + "format": "int32", + "description": "This is the max number of snapshot-log chains to have in each cdp replication orchestrator row." + }, + "cmsThriftSelectorServerNumSelectorThreads": { + "type": "integer", + "format": "int32", + "description": "Number of selector threads in Thrift non blocking server for CDP log receiver service." + }, + "cmsThriftSelectorServerNumWorkerThreads": { + "type": "integer", + "format": "int32", + "description": "Number of selector threads in Thrift non blocking server for CDP log receiver service." + }, + "handleStalenessThresholdOnTargetClusterInMinutes": { + "type": "integer", + "format": "int32", + "description": "This is the max time a handle for log replication can be active before it will be force Gced on target." 
+ }, + "maxTransitionHistoryForStreamSourceReplicationState": { + "type": "integer", + "format": "int32", + "description": "This is the max number of transitions stored in the transition history table." + }, + "cdpInactiveHandleDurationThresholdInMinutes": { + "type": "integer", + "format": "int32", + "description": "Duration threshold after which handle is marked inactive if no beats received from cdp receiver during this interval." + }, + "gcInactiveCdpHandleFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency for GcInactiveCdpHandle Job in minutes." + }, + "scheduledCancelableTaskTimeoutDurationInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout for task during terminate." + }, + "scheduledCancelableTaskSleepDurationInSeconds": { + "type": "integer", + "format": "int32", + "description": "Sleep duration between status checks during termination." + }, + "scheduledCancelableTaskInitialDelayInSeconds": { + "type": "integer", + "format": "int32", + "description": "Initial delay for scheduled runnables." + }, + "cdpTaskRunnerMonitorSleepDurationInSeconds": { + "type": "integer", + "format": "int32", + "description": "Sleep duration between monitoring attempts in CdpTaskRunner." + }, + "cdpTaskRunnerNumRetriesDuringTermination": { + "type": "integer", + "format": "int32", + "description": "Number of times CdpTaskRunner will retry while terminating before exiting." + }, + "processActiveHandleDisksForReplicationSleepTimeInSec": { + "type": "integer", + "format": "int32", + "description": "Sleep time between two runs of processing active handle disks for cdp replication." + }, + "postSlaAssignAndPatchWarningJobMaxInstancesPerNode": { + "type": "integer", + "format": "int32", + "description": "Max Instances Per Node for PostSlaAssignAndPatchWarningJob." + }, + "maxDeltaBetweenCalculatedHostTimeAndEndHostTimeInSeconds": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in seconds. The specified interval is the maximum difference between the calculated host time and the end host time of a stream log. Differences that exceed the specified interval generate a user notification of the potential clock skew. The default value of this interval is 3." + }, + "enableCdpLogBatchGet": { + "type": "boolean", + "description": "If true getting all stream logs for a stream source should uses a batch get. If false a point query is made for each log." + }, + "enableGetCdpLiveInfoParallelism": { + "type": "boolean", + "description": "If true getCdpLiveInfo uses a thread pool to compute CDP live info for multiple VMs at once. If false the CDP live info is computed in a serial fashion for each Vm. Look at the code and comments in getVmwareCdpLiveInfo to estimate the memory impact before turning this on." + }, + "useConfigurableThreadPoolForGetCdpLiveInfoParallelism": { + "type": "boolean", + "description": "This config is only used if enableGetCdpLiveInfoParallelism is set to true. If this config( useConfigurableThreadPoolForGetCdpLiveInfoParallelism) is true getCdpLiveInfo uses a custom thread pool. If both this config and useCdpApiThreadPool are true this config takes precedence. If both this config and useCdpApiThreadPool are false the api uses the thread pool, ExecutionContext.Global, defined in VmApiImpls." + }, + "preferIpv4OverIpv6": { + "type": "boolean", + "description": "If true, for IO filter install and data transfer, we prefer IPv4 IPs of the ESXi Host to communicate with over IPv6 IPs. 
If false, we prefer IPv6 over IPv4." + }, + "numberOfThreadsForGetCdpLiveInfoThreadPool": { + "type": "integer", + "format": "int32", + "description": "This config is only used if useConfigurableThreadPoolForGetCdpLiveInfoParallelism is set to true. This config(numberOfThreadsForGetCdpLiveInfoThreadPool) represents the number of threads that should be used in the getCdpLiveInfo custom thread pool(the thread pool that is created anew on every call to the API. This value should be > 0. If set to <= 0 the code treats it as 1." + }, + "useCdpApiThreadPool": { + "type": "boolean", + "description": "This config is only used if enableGetCdpLiveInfoParallelism is set to true and useConfigurableThreadPoolForGetCdpLiveInfoParallelism is set to false. If this config(useCdpAPIThreadPool) is true, and the above conditions hold, then the long term CDP API Thread Pool, defined in the spray server app is used. The long term thread pool has its number of threads defined by numberOfThreadsForCdpApiThreadPool." + }, + "numberOfThreadsForCdpApiThreadPool": { + "type": "integer", + "format": "int32", + "description": "This config is only used useCdpApiThreadPool is set to true. This config(numberOfThreadsForGetCdpLiveInfoThreadPool) represents the number of threads that should be used in the long term CDP API custom thread pool. This value should be > 0. If set to <= 0 the code treats it as 1." + }, + "opentracingSamplingStrategy": { + "type": "string", + "description": "Jaeger Opentracing strategy. The default strategy samples at 0.000001 probability." + }, + "opentracingSamplingDurationMsec": { + "type": "integer", + "format": "int32", + "description": "Frequency by which new Sampling strategies are polled by Jaeger." + }, + "supportedVmwareIoFilterVersions": { + "type": "string", + "description": "The supported vmware IO filter version list for every vSphere version, which supports one CDM release version backward compatibility. For example, assume 5.1.0 -> 1.0.9, 5.2.0 -> 1.0.9, 5.2.1 -> 1.0.23, 5.3 -> 1.0.26, then 5.1->5.2.0 is [1.0.9], 5.1->5.2.1 is [1.0.23, 1.0.9], 5.2.0->5.3 or 5.2.1->5.3 is [1.0.26, 1.0.23, 1.0.9]. These full version numbers should match the exact version numbers returned by the VMware API and shown in the vSphere UI. Add the new version to the start of the lists to make sure the latest filter version is the first one. Actual supported IO filter version for each previous CDM release 5.1.x -> 1.0.9 5.2.0, 5.2.1 -> 1.0.9 5.2.2, 5.3.x -> 1.1.17." + }, + "ioFilterVersionFamilies": { + "type": "string", + "description": "A map from the vSphere version when an IO filter was released to all the versions that IO filter can support." + }, + "maxCdpLogRetentionInHours": { + "type": "integer", + "format": "int32", + "description": "Max number of hours we retain CDP logs." + } + } + }, + "GlobalCerebroConfig": { + "type": "object", + "properties": { + "clusterPrivateKey": { + "type": "string", + "description": "The cluster's private key." + }, + "clusterPublicCertificate": { + "type": "string", + "description": "The cluster's public certificate, anyone having this can communicate with the current cluster." + }, + "archivalLocationReconnectJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent archival location reconnect jobs per node." + }, + "auditJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent audit jobs per node." 
+ }, + "calculateEffectiveSlaJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent calculate effective SLA jobs per node." + }, + "cleanupDatabaseBackupJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent cleanup database backup jobs per node." + }, + "cleanUpReferencesJobInMemoryShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent jobs per node to clean references." + }, + "cleanupOldSdScratchBlobstoreJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent cleanup old sd scratch blobstore jobs per node." + }, + "cleanupOldSdScratchBlobstoreJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Job frequency in minutes for cleanup old sd scratch blobstore." + }, + "maxStorageRunwayRemainingInDays": { + "type": "integer", + "format": "int32", + "description": "Max storage runway remaining in days, default value of 5 years." + }, + "consolidateOnCloudJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for consolidade on cloud jobs in minutes." + }, + "createCloudImageJobFrequencyInMins": { + "type": "integer", + "format": "int32", + "description": "Interval for CreateCloudImage jobs in minutes." + }, + "createCloudImageJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent CreateCloudImage jobs per node." + }, + "throttleMaxRefCount": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent snapshots per ESXi host out of which one reference will only be used for app snapshots." + }, + "throttlePhysicalHostMaxRefCount": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent snapshots per physical host." + }, + "throttlePhysicalProxyHostMaxRefCount": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent snapshots per physical proxy host. This is different from `throttlePhysicalHostMaxRefCount` because the proxy host may have different workload characteristics than those of the primary application host." + }, + "defaultPageSize": { + "type": "integer", + "format": "int32", + "description": "Default size for pagination in cassandra, a smaller value should be used for tables with large rows." + }, + "defaultMultigetSize": { + "type": "integer", + "format": "int32", + "description": "Default size for multi-get in cassandra." + }, + "defaultSleepForSemaphoreInSeconds": { + "type": "integer", + "format": "int32", + "description": "The default amount of time to sleep when the semaphore cannot be acquired." + }, + "deleteReplicationSourceJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent delete replication source jobs per node." + }, + "deleteVcenterJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent delete vcenter jobs per node." + }, + "downloadJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent download jobs per node." + }, + "expireStreamLogJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency at which expire stream log job runs per stream source." 
+ }, + "gcStreamLogsJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency in minutes for stream log GC jobs." + }, + "gcStreamLogsJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent stream log GC jobs per node." + }, + "pruneCdpHistoryJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency in minutes for pruning cdp history jobs." + }, + "pruneCdpHistoryJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent prune CDP history jobs." + }, + "pruneCdpHistoryJobRetentionInHours": { + "type": "integer", + "format": "int32", + "description": "How many hours we should keep CDP state history." + }, + "pruneCdpHistoryJobMonthHistoryRetentionInDays": { + "type": "integer", + "format": "int32", + "description": "How many days we keep entries in the cdp_history_month table. Going w/ 33 initially to give us a buffer to handle clock skews and different time zones." + }, + "restoreFileJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent restore file jobs per node." + }, + "storageArrayRestoreFileJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent jobs to restore files to hosts per node." + }, + "enableDiskSpaceSemaphore": { + "type": "boolean", + "description": "Whether to use the disk space semaphore before creating a snapshot." + }, + "allowExpirationOfMostRecentOnDemandSnapshotWithPolicy": { + "type": "boolean", + "description": "Whether to allow expiration of the most recent on demand snapshot with policy." + }, + "snapshotIntegritySamplingRatio": { + "type": "number", + "format": "double", + "description": "Sample size used for computing hash of a snapshot." + }, + "expireJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Expire snapshot jobs interval in minutes." + }, + "exportJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent export jobs per node." + }, + "dbLogBackupDelayNotificationJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent database log backup delay notification jobs per node." + }, + "aggressiveContentionSleepTimeBetweenRetriesInMs": { + "type": "integer", + "format": "int32", + "description": "Amount of sleep time for the thread before retrying resource acquisition while in AggressiveContention State." + }, + "referenceOpMaxAttempts": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries to add/remove a reference." + }, + "referenceOpBackOffInMs": { + "type": "integer", + "format": "int32", + "description": "Initital backoff duration to add/remove a reference." + }, + "archivalPromoteJobUpdateRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for promote archival location jobs." + }, + "archivalRefreshJobUpdateRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for refresh archival location jobs." + }, + "archivalDeleteJobUpdateRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for delete archival location jobs." 
+ }, + "recoverArchivedMetadataJobRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for recovering metadata from archive." + }, + "frequentStatsUpdaterJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent frequent stats updater jobs per node." + }, + "hostLogCleanupJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent host log cleanup jobs per node." + }, + "shouldThrottleBackupJobs": { + "type": "boolean", + "description": "Whether or not to throttle backup jobs based on source load." + }, + "backupThrottlingDelayInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes to delay a backup job if we decide to throttle it." + }, + "pruneJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent jobs per node to prune job instance table." + }, + "enableParallelizableStatsUpdater": { + "type": "boolean", + "description": "Whether or not to use the parallelized implementation of stats updater." + }, + "infrequentStatsUpdateTimeoutInMins": { + "type": "integer", + "format": "int32", + "description": "Timeout while waiting for infrequent stats to complete." + }, + "infrequentStatsUpdaterJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent infrequent stats updater jobs per node." + }, + "jobMaintainerJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent job maintainer jobs per node." + }, + "liteRefreshJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent lite refresh jobs per node." + }, + "logUploadEnabled": { + "type": "boolean", + "description": "True if log upload is enabled." + }, + "logUploaderFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for stats collecting jobs in minutes." + }, + "logUploaderJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent log uploader jobs per node." + }, + "metadataMaintenanceJobFrequency": { + "type": "integer", + "format": "int32", + "description": "Interval for metadata maintenance jobs in minutes." + }, + "metadataMaintenanceJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent metadata maintenance jobs per node." + }, + "mssqlLogDownloadMaxAttempts": { + "type": "integer", + "format": "int32", + "description": "An integer that specifies the number of times to attempt to download an SQL Server log. The download job fails after exhausting the specified number of attempts." + }, + "oracleLogDownloadMaxAttempts": { + "type": "integer", + "format": "int32", + "description": "An integer that specifies the number of times to attempt to download an Oracle log. The download job fails after exhausting the specified number of attempts." + }, + "mssqlTakeSnapshotSizeEstimationBuffer": { + "type": "number", + "format": "double", + "description": "Buffer as a fraction of size for size estimation when taking a mssql snapshot." + }, + "refreshMountSharesJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval (in minutes) for the job to refresh mount shares." 
+ }, + "refreshMountSharesJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent jobs to refresh mount shares per node." + }, + "esxAddrResolvJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for nfs share jobs in minutes." + }, + "periodicUpdateDefaultTimeoutInMins": { + "type": "integer", + "format": "int32", + "description": "Default time out while waiting for any periodic update to run." + }, + "periodicUpdateAbortTimeoutInMins": { + "type": "integer", + "format": "int32", + "description": "Time out after issuing the abort to each update." + }, + "pullReplicateIntegrityProbability": { + "type": "integer", + "format": "int32", + "description": "Probability that we will decide to run snapshot integrity for a replicated snapshot." + }, + "pullMssqlLogReplicateJobIntervalInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval of mssql log replication jobs in minutes." + }, + "pullOracleLogReplicateJobIntervalInMinutes": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in minutes. Log replication jobs are scheduled at the frequency specified by the interval." + }, + "pullReplicateCloseSnappableNumRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries to close snappable before raising alert." + }, + "maximumReplicationCatchupBoundInDays": { + "type": "integer", + "format": "int32", + "description": "Maximum time in days within which replication jobs try to select the next snapshot to replicate." + }, + "minimumReplicationLagAllowedInDays": { + "type": "integer", + "format": "int32", + "description": "Minimum lag time in days a replication job waits before starts to skip snapshots." + }, + "laggedTimeToSkipFraction": { + "type": "number", + "format": "double", + "description": "Fraction of the lagged time we skip for the next snapshot to replicate. With the default of 0.5, we skip all snapshots within the first half of the lagged time and start picking next snapshot from the second half." + }, + "mssqlLogUploadJobIntervalInMinutes": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in minutes. SQL Server log upload jobs are scheduled at the specified interval." + }, + "mssqlLogUploadJobMaxLogsToUpload": { + "type": "integer", + "format": "int32", + "description": "Max number of logs to upload in a single mssql log upload job." + }, + "oracleLogUploadJobIntervalInMinutes": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in minutes. Oracle log upload jobs are scheduled at the frequency specified by the interval." + }, + "oracleLogUploadJobMaxLogsToUpload": { + "type": "integer", + "format": "int32", + "description": "An integer that specifies the maximum number of Oracle logs to upload in a single Oracle log upload job." + }, + "pullReplicatePollPeriodMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval between pull replicate jobs in minutes." + }, + "pullReplicateJobDelayRangeSeconds": { + "type": "integer", + "format": "int32", + "description": "Time by which the pull replicate job should be delayed if it can't aquire resources." + }, + "queryDatabaseBackupJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent query database backup jobs per node." 
+ }, + "refreshHypervScvmmNumRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for hyperv scvmm refresh jobs." + }, + "refreshHostNumRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for host refresh jobs." + }, + "refreshNumRetries": { + "type": "integer", + "format": "int32", + "description": "Delay the lite refresh by the specified time if the refresh throttle is not available. The unit is seconds." + }, + "refreshHostJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent host refresh jobs per node." + }, + "refreshJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent refresh jobs per node." + }, + "refreshHypervScvmmJobIntervalInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval of refresh hyperv scvmm jobs in minutes." + }, + "refreshHostJobIntervalInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval of refresh host jobs in minutes." + }, + "esxCbtBlacklistAlertIntervalInMs": { + "type": "integer", + "format": "int32", + "description": "Interval of the alert check for ESX with CBT blacklisted builds. Regardless of the value here this check will not run more frequently than the full refresh, i.e. the effective period will not be shorter than the interval specified by refreshJobIntervalInMinutes." + }, + "remoteSnapshotRefreshRetryAttempts": { + "type": "integer", + "format": "int32", + "description": "Maximum number of times to retry a remote snapshot refresh job." + }, + "removeVmwareSnapshotIssuesJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent remove vmware snapshot issues jobs per node." + }, + "snapshotIntegrityJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "How frequently to run snapshot integrity (if enabled)." + }, + "snapshotIntegrityJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent snapshot integrity jobs per node." + }, + "snapshotIntegrityMinSnapshotPeriodInMinutes": { + "type": "integer", + "format": "int32", + "description": "Maximum frequency (minimum period) for verifying a snapshot's integrity." + }, + "snapshotInitialRangeForBackOffInMs": { + "type": "integer", + "format": "int32", + "description": "Initial range for exponential backoff for Snapshot insertion." + }, + "snapshotMaxRetryAttempts": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries to add an entry into Snapshot table." + }, + "streamLogInitialRangeForBackOffInMs": { + "type": "integer", + "format": "int32", + "description": "Initial range for exponential backoff for StreamLog insertion." + }, + "streamLogMaxRetryAttempts": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries to add an entry into StreamLog table." + }, + "streamLogMaxNumPerDisk": { + "type": "integer", + "format": "int32", + "description": "Limits number of stream logs that can be created for a disk." + }, + "streamLogExtraNumTolerance": { + "type": "integer", + "format": "int32", + "description": "Extra number of stream logs beyond the maximum number specified by streamLogMaxNumberPerDisk to tolerate." 
+ }, + "streamLogAlertingPercentage": { + "type": "integer", + "format": "int32", + "description": "Percentage of max stream logs at which alerts will be made." + }, + "snapshotEarlyAllowanceInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes early we will allow a snapshot to run if it was scheduled but doesn't need to run to meet SLA." + }, + "snappableGroupInitialRangeForBackOffInMs": { + "type": "integer", + "format": "int32", + "description": "Initial range for exponential backoff for Snappable Group update." + }, + "snappableGroupMaxRetryAttempts": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries to update an entry into Snappable Group table." + }, + "snappableGroupV2Enabled": { + "type": "boolean", + "description": "Enable snappable group V2 implementation." + }, + "sourceClusterRefreshJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval between source cluster refresh jobs in minutes." + }, + "sourceClusterRefreshJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent source cluster refresh jobs per node." + }, + "sourceClusterRefreshSnappableBatchSize": { + "type": "integer", + "format": "int32", + "description": "Number of snappables whose configs should be updated together." + }, + "statCollectorJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent stat collector jobs per node." + }, + "failOnMountBootupFailure": { + "type": "boolean", + "description": "Fail mount job if mount fails to boot up." + }, + "failOnExportBootupFailure": { + "type": "boolean", + "description": "Fail mount job if export VM fails to boot up." + }, + "replicateToCloudJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "ReplicateToCloud job interval in minutes." + }, + "uploadJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Upload job interval in minutes." + }, + "uploadIndexJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Upload index job interval in minutes." + }, + "archivedIndexableSnapshotExpiryThresholdInMins": { + "type": "integer", + "format": "int32", + "description": "If a snapshot is uploaded and is going to expire but hasn't been indexed yet, then we don't expire it immediately and wait for certain threshold to reach before expiring it." + }, + "verifySlaJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent verify sla jobs per node." + }, + "acceptableThresholdViolationForExpiration": { + "type": "number", + "format": "double", + "description": "Factor by which unexpired snapshots can violate SLA Buckets before an extra snapshot is retained." + }, + "maximumAcceptableSlaThresholdUpToWeeklyInHours": { + "type": "integer", + "format": "int32", + "description": "Maximum number of hours by which unexpired snapshots can violate SLA bucket for frequencies up to Weekly." + }, + "maximumAcceptableSlaThresholdBeyondWeeklyInHours": { + "type": "integer", + "format": "int32", + "description": "Maximum number of hours by which unexpired snapshots can violate SLA bucket for Monthly, Quarterly, Yearly frequencies." + }, + "acceptableThresholdViolation": { + "type": "number", + "format": "double", + "description": "Factor by which unexpired snapshots can violate SLA Bucket before they are considered late." 
+ }, + "firstSnapshotViolationThresholdInHours": { + "type": "integer", + "format": "int32", + "description": "Amount of time it is OK to miss the first snapshot by." + }, + "minimumAcceptableSlaThresholdInMinutes": { + "type": "integer", + "format": "int32", + "description": "Minimum threshold to flag a snapshot as missed, in minutes." + }, + "maximumAcceptableSlaThresholdInDays": { + "type": "integer", + "format": "int32", + "description": "Maximum threshold allowance before reporting a missed hourly snapshot, in days." + }, + "slaGracePeriodForDailyInHours": { + "type": "integer", + "format": "int32", + "description": "Threshold allowance before reporting a missed daily snapshot, in hours." + }, + "slaGracePeriodForWeeklyInHours": { + "type": "integer", + "format": "int32", + "description": "Threshold allowance before reporting a missed weekly snapshot, in hours." + }, + "slaGracePeriodBeyondWeeklyInDays": { + "type": "integer", + "format": "int32", + "description": "Threshold allowance before reporting a missed monthly, quarterly or yearly snapshot, in days." + }, + "diskSpaceSemaphoreMinSizeInGb": { + "type": "number", + "format": "double", + "description": "Min size guarded by a single disk space semaphore, in GiB." + }, + "diskSpaceSemaphoreChunkSizeInGb": { + "type": "number", + "format": "double", + "description": "Chunk size used for distributed disk space allocation, in GiB." + }, + "diskSpaceSemaphoreMinSizeToNeedAcquisitionInGb": { + "type": "integer", + "format": "int32", + "description": "Min size to allow allocation. If the requested size is smaller than this value then the allocation will be ignored in order to avoid contention on the disk space semaphore for small and possibly frequent allocations." + }, + "distributedSemaphoreMaxRetryAttempts": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries to allocate on a single semaphore." + }, + "distributedSemaphoreInitialRangeForBackOffInMs": { + "type": "integer", + "format": "int32", + "description": "Initial range for exponential backoff." + }, + "distributedSemaphoreMaxSleepForRetryInMs": { + "type": "integer", + "format": "int32", + "description": "Max range for exponential backoff." + }, + "eventsTTLInDays": { + "type": "integer", + "format": "int32", + "description": "Number of days events are kept. This is required to determine the point till which we need to query. This should match TTL of the table." + }, + "perNodeJobMaintainerJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Number of perNodeJob maintainer jobs that can run concurrently on a given node." + }, + "enableDiskStreamSemaphore": { + "type": "boolean", + "description": "Whether to use the disk stream semaphore." + }, + "diskStreamSemaphoreMinSize": { + "type": "integer", + "format": "int32", + "description": "Min num streams guarded by a single disk stream semaphore." + }, + "diskStreamSemaphoreChunkSize": { + "type": "integer", + "format": "int32", + "description": "Chunk size used for disk stream semaphore allocation." + }, + "diskStreamSemaphoreStreamsPerDisk": { + "type": "number", + "format": "double", + "description": "Number of streams per disk for optimal throughput." + }, + "diskStreamSemaphoreScaleFactor": { + "type": "integer", + "format": "int32", + "description": "Factor by which to multiply to get the internal equivalent of a single disk stream (here, 1 disk stream = 100 semaphore units)." 
+ }, + "maxRandomSleepBetweenPointQueriesInMs": { + "type": "integer", + "format": "int32", + "description": "Maximum sleep time used for picking the random duration when pacing point queries." + }, + "alwaysForceFullSnapshot": { + "type": "boolean", + "description": "Whether or not to always force a full snapshot, even when an incremental would normally be taken." + }, + "ingestVmToDiskThresholdInGb": { + "type": "integer", + "format": "int32", + "description": "Size of full disks for a VM past which to ingest direct to disk (/sd/snapshot) and not copy." + }, + "snapshotAtomicOpMaxAttempts": { + "type": "integer", + "format": "int32", + "description": "Max attempts to add/delete a snapshot to/from virtual machine snapshot index." + }, + "snapshotAtomicOpInitialSleepInMs": { + "type": "integer", + "format": "int32", + "description": "Milliseconds to sleep between attempts to add/delete a snapshot to/from virtual machine snapshot index." + }, + "maxRandomSleepBetweenVirtualMachinePointQueriesMs": { + "type": "integer", + "format": "int32", + "description": "Maximum sleep time used for picking the random duration when pacing point queries on the virtual machine table." + }, + "maxRandomSleepBetweenPagesInMs": { + "type": "integer", + "format": "int32", + "description": "Maximum sleep time used for picking the random duration when pacing paginated iterate queries." + }, + "blobStorePrefixPathInSdSnapshot": { + "type": "string", + "description": "Blobstore root working directory." + }, + "blobStorePrefixPathInSdScratch": { + "type": "string", + "description": "Blobstore root scratch directory." + }, + "logReceiverPrefixPathInSdSnapshot": { + "type": "string", + "description": "Log Receiver root working directory." + }, + "oldBlobStorePrefixPathInSdScratchBlobstore": { + "type": "string", + "description": "Previously, we stored local transient blobs under this path; however, in order to support flash quota management, we have switched to blobStorePrefixPathInSdScratchBlobstore. This flag is currently maintained for cleanup of the old scratch blobstore directory." + }, + "blobStorePrefixPathInSdScratchBlobstore": { + "type": "string", + "description": "Root working directory for local transient blobs. These blobs will be created on flash and used only for passthrough currently." + }, + "snappablePrefixPathInSdSnapshot": { + "type": "string", + "description": "Snappables root working directory." + }, + "snappablePrefixPathInSdScratch": { + "type": "string", + "description": "Snappables root scratch directory." + }, + "eventArtifactsPrefixPathInSdSnapshot": { + "type": "string", + "description": "Event artifacts root snapshot directory." + }, + "stormPrefixPathInSdScratch": { + "type": "string", + "description": "Storms root scratch directory." + }, + "converterPrefixPathInSdScratch": { + "type": "string", + "description": "Converters root scratch directory." + }, + "downloadDirPrefixPathInSdScratch": { + "type": "string", + "description": "Download file job root scratch directory." + }, + "eventArtifactsRetentionInDays": { + "type": "integer", + "format": "int32", + "description": "How many days we should keep event artifacts for." + }, + "eventArtifactsGcFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Event artifacts GC period." + }, + "mergedSpecPrefixStrip": { + "type": "string", + "description": "Prefix to strip from MergedSpec." 
+ }, + "defaultThriftSocketRequestTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "The timeout for the thrift response." + }, + "snappableMaxRetryAttempts": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries to add an entry into Snappable table." + }, + "snappableInitialRangeForBackOffInMs": { + "type": "integer", + "format": "int32", + "description": "Initial range for exponential backoff for Snappable insertion." + }, + "vmwareSnapshotJobRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for the VMware snapshot job." + }, + "hypervSnapshotJobRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for the hyperv snapshot job." + }, + "disableNetworkOnClonedVm": { + "type": "boolean", + "description": "Disable network on exported, restored or mounted VM." + }, + "storageArraySnapshotJobRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for the storage array snapshot job." + }, + "mssqlSnapshotJobRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for the Mssql snapshot job." + }, + "maximumDbLogBlobChainLength": { + "type": "integer", + "format": "int32", + "description": "The maximum number of blobs in a single blob chain for database logs." + }, + "memoizedMetadataThreadCount": { + "type": "integer", + "format": "int32", + "description": "Number of threads to be used by MemoizedMetadata class." + }, + "slaDomainEditPoThreadCount": { + "type": "integer", + "format": "int32", + "description": "Number of threads to be used by EditSlaDomain pending operation processing." + }, + "maxSleepForImmutableDirectoryCreationInMs": { + "type": "integer", + "format": "int32", + "description": "max random sleep in between retries to acquire or release the lock to create the immutable directories." + }, + "injectCbtCorruptionException": { + "type": "boolean", + "description": "This is a parameter used only for testing the CBT corruption code path in a CREATE_SNAPSHOT job. This parameter should be removed once we have a better testing framework, that can inject failures using byteman. This parameter should be set to true only in test mode. If true, make the CopyBluePrintFile call fail to simulate CBT corruption." + }, + "numFileChannelTransferRetries": { + "type": "integer", + "format": "int32", + "description": "FileChannel.transferTo/From doesn't always transfer all the requested bytes, and it therefore needs to be retried. This is the maximum number of times we will allow it to retry." + }, + "smbDefaultOptions": { + "type": "string", + "description": "Smb default options. CSV list of options." + }, + "sdfsServiceSocketTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout while connecting to SDFS service." + }, + "sdfsServiceCopyFileSocketTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout for SDFS copyFile." + }, + "sdfsServiceCopyFileRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for SDFS copyFile." + }, + "streamRouterServiceSocketTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout while connecting to stream router service." + }, + "waspServerSocketTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout while connecting to wasp server." 
+ }, + "agentServerSocketTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout while connecting to agent server service." + }, + "sdfsPatchFIleCreationDelayInMs": { + "type": "integer", + "format": "int32", + "description": "Delay between retries of checking sdfs patch file createion job progress." + }, + "gracePeriodToDeleteJobInMs": { + "type": "integer", + "format": "int32", + "description": "Grace period to delete a job partition. This is needed to avoid a race where a job partition is created but the very first instance is not yet populated." + }, + "conflictHandlerOpRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for the conflict handler operations." + }, + "systemStorageNotificationThreshold": { + "type": "integer", + "format": "int32", + "description": "Initial warning threshold which triggers a system storage notification. The system sends a notification when the percentage of used storage capacity meets or exceeds the initial warning threshold. After the notification is sent, storage notifications are deactivated until the percentage of capacity drops below the warning reset value, or the percentage of used storage capacity meets or exceeds the higher secondary warning threshold value." + }, + "systemStorageNotificationResendThreshold": { + "type": "integer", + "format": "int32", + "description": "Secondary warning threshold which triggers additional system storage notifications. While the percentage of used storage capacity meets or exceeds the secondary warning threshold, the system continues to send storage notifications at regular specified intervals. The value of the secondary warning threshold must be greater than or equal to the value of the initial warning threshold." + }, + "systemStorageNotificationResendTimePeriodInMinutes": { + "type": "integer", + "format": "int32", + "description": "Numbers of minutes to wait between secondary warning threshold notifications." + }, + "systemStorageThresholdNotificationReset": { + "type": "integer", + "format": "int32", + "description": "Warning reset value for system storage notifications. The value represents a percentage of used storage capacity which triggers a reset of system storage notifications. When storage capacity drops below the warning reset value the initial warning threshold is reactivated. The warning reset value must be less than or equal to the value of the initial warning threshold." + }, + "systemStorageNotificationEnabled": { + "type": "boolean", + "description": "Used System Storage capacity notification. Set to true to enable notification. Set to false to disable notification." + }, + "replicationDelegationRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for replicaiton delegation requests." + }, + "appBlueprintSnapshotJobRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for a Blueprint snapshot job." + }, + "replicationDelegationDelayInMs": { + "type": "integer", + "format": "int32", + "description": "Delay between retries of replication delegation requests." + }, + "allowReplicationJobPruneWithoutDelegation": { + "type": "boolean", + "description": "Allow pruning of replicaiton job instances even if delegation fails." + }, + "gcPublicCloudImageAndInstanceJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent GC AMI jobs per node." 
+ }, + "gcPublicCloudImageAndInstanceJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for GC jobs in minutes." + }, + "throttleDifferentEventDelaySeconds": { + "type": "integer", + "format": "int32", + "description": "When posting a throttling event, we will not post the event if the most recent event for the job is different than this one, but an event the same as this one was posted less than this many seconds ago." + }, + "earlySnapshotAllowancePercentage": { + "type": "integer", + "format": "int32", + "description": "Percentage of relaxation of Sla policy that we allow. For example, for 10 minute Sla frequency, and if we have to take a snapshot at time X, we allow the snapshots to take place in [X-5, INF] minutes interval." + }, + "enableDetailedJobTaskLogging": { + "type": "boolean", + "description": "When set to true, JFL will log every time a task starts and finishes. This is useful for benchmarking, but disabled by default to reduce log spamming." + }, + "distributedBarrierPollingIntervalInMillis": { + "type": "integer", + "format": "int32", + "description": "The waiting time for distributed barrier to wait for other threads to complete register successfully." + }, + "maxEventsInEventSeries": { + "type": "integer", + "format": "int32", + "description": "If the number of events in an event series is at least this value, we will not issue postponing events." + }, + "jobConfigSaveMaxRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of times job-config-save may be retried." + }, + "maximumPossibleSlaToleranceInHours": { + "type": "integer", + "format": "int32", + "description": "Maximum possible tolerance allowed in hours for any Sla. We use this to cap the tolerance to some fixed value(12 hours) currently." + }, + "sampleFingerprintCount": { + "type": "integer", + "format": "int32", + "description": "maximum number of fingerprints to validate per disk." + }, + "hydrationGcJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent hydration gc jobs per node." + }, + "hydrationGcJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Hydration Gc job interval in minutes." + }, + "vmwareHydrationJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent vmware hydration jobs per node." + }, + "incrementalExportInParallel": { + "type": "integer", + "format": "int32", + "description": "Number of incremental export that can be operated in parallel." + }, + "vmwareHydrationJobThrottleDelayInMs": { + "type": "integer", + "format": "int32", + "description": "The amount of time a hydration job will wait before rechecking whether a new snapshot to hydrate exists on the cluster." + }, + "maxNumOfVmwareSnapshotsOnHydratedVm": { + "type": "integer", + "format": "int32", + "description": "The number of VMware snapshots that a hydrated vm can have at at a given time." + }, + "failoverToBrikJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent failover to brik jobs per node." + }, + "appAppflowsSnappableOperationsJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent AppflowsSnappableOperations jobs per node." + }, + "hydrationJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Hydration job interval in minutes." 
+ }, + "sleepTimeForTemporaryVMReadinessInSec": { + "type": "integer", + "format": "int32", + "description": "Thread sleep time in seconds before rechecking the readiness of the instantiated virtual machine." + }, + "sleepTimeForPingingTemporaryVMInSec": { + "type": "integer", + "format": "int32", + "description": "Thread sleep time in seconds before trying next ping request to the instantiated temporary virtual machine." + }, + "maxNumRetriesForTemporaryVMReadiness": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for the ping requests to the instantiated temporary virtual machine." + }, + "maxNumRetriesForTemporaryVMPingRequest": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for the ping requests to the instantiated temporary virtual machine." + }, + "maxPreserveLocalReferenceTimeInDays": { + "type": "integer", + "format": "int32", + "description": "Maximum retention time for preserveLocalReference in snapshot before it's reference can be cleaned up." + }, + "jsonSerializerUseStrictMode": { + "type": "boolean", + "description": "Whether to use strict mode during json serialization. This will for example reject malformed Option fields." + }, + "numPersistRetries": { + "type": "integer", + "format": "int32", + "description": "Number of times to retry a persist operation for metadata store update to job_status and job_instance tables. These retries are specific to address persist failures." + }, + "initialSleepForPersistRetriesInMs": { + "type": "integer", + "format": "int32", + "description": "Initial sleep duration in milliseconds when computing the sleep between retries for persist failures." + }, + "scheduleDeferredOdsJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Time interval of running the periodic job of scheduling on-demand snapshots which were deferred due to a pause." + }, + "periodicCleanupArchivedObjectsJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Time interval of running the periodic job of purging the unused metadata for ghost snappables, slas, hosts." + }, + "shouldDeleteUnusedSlas": { + "type": "boolean", + "description": "Whether to delete the unused slas in periodic unused objects cleanup." + }, + "shouldCleanupUnusedHosts": { + "type": "boolean", + "description": "Whether to delete the unused hosts in periodic unused objects cleanup." + }, + "shouldIncludeRemoteObjects": { + "type": "boolean", + "description": "Whether to purge remote objects in periodic unused objects cleanup on target clusters." + }, + "delayForArchivedObjectsCleanupInMinutes": { + "type": "integer", + "format": "int32", + "description": "Time interval in minutes by which the purging of the unused metadata is delayed." + }, + "numObjectsForObjectGcAuthzCleanup": { + "type": "integer", + "format": "int32", + "description": "number of grouped objects for which we perform authz deletion." + }, + "minDeletableObjectsForArchivedObjectsCleanup": { + "type": "integer", + "format": "int32", + "description": "Minimum count of purgeable objects to trigger actual deletion." + }, + "auditPersistMaxRetryAttempts": { + "type": "integer", + "format": "int32", + "description": "Maximum number of times to retry persisting the audit event." + }, + "auditPersistInitialRangeForBackOffInMs": { + "type": "integer", + "format": "int32", + "description": "Initial range for exponential backoff for persisting audit event." 
+ }, + "cachedNodeMethodsRefreshIntervalMs": { + "type": "integer", + "format": "int32", + "description": "The time between node list refreshes in CachedNodeMethods." + }, + "maxPendingOperationsPerQueue": { + "type": "integer", + "format": "int32", + "description": "Maximum number of pending operations allowed in an operation queue." + }, + "maxTerminatedPendingOperationsPerQueue": { + "type": "integer", + "format": "int32", + "description": "Maximum number of terminated pending operations allowed in an operation queue." + }, + "pendingOperationTtlAfterCompletionInSecs": { + "type": "integer", + "format": "int32", + "description": "Time after which completed pending operations will be GC'ed." + }, + "maxProcessOperationsQueueJobPerNode": { + "type": "integer", + "format": "int32", + "description": "Number of process operations queue job per node." + }, + "pendingOperationsMaintenanceJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for per node pending operations maintenance job." + }, + "slaPendingOperationsMaxAttempts": { + "type": "integer", + "format": "int32", + "description": "An integer that specifies the maximum number of attempts to execute an SLA Domain-related pending operation." + }, + "requeueProcessOperationsQueueJobThresholdInSeconds": { + "type": "integer", + "format": "int32", + "description": "If a pending operation is in queued state after above threshold, then re-queue an on-demand ProcessOperationsQueue job." + }, + "enableSnapshotAudit": { + "type": "boolean", + "description": "Flag to determine if snapshot audit should be captured in the database." + }, + "pruneSnapshotAuditJobRetentionInDays": { + "type": "integer", + "format": "int32", + "description": "Number of days to retain snapshot audit entries in the database." + }, + "pruneSnapshotAuditJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Job frequency in minutes for pruning snapshot audit entries." + }, + "encryptVendorCredentialsInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent encrypt vendor credentials jobs per node." + }, + "encryptVendorCredentialsJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Job frequency in minutes for encrypt vendor credentials." + }, + "parallelGraphQlUnmanagedObjectQueryEnabled": { + "type": "boolean", + "description": "If true, summary of graphql unmanaged objects is created in parallel, else it is created sequentially." + }, + "graphQlUnmanagedObjectQueryFutureTimeoutInSec": { + "type": "integer", + "format": "int32", + "description": "Timeout of futures for getting summary of all required graphql unmanaged objects." + }, + "apiParallelExecutionNumThreads": { + "type": "integer", + "format": "int32", + "description": "Number of threads in thread pool used for executing API queries in parallel. It is kept 10 assuming 8 cores with some hyperthreading benefit." + }, + "enablePeriodicCleanupArchivedObjectsJob": { + "type": "boolean", + "description": "Whether to run the cleanup archived objects job." + }, + "enableVaultForCleanupArchivedObjectsJob": { + "type": "boolean", + "description": "Whether to save purged metadata to vault." + }, + "useExpirationDatesForExpiry": { + "type": "boolean", + "description": "Temporary flag for using expiration dates to expire snapshots." 
+ }, + "asyncAuditGenerationEnabled": { + "type": "boolean", + "description": "If true, audit event will be generated and persisted asynchronously, else it will be persisted within the API." + }, + "asyncSlaEditQueuedEventEnabled": { + "type": "boolean", + "description": "If true, event for SLA domain edit pending operation will be generated and persisted asynchronously, else it will be persisted within SLA patch API." + }, + "toolSnapshotRefExpirationThreshold": { + "type": "integer", + "format": "int32", + "description": "Number of days after which we assume a snapshot reference acquired by a tool to be invalid." + }, + "emailSubscriptionJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for email subscription jobs in minutes." + }, + "toolDirPrefixPathInSdScratch": { + "type": "string", + "description": "Root scratch directory for files downloaded by tools." + } + } + }, + "GlobalCloudConfig": { + "type": "object", + "properties": { + "maxCloudRequestRetrySleepTimeMilliseconds": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. Cloud requests sleep for the specified interval between retries." + }, + "maxCloudRequestRetries": { + "type": "integer", + "format": "int32", + "description": "Specifies the number of times a Cloud request is retried." + }, + "collectLogsFromInstanceService": { + "type": "boolean", + "description": "We might want to disable collection of logs from Instance service to minimize network overhead or curb the flakiness of log download thrift end point." + }, + "retryCountForFetchingLogsFromTransientInstance": { + "type": "integer", + "format": "int32", + "description": "Number of times to retry when fetching logs from the transient instance." + }, + "retryCountToConnectToRubrikTransientInstance": { + "type": "integer", + "format": "int32", + "description": "Number of times to retry when establishing contact with transient instance." + }, + "leakedCloudDisksGcCoolOffPeriodInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time after a cloud disk creation at which it will be eligible for garbage collection." + }, + "boltGcCoolOffPeriodInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time after a bolt instance's creation after which it will be eligible for garbage collection." + }, + "polarisManagedRoleBasedAccountRegionForValidation": { + "type": "string", + "description": "Region which will be used for validation of polaris managed IAM based AWS customer account." + }, + "shouldValidateCrossAccountCredentials": { + "type": "boolean", + "description": "Boolean value which determines whether validations for customer's Cross account should be done." + }, + "transientInstanceLogMaxSizeInMB": { + "type": "integer", + "format": "int32", + "description": "Transient Instance logs won't be copied locally if the log bundle size is more than this value." + }, + "transientInstanceLogChunkSizeInMB": { + "type": "integer", + "format": "int32", + "description": "Log Chunk Size to read in each thirft call, in MB." + }, + "transientInstanceLogDirMaxSizeFraction": { + "type": "number", + "format": "double", + "description": "Transient Instance logs won't be copied locally if transientInstanceLogDir size will become more than this fraction of total space on local partition." 
+ }, + "transientInstanceLogMinFreeSpaceFraction": { + "type": "number", + "format": "double", + "description": "Transient Instance logs won't be copied locally if the free space on the local partition will fall below this fraction." + }, + "maxNumberOfDisksSupportedForConversion": { + "type": "integer", + "format": "int32", + "description": "An integer that specifies the maximum number of disks supported by the converter." + }, + "boltLogDir": { + "type": "string", + "description": "The path to local storage for copying Bolt logs. The system creates this directory if it does not already exist." + }, + "rivetLogDir": { + "type": "string", + "description": "The path to local storage for copying Rivet logs. The system creates this directory if it does not already exist." + }, + "converterLogDir": { + "type": "string", + "description": "Top level dir where converter logs are copied locally. Will be created if it doesn't exist." + }, + "temporaryInstanceLogDir": { + "type": "string", + "description": "Top level dir where temporaryInstance logs are copied locally. Will be created if it doesn't exist." + }, + "minStoragePerDiskInMBForCICInBolt": { + "type": "integer", + "format": "int32", + "description": "This is the minimum required storage in megabytes within bolt for each disk for one cloud image conversion job." + }, + "stormUsageTimeInMillisecondsForOneMB": { + "type": "integer", + "format": "int32", + "description": "This is the estimated time for storm usage for doing cloud on for a disk of size 1 MB. Here the assumption is that we are able to do conversion at a speed of 10 MBps." + }, + "stormRunTimeInMinutesForLongInstance": { + "type": "integer", + "format": "int32", + "description": "This is the minimum time for which a storm instance should run to be considered as a Long storm." + }, + "enableAzureCloudOnlyPlatform": { + "type": "boolean", + "description": "A Boolean value that determines whether to support Azure cloud functionality exclusively. When 'false' supports Azure, AWS, and GCP cloud functionality. When 'true,' supports Azure cloud functionality only, disabling AWS and GCP cloud functionality. This parameter is for testing purposes only." + }, + "cloudComputeConnectivityCheckInParallel": { + "type": "integer", + "format": "int32", + "description": "Number of connectivity check threads for cloud compute objects in parallel." + }, + "storageToComputeFactorForCloudConsolidation": { + "type": "number", + "format": "double", + "description": "This is the factor with which compute cost for consolidation is multiplied to compare with storage cost of unconsolidated data to judge if public cloud consolidation should be performed." + }, + "cloudComputeConnectivityCheckJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency of job to check connectivity for cloud compute in archival locations and cloud sources." + }, + "minTimeForBoltReadyInSec": { + "type": "integer", + "format": "int32", + "description": "This is the time duration by which we need to wait for services to come up on bolt after launching it on the cloud." + }, + "cloudComputeConnectivityCheckSleepInMillis": { + "type": "integer", + "format": "int32", + "description": "Sleep time between each try of the operations for cloud compute connectivity check job." + }, + "cloudComputeInitScriptCompletionCheckSleepInMillis": { + "type": "integer", + "format": "int32", + "description": "Sleep time between each check for the init script to run to its completion." 
+ }, + "connectivityCheckInstanceValidSpanInMillis": { + "type": "integer", + "format": "int32", + "description": "The longest time a cloud compute connectivity check instance is allowed to run. Any instance that has been launched over this will be considered leaked and gc'ed. Current value is 6 hours based on tests. (21600000 = 6 * 60 * 60 * 1000)." + }, + "cloudMetadataBackupWindowSize": { + "type": "integer", + "format": "int32", + "description": "The number of metadata backup files to keep on archive." + }, + "instanceServiceLogLevel": { + "type": "string", + "description": "Verbose level at which logs are produced in instance service. This parameter needs to be specified as a part of the ApplyCloudDependencyRequest." + }, + "skipTransientInstanceShutdown": { + "type": "boolean", + "description": "For debugging purposes we might want access to live instance for debugging, so preventing auto shutdown of transient instance during failures." + }, + "skipTransientInstanceShutdownWhenFailure": { + "type": "boolean", + "description": "For debugging purposes, we might want to skip temporary instance shutdowns when we fail to create image from temp instance, due to any reason. This is independent of the config skipTransientInstanceShutdown that always skips their shutdown." + }, + "isCloudComputeConnectivityJobGloballyEnabled": { + "type": "boolean", + "description": "If the cloud compute connectivity check job should be enabled as a globally enabled job. In dev environment this should be false since in most cases we don't want to run this job, as it might cause leakages if the test pod is destroyed while the job is running." + }, + "logArchivalWorkingDirForConverterAndTemporaryInstance": { + "type": "string", + "description": "This path is used to create logs on converter and temporary instance, and fetch on coordinator. Log creation on these transient instance is preformed with relative path and subsequently deleted after transferring to brik." + }, + "maxWaitTimeForStormRequestInSeconds": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in seconds. Conversion jobs wait until the Storm manager retrieves the required transient Storm instances or until the specified interval elapses." + }, + "timeToWaitForCreateRawDisksToTerminateInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time to wait for create raw disks step to finish on converter. This was roughly put out to be around 4 days assuming 12MBps." + }, + "minimumTimeToWaitInSecsBetweenTwoDiskOperationsInAzure": { + "type": "integer", + "format": "int32", + "description": "Minimum time to wait after executing a disk operation on Azure before executing another disk operation." + }, + "useIo1VolumesForAwsCloudConversion": { + "type": "boolean", + "description": "If set to true, the volume types used in cloud-conversion for aws is io1 instead of standard." + }, + "maxIopsForIo1Volumes": { + "type": "integer", + "format": "int32", + "description": "This is the max number of iops the io1 volumes will be provisioned with in cloud conversion job." + }, + "sleepTimeForCloudImageCreationInMs": { + "type": "integer", + "format": "int32", + "description": "This is the amount of milliseconds we wait before polling for image creation status in the cloud." + }, + "doMinimalTaggingForCloudOn": { + "type": "boolean", + "description": "If set to true then minimal tagging is done to resources created on cloud." 
+ }, + "useRangeReadsForDownloadFromCloud": { + "type": "boolean", + "description": "If set to true, download objects from cloud using range reads instead of trying to download entire object in a single call. The direct download of an object can deliver much better throughput on a good network, however, it can cause resiliency issues due to intermittent failures on flaky or low bandwidth networks. The range reads based download provides incremental progress in such environments." + }, + "consecutiveFailureCountForCloudonFallback": { + "type": "integer", + "format": "int32", + "description": "An integer that specifies the number of times that a cloudon incremental conversion job can fail before the system falls back to full conversion." + }, + "shouldUseOsInformationFromPeerSnapshots": { + "type": "boolean", + "description": "If set to true, then we use the OS disk information from other snapshots in the chain too if the information is not available with the snapshot that is getting converted. If set to false, then we don't rely on other snapshots' OS disk information in case the information is not available with current snapshot; Instead mount the snapshot and retrieve the information." + }, + "enableLeakedSnapshotGC": { + "type": "boolean", + "description": "Enable GC for cloud snapshots that might be leaked by failed cloud conversion jobs and failed garbage collection attempts." + }, + "shouldRebootWindowsTempInstanceTwiceForStaticIp": { + "type": "boolean", + "description": "If set to true, we will try to reboot the temp instance again if ping fails in the first attempt. This additional reboot is needed for static IP based Windows VMs." + }, + "isArchivalReaderWithConversionsEnabled": { + "type": "boolean", + "description": "If set to true, cloud image conversions will be allowed if this cluster connects as an archival reader." + }, + "minStoragePerDiskInMBForCICInRivet": { + "type": "integer", + "format": "int32", + "description": "This is the minimum required storage in megabytes within Rivet for each disk for one cloud image conversion job." + }, + "enableBootDiagnosticsForTemporaryInstanceOnAzure": { + "type": "boolean", + "description": "If set to true, captures the boot diagnostics of the instance in a newly created container in the general purpose storage account." + }, + "iopsPerGBForIo1Volumes": { + "type": "integer", + "format": "int32", + "description": "IOPS per GB provided for io1 volumes on AWS during cloud image conversion. This cannot exceed 50 because of AWS constraints." + }, + "useRivetImageForCloudConversion": { + "type": "boolean", + "description": "If set to true, then we use a Rivet image for cloud image conversion." + }, + "failureToScheduleConversionNotificationDedupeWindow": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in minutes. Multiple failures to schedule a conversion job for a snappable generate a single notification for this window." + }, + "shouldTransferProxyInfoToBolt": { + "type": "boolean", + "description": "If set to true, then we send compute proxy and archival proxy information to the bolt instance." + }, + "maxTagsPerScopeLocation": { + "type": "integer", + "format": "int32", + "description": "Specifies the maximum number of user-defined tags per archival location." + }, + "maxTagsOnAws": { + "type": "integer", + "format": "int32", + "description": "Specifies the maximum number of tags allowed on AWS." 
+ }, + "maxTagsOnAzure": { + "type": "integer", + "format": "int32", + "description": "Specifies the maximum number of tags allowed on Azure." + }, + "maxTagsOnGcp": { + "type": "integer", + "format": "int32", + "description": "Specifies the maximum number of tags allowed on GCP." + } + } + }, + "GlobalCloudUtilizationConfig": { + "type": "object", + "properties": { + "cloudUtilizationForecastLogicalDataSizeGB": { + "type": "number", + "format": "double", + "description": "Size of the logical data the SLA Domain is assumed to protect to forecast the cloud utilization of an SLA Domain." + }, + "cloudUtilizationForecastAnnualGrowthPercent": { + "type": "number", + "format": "double", + "description": "Percentage of annual data growth assumed to forecast the cloud utilization of an SLA Domain." + }, + "cloudUtilizationForecastCompressionFactor": { + "type": "number", + "format": "double", + "description": "Compression factor assumed to forecast the cloud utilization of an SLA Domain." + }, + "cloudUtilizationForecastDailyChangeRatePercent": { + "type": "number", + "format": "double", + "description": "Daily change rate percentage assumed to forecast the amount of cloud utilization of an SLA Domain." + }, + "cloudUtilizationForecastWeeklyChangeRatePercent": { + "type": "number", + "format": "double", + "description": "Weekly change rate percentage assumed to forecast the amount of cloud utilization of an SLA Domain." + }, + "cloudUtilizationForecastMonthlyChangeRatePercent": { + "type": "number", + "format": "double", + "description": "Monthly change rate percentage assumed to forecast the amount of cloud utilization of an SLA Domain." + }, + "cloudUtilizationForecastQuarterlyChangeRatePercent": { + "type": "number", + "format": "double", + "description": "Quarterly change rate percentage assumed to forecast the amount of cloud utilization of an SLA Domain." + }, + "cloudUtilizationForecastYearlyChangeRatePercent": { + "type": "number", + "format": "double", + "description": "Yearly change rate percentage assumed to forecast the amount of cloud utilization of an SLA Domain." + }, + "defaultCloudUtilizationForecastRegionForAws": { + "type": "string", + "description": "If we do not have prices listed for region configured for AWS archival location, we assume this region for forecasting cloud utilization on it." + }, + "defaultCloudUtilizationForecastRegionForAzure": { + "type": "string", + "description": "If we do not have prices listed for region configured for Azure archival location, we assume this region for forecasting cloud utilization on it." + }, + "defaultCloudUtilizationForecastRegionForGcp": { + "type": "string", + "description": "If we do not have region configured for GCP archival location or if do not have prices listed for region configured for GCP archival location, we assume this region for forecasting cloud utilization on it." + }, + "cloudUtilizationPricingConfigsCachedDurationSec": { + "type": "integer", + "format": "int32", + "description": "Duration in seconds for which cloud utilization pricing configs are cached in memory. If any price values are updated, we need to either restart the spray-server service or wait for this duration for new price values to be used for forecasting." + }, + "awsGp2VolumeSizeUsedForArchivalOperationsInGB": { + "type": "integer", + "format": "int32", + "description": "Size of AWS gp2 volume assumed to be used for archival operations like consolidation and reverse." 
+ }, + "awsSt1VolumeSizeUsedForArchivalOperationsInGB": { + "type": "integer", + "format": "int32", + "description": "Size of AWS st1 volume assumed to be used for archival operations like consolidation and reverse." + }, + "sizerTimeoutInSec": { + "type": "integer", + "format": "int32", + "description": "Timeout for calling sizer script, in seconds." + }, + "sizerCpuRuntimeLimitInSec": { + "type": "integer", + "format": "int32", + "description": "CPU runtime limit for sizer script, in seconds." + }, + "sizerMemoryLimitInMB": { + "type": "integer", + "format": "int32", + "description": "Memory limit for sizer script, in MB." + }, + "awsPrices": { + "type": "string", + "description": "Aws prices. Storage prices are per GB-month. For STANDARD storage price, we take the maximum price. Instance prices are per hour. For EC2 instance, we take Linux price. Volume prices are per GB-month. Data retrieval prices are per GB. Early deletion prices are per GB-month. Early deletion thresholds are min duration for each storage class. These prices and thresholds are generated by get_aws_pricing_configs.py. We should update e2e test in cloudout_utilization_forecast_test.py also when we change these prices." + }, + "azurePrices": { + "type": "string", + "description": "Azure prices. Storage prices are for blobs and LRS redundancy per GB-month. For storage price, we take the recommended maximum price. For virtual machine, we take CentOS or Ubuntu Linux prices. Instance prices are per hour. Volume prices are per month. Data retrieval prices are for blobs and LRS redundancy per GB. For data retrieval price, we take the recommended price. Early deletion prices are per GB-month. Early deletion thresholds are min duration for each storage class. These prices are generated by script get_azure_pricing_configs.py. We should update e2e test in cloudout_utilization_forecast_test.py also when we change these prices." + }, + "gcpPrices": { + "type": "string", + "description": "Gcp prices. Storage prices are per GB-month. Early deletion prices are per GB-d. Early deletion thresholds are min duration for each storage class. These prices are generated by get_gcp_pricing_configs.py. We should update e2e test in cloudout_utilization_forecast_test.py also when we change these prices." + } + } + }, + "GlobalCrystalConfig": { + "type": "object", + "properties": { + "enableSingleNodeMode": { + "type": "boolean", + "description": "Whether to display UI associated with single node appliance." + }, + "isCommunityEdition": { + "type": "boolean", + "description": "Whether to appliance is a Rubrik CommunityEdition appliance." + }, + "enableThrottlingUi": { + "type": "boolean", + "description": "Whether to display UI associated with throttling." + }, + "enableDisplayDomainNames": { + "type": "boolean", + "description": "Whether to display domain names on the login page." + }, + "webSessionTimeoutMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes a web session can be idle before token expiration." + }, + "webSessionExpirationTimeMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes before a web session is expired regardless of last usage time." + }, + "webSessionsPerUser": { + "type": "integer", + "format": "int32", + "description": "Number of active web sessions a user can have." + }, + "enableConvertToCloud": { + "type": "boolean", + "description": "Whether to display UI for convert-to-cloud feature." 
+ }, + "enableLanguageSelection": { + "type": "boolean", + "description": "Whether to display language selection." + }, + "tracerName": { + "type": "string", + "description": "Tracer type to use; zipkin." + }, + "tracerAddress": { + "type": "string", + "description": "Tracer address to send opentracing to." + }, + "tracerPort": { + "type": "integer", + "format": "int32", + "description": "Tracer port to send opentracing to." + }, + "useZxcvbnForUserPasswords": { + "type": "boolean", + "description": "Require use of ZXCVBN library when creating new passwords." + }, + "isHyperVAzureLaunchOnCloudEnabled": { + "type": "boolean", + "description": "Whether to enable enable cloud on of HyperV VMs to Azure." + }, + "enableDcaArchivalLocationCreation": { + "type": "boolean", + "description": "Whether to enable DCA locations on the cluster. A DCA location maybe an archival location or an Aws account for native protection. When set to false, creating, updating or reconnecting to DCA locations will be disabled in the UI and API, but existing DCA locations will still be visible and are not automatically deleted." + }, + "maxUserVisibleExceptionTruncationChars": { + "type": "integer", + "format": "int32", + "description": "Max number of characters of the Event Internal error message we will log if truncation is enabled for the message." + }, + "enablePreserveMoidOption": { + "type": "boolean", + "description": "Whether to enable the Preserve MOID option in VMware vSphere instant recovery dialog." + }, + "objectCountValueRefreshInterval": { + "type": "integer", + "format": "int32", + "description": "Interval for refreshing the memoization of object count value for each organization." + }, + "hierarchySearchCacheRefreshInterval": { + "type": "integer", + "format": "int32", + "description": "Interval for refreshing the hierarchy search cache." + }, + "isDownloadFileFromSnapshotDisabledOnCluster": { + "type": "boolean", + "description": "Determines whether admins/org admins/end users are able to perform file downloads from within snapshots." + }, + "isEmailNotificationEnabled": { + "type": "boolean", + "description": "Determines whether to send email notification to user or not." + }, + "cleanupHealthCheckStatusJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent cleanup health check status jobs per node." + }, + "cleanupHealthCheckStatusJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval to run the cleanup health check status job in minutes." + }, + "auditEventDebouncerPollFrequency": { + "type": "integer", + "format": "int32", + "description": "Number of milliseconds in between each poll to determine if there are audit events to publish on the backend." + }, + "defaultMaxRetriesOfEmailSend": { + "type": "integer", + "format": "int32", + "description": "Default maximum number of retry attempts for sending notification emails." + }, + "healthCheckFailureNotificationInterval": { + "type": "integer", + "format": "int32", + "description": "The number of minutes to wait before sending another notification for a Cluster Health Check related failure. Each type of Health Check waits on its own interval." + }, + "sendEmailJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent email sending job per node." + }, + "emailTemplateFilesPath": { + "type": "string", + "description": "Email templates file path." 
+ }, + "enableVmTags": { + "type": "boolean", + "description": "Whether to display UI associated with Vm tags/categories." + }, + "verifyIsGhostJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent verify is_ghost jobs per node." + }, + "verifyIsGhostJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval to run the verify is_ghost job in minutes." + }, + "verifyEffectiveSlaDomainNameJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent verify effective_sla_domain_name jobs per node." + }, + "verifyEffectiveSlaDomainNameJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval to run the verify effective_sla_domain_name job in minutes." + }, + "eventObjectNameSearchLimit": { + "type": "integer", + "format": "int32", + "description": "Number of matching object ids to return when conducting an event search by object name." + }, + "eventQuerylimit": { + "type": "integer", + "format": "int32", + "description": "Maximum query limit for events and event_series table." + }, + "enableManagedVolumeRecoverXTag": { + "type": "boolean", + "description": "Whether to show RecoverX application tag option when creating managed volume." + }, + "enableEventsDb": { + "type": "boolean", + "description": "Whether to use EventsDb 2.0. On migrated clusters, it is true. On unmigrated clusters, it should be false, unless it is manually set to true, which means the cluster will be mirated during next upgrade." + }, + "gcEventsJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency of GC_EVENTS job in minutes." + }, + "gcEventsJobMaxInstancesPerNode": { + "type": "integer", + "format": "int32", + "description": "Max number of instances of GC_EVENTS job per node." + }, + "shouldAllowCloudOnForLinuxVMsOnAzure": { + "type": "boolean", + "description": "Whether to allow Cloud On for non-Windows VMs onto Azure." + }, + "globalSearchEnabled": { + "type": "boolean", + "description": "Feature flag for global search frontend and backend." + }, + "enableRetentionLockSla": { + "type": "boolean", + "description": "When set to true, the user will be able to create Retention Locked SLAs and convert existing regular SLAs to Retention Locked type. This is disabled by default as most users do not have a requirement of this feature. It can only be modified by Rubrik support and not by a user." + }, + "enableEmailForFirstBackupRetryFailure": { + "type": "boolean", + "description": "Whether to send email on first backup retry failure." + }, + "emailKbUrlPrefix": { + "type": "string", + "description": "URL prefix for knowledge base link in notification emails." + }, + "maximumSecsToSpendApplyingQueuedPointUpdates": { + "type": "integer", + "format": "int32", + "description": "Maximum number of seconds to spend applying queued point updates at the end of periodic refresh before swapping memory." + }, + "enableNonBlockingSlaAssignment": { + "type": "boolean", + "description": "Whether to trigger non-blocking cache refreshes during SLA assignment instead of blocking cache refreshes. This is a global override for how SLA assignments are treated in the cache, and will result in inconsistent results being returned from the cache in exchange for speed. Should only be enabled for very large clusters whose UI is nearly unusable due to slow SLA assignments." 
+ }, + "enableTelemetryManagement": { + "type": "boolean", + "description": "Feature flag for telemetry management UI." + }, + "enableDataClassificationPreviewer": { + "type": "boolean", + "description": "Whether to enable the Data Classification Previewer page." + }, + "defaultEventQueryLimit": { + "type": "integer", + "format": "int32", + "description": "Default limit value in the Event API query." + }, + "nonAdminUserQueryLimitMultiplier": { + "type": "integer", + "format": "int32", + "description": "We add a multiplier to the query limit when a non admin user performs the query since they are authorized to view less events. This will reduce the overall number of queries needed." + }, + "defaultEventCsvRowLimit": { + "type": "integer", + "format": "int32", + "description": "Default number of events to return as row in CSV from the Event CSV download API." + }, + "enableFilesetFailoverCluster": { + "type": "boolean", + "description": "Whether to enable Failover Cluster on UI." + }, + "emailRubrikLogoResourcePath": { + "type": "string", + "description": "Static link to rubrik logo image used in sending emails." + }, + "emailInformationalIconResourcePath": { + "type": "string", + "description": "Static link to rubrik logo image used in sending emails." + }, + "emailFailureIconResourcePath": { + "type": "string", + "description": "Static link to rubrik logo image used in sending emails." + }, + "emailWarningIconResourcePath": { + "type": "string", + "description": "Static link to rubrik logo image used in sending emails." + }, + "reportsRubrikLogoResourcePath": { + "type": "string", + "description": "Static link to rubrik logo img used in sending email reports." + }, + "useMultipleQueriesReplaceInOperatorLatestEvents": { + "type": "boolean", + "description": "Used to test performance and functionality of switching from In operator to multiple Equal operator queries for latest events." + }, + "maxNumRowsEqualQueryLatestEvents": { + "type": "integer", + "format": "int32", + "description": "In order to avoid potential memory leaks from the new implementation of aggregate object API for latest_events, we set a maximum rowSize. If the requested rowSize is larger than this value we use the legacy implementation." + }, + "invalidateUniversalRefreshRootRetries": { + "type": "integer", + "format": "int32", + "description": "The number of times to retry updating the Universal Refresh Root cache UUIDs." + }, + "invalidateUniversalRefreshRootSleepInMs": { + "type": "integer", + "format": "int32", + "description": "The amount of time to sleep before trying to update the Universal Refresh Root cache UUIDs." + }, + "managedHierarchyCacheRefreshIntervalInSeconds": { + "type": "integer", + "format": "int32", + "description": "The time between background ManagedHierarchyCache refreshes." + }, + "staleHierarchyRefreshIntervalInSeconds": { + "type": "integer", + "format": "int32", + "description": "The time between background fetches of the Universal Hierarchy from the ManagedHierarchyCache in Spray Server to refresh stale hierarchies." + }, + "enablePeriodicBackgroundManagedHierarchyCacheRefresh": { + "type": "boolean", + "description": "Whether to enable to workflow to periodically execute a full refresh of the ManagedHierarchyCache in the background. This turns non-blocking cache invalidations into a no-op." 
+ }, + "enableEfficientManagedHierarchyRootRefreshes": { + "type": "boolean", + "description": "Whether to use the more efficient enumerators when refreshing a root cache in the managed hierarchy cache." + }, + "slaOperationPruneThresholdInMinutes": { + "type": "integer", + "format": "int32", + "description": "The age at which an SLA operation should be pruned." + }, + "filterGhostSnappablesOutOfManagedHierarchyCache": { + "type": "boolean", + "description": "Whether ghost snappables will be filtered out of the managed hierarchy cache." + }, + "filterSnappablesAsSlaDomainAncestorsInManagedHierarchyQuery": { + "type": "boolean", + "description": "Whether permissions checked on SLA Domains will consider snappables as ancestors of the SLA Domain." + }, + "maximumSecondsToSpendMergingQueuedRootRefreshes": { + "type": "integer", + "format": "int32", + "description": "Maximum number of seconds to spend merging queued root refreshes during a periodic background refresh of the Managed Hierarchy Cache." + }, + "maximumRootRefreshQueueSize": { + "type": "integer", + "format": "int32", + "description": "Maximum number of root refreshes that can be queued up to be merged during a periodic background refresh of the Managed Hierarchy Cache." + }, + "enableAsyncSlaAssignment": { + "type": "boolean", + "description": "A Boolean that specifies whether asynchronous SLA operation assignments in Spray are enabled, meaning the SLA operation poller is running. Note that if this is disabled, any calls to the async SLA assignment endpoint will result in an SLA assignment appearing not to complete forever." + }, + "slaManagerMaintenancePeriodicityInSeconds": { + "type": "integer", + "format": "int32", + "description": "Number of seconds between SLA operation maintenance loops." + }, + "enableMinuteSlaGranularity": { + "type": "boolean", + "description": "Whether to enable Minute SLA Granularity on UI." + }, + "minMinuteSlaFrequency": { + "type": "integer", + "format": "int32", + "description": "Minimum minute value for SLA Frequency available by UI." + }, + "allowSantaLogo": { + "type": "boolean", + "description": "Whether to show Santa logo during Christmas." + }, + "enableRubrikAppSyslogFacility": { + "type": "boolean", + "description": "Whether to allow configuration of the RubrikApp syslog facility. WARNING - Please ask shield before enabling this configuration, and document it in the SFDC notes which show up in the MOTD for the customer that this toggle is enabled." + }, + "asyncSlaAssignmentCompletionAgeInSeconds": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in seconds. When a CalculateEffectiveSla job completes, SLA Domain assignment is assumed complete and present in the managed hierarchy cache for Spray after the specified interval elapses. Set this value to the maximum time required for a full periodic hierarchy refresh for Spray." + }, + "enableCacheUniversalHierarchy": { + "type": "boolean", + "description": "Whether to cache the universal hierarchy in the managed hierarchy cache." + }, + "enableFilesetFailoverClusterNodeOrder": { + "type": "boolean", + "description": "Whether to enable Failover Cluster node order feature on UI." + }, + "allowMinutelySlaOnlyForManagedVolumes": { + "type": "boolean", + "description": "Whether to allow minutely SLAs Only for Managed Volumes." + }, + "enableMonitoringSubscriptions": { + "type": "boolean", + "description": "Whether to allow subscriptions to monitoring page to be scheduled." 
+ }, + "enableRemoteClusterEventRouting": { + "type": "boolean", + "description": "Whether to route event adding to job-fetcher as a part of the event delegation workflows." + }, + "enableVmwareInPlaceRecovery": { + "type": "boolean", + "description": "Indicates whether in-place recovery should be enabled for VMware virtual machines or not." + }, + "enableVmwareNetworkSelection": { + "type": "boolean", + "description": "Flag to determine if Vmware snapshots will have a network selection functionality for Export/Instant Recover/Mount." + }, + "enableVmwareStretchCluster": { + "type": "boolean", + "description": "Flag to determine if Vmware will have a stretch cluster functionality enabled or not." + }, + "enableTotpReminder": { + "type": "boolean", + "description": "Whether or not to reminder the account about Two-Factor Authentication." + }, + "enableClusterRollingUpgrades": { + "type": "boolean", + "description": "Flag to determine if cluster will have a rolling upgrade functionality enabled or not." + }, + "enableCloudCostInsights": { + "type": "boolean", + "description": "Flag to determine if we should to show the cloud cost insights." + } + } + }, + "GlobalDlcConfig": { + "type": "object", + "properties": { + "statsApiBulkFetchLimit": { + "type": "integer", + "format": "int32", + "description": "Max number of stats that can be fetched in a single bulk API request." + }, + "snapshotDbQueryBatchSize": { + "type": "integer", + "format": "int32", + "description": "Number to snapshots of an object to be fetched in single DB query." + }, + "snapshotQueryTimeoutInSec": { + "type": "integer", + "format": "int32", + "description": "Max time to get snapshots of a snappable. It's a heuristic. We don't expect it to be more than 120 sec." + }, + "asyncSnapshotSlaAssignmentQueuedEventEnabled": { + "type": "boolean", + "description": "If true, event for queueing SLA domain assignment to snapshots will be generated and persisted asynchronously, else it will be persisted within API to assign SLA to snapshots." + }, + "asyncSnapshotSlaAssignmentQueuedEventTimeoutInSec": { + "type": "integer", + "format": "int32", + "description": "Max time to persist event for queueing SLA domain assignment to snapshots. It's a heuristic. We don't expect it to be more than 300 sec." + }, + "asyncSlaEditQueuedEventTimeoutInSec": { + "type": "integer", + "format": "int32", + "description": "Max time to persist event for queueing SLA edit operation. It's a heuristic. We don't expect it to be more than 60 sec." + }, + "asyncPostSlaPatchWarningsEnabled": { + "type": "boolean", + "description": "If true, job to generate post SLA patch warnings will be persisted asynchronously, else it will be persisted within SLA patch API." + }, + "asyncPostSlaPatchWarningsTimeoutInSec": { + "type": "integer", + "format": "int32", + "description": "Max time to persist post SLA patch warnings within SLA patch API. It's a heuristic. We don't expect it to be more than 120 sec." + }, + "statsQueryTimeoutInSec": { + "type": "integer", + "format": "int32", + "description": "Timeout for querying stats from DB in seconds." + }, + "statsApiParallelExecutionNumThreads": { + "type": "integer", + "format": "int32", + "description": "Number of threads in thread pool used for executing stats API queries in parallel." + }, + "slaSummaryTimeoutInSec": { + "type": "integer", + "format": "int32", + "description": "Max time to get SLA domain summary. It's a heuristic. We don't expect it to be more than 120 sec." 
+ }, + "slaSummaryBatchSize": { + "type": "integer", + "format": "int32", + "description": "Batch size to create SLA domain summary." + }, + "legalHoldQueryTimeoutInSec": { + "type": "integer", + "format": "int32", + "description": "Max time to get legal hold summary. It's a heuristic. We don't expect it to be more than 120 sec." + }, + "skipAsyncWorkflowsForPerfMeasurement": { + "type": "boolean", + "description": "Flag to skip API async workflows. This is meant to be enabled only for unit tests." + }, + "asyncAuditGenerationTimeoutInSec": { + "type": "integer", + "format": "int32", + "description": "Timeout for generating and persisting audit event asynchronously." + }, + "snappableBulkQueryTimeoutInSec": { + "type": "integer", + "format": "int32", + "description": "Timeout for snappable bulk query." + }, + "heldSnapshotsQueryPageSize": { + "type": "integer", + "format": "int32", + "description": "Number of held snapshots to fetch from DB in single page." + }, + "maxAllowedDurationFromQueuedPoJobInSecs": { + "type": "integer", + "format": "int32", + "description": "Max permitted duration between queued job instance start time and new operation queue time." + }, + "delayForObjectGcVaultEntryCleanupInMinutes": { + "type": "integer", + "format": "int32", + "description": "Time interval in minutes after which entries will be removed from object_gc_vault_entry table." + }, + "delayForGhostAccessCountInMinutes": { + "type": "integer", + "format": "int32", + "description": "Time interval in minutes after which a ghost object starts tracking its accesses via metric and log." + }, + "snappableTypesToGc": { + "type": "string", + "description": "Slash separated snappable types which should be GC'ed by CleanupArchivedObjectsJob." + }, + "delayForPurgedSnappablesStatsCleanupInMinutes": { + "type": "integer", + "format": "int32", + "description": "Minimum time in minutes after stat's last update time after which the stat entries can be deleted." + }, + "objectProtectionLogEnabled": { + "type": "boolean", + "description": "Feature flag for object protection logs. If false, it will not add any entry in compact_object_protection_log table." + }, + "dlcServerPort": { + "type": "integer", + "format": "int32", + "description": "Port on which DLC GRPC server runs." + }, + "slaEngineServiceEnabled": { + "type": "boolean", + "description": "Feature flag for DLC Sla Engine gRPC service. If false, the code will not use the service and its implementation. Since Sla Engine service uses Sla Config service in its implementation, this flag should only be set if Sla Config service is working." + }, + "snapshotServiceEnabledForVerifySla": { + "type": "boolean", + "description": "Flag to decide whether or not to use the snapshot service gRPC service for getting the snapshot in the verify sla job." + }, + "verifySlaJobOverGrpcEnabled": { + "type": "boolean", + "description": "Flag to decide whether or not to use the gRPC implementation for the verify sla job." + }, + "queryLatestSnapshotForVerification": { + "type": "boolean", + "description": "Whether to query latest snapshots for sla verification." + }, + "replicationComplianceCheckGracePeriodInMins": { + "type": "integer", + "format": "int32", + "description": "Grace period after which a snapshot will be considered in replication compliance computation." + }, + "snapshotServiceEnabled": { + "type": "boolean", + "description": "Flag to decide whether to use the gRPC implementation of snapshot service." 
+ }, + "expireSnappableSnapshotJobOverGrpcEnabled": { + "type": "boolean", + "description": "Whether or not to use the gRPC implementation for expire snappable snapshot job." + } + } + }, + "GlobalEnvisionConfig": { + "type": "object", + "properties": { + "reportsPrefixPathInSdSnapshot": { + "type": "string", + "description": "Reports root working directory." + }, + "reportsPrefixPathInSdScratch": { + "type": "string", + "description": "Reports root scratch directory." + }, + "fscryptTempDirectory": { + "type": "string", + "description": "fscrypt temp directory." + }, + "reportsDirTtlInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time to live of files in reports working directory." + }, + "reportsScratchDirTtlInSeconds": { + "type": "integer", + "format": "int32", + "description": "Staleness in seconds before a report file on scratch can be deleted. This defaults to number one week." + }, + "reportsCsvDirTtlInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time to live of files in reports working directory." + }, + "generateCustomReportsJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval to run the generate custom report jobs in minutes." + }, + "generateCustomReportsJobStaggeringOffsetInMinutes": { + "type": "integer", + "format": "int32", + "description": "Staggering offset to schedule the job in minutes." + }, + "createPrecannedReportForOrganization": { + "type": "boolean", + "description": "Whether to create precanned report for orgs." + }, + "generateReportDataSourcesJobStaggeringOffsetInMinutes": { + "type": "integer", + "format": "int32", + "description": "Staggering offset, in minutes, to schedule report data source job." + }, + "emailReportsJobStaggeringOffsetInMinutes": { + "type": "integer", + "format": "int32", + "description": "Staggering offset for scheduling email report jobs." + }, + "emailSubscriptionJobStaggeringOffsetInMinutes": { + "type": "integer", + "format": "int32", + "description": "Staggering offset for scheduling email subscription jobs." + }, + "updateReportStatsFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency for update report stats job." + }, + "cleanupReportsJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency of reports cleanup job." + }, + "cleanupReportJobInstancesFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency of reports cleanup job." + }, + "cleanupSubscriptionsJobInstancesFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency of subscriptions cleanup job." + }, + "cleanupReportStatsRetentionMonths": { + "type": "integer", + "format": "int32", + "description": "Staleness in months before report stats can be deleted." + }, + "jobMonitoringMaintenanceFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency of the jobMonitoring maintenance task that will clean up jobs that ended over 24 hours ago, and reset counts stored in the job_monitoring_count table." + }, + "jobMonitoringCountCacheFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency of the jobMonitoring count cache task that will update the counts in job_monitoring_count table." + }, + "cleanupJobActionRetentionDays": { + "type": "integer", + "format": "int32", + "description": "Staleness in months before report stats can be deleted." 
+ }, + "cleanupReportJobInstanceForSnapshotJobsRetentionDays": { + "type": "integer", + "format": "int32", + "description": "Staleness in days before a report job instance can be deleted." + }, + "cleanupReportJobInstanceForLogJobsRetentionDays": { + "type": "integer", + "format": "int32", + "description": "Staleness in days before a log instance can be deleted." + }, + "cleanupReportJobInstanceIntervalDays": { + "type": "integer", + "format": "int32", + "description": "The number of days prior to the cutoff date for which report job instances should be deleted." + }, + "cleanupReportStatsFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency for cleanup report stats job." + }, + "cleanupJobActionTimeSeriesFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency for cleanup job action time series job." + }, + "diagnosticDatasourceRetentionDays": { + "type": "integer", + "format": "int32", + "description": "Number of days in TaskDiagnostic data source." + }, + "backFillAuthzForReportableTasks": { + "type": "boolean", + "description": "Whether to use add authz info on reportable tasks during report data source generation." + }, + "maxDaysInOneHistoricalTaskJob": { + "type": "integer", + "format": "int32", + "description": "Maximum days on historical task data source job can include." + }, + "useHistoricSlaForReports": { + "type": "boolean", + "description": "Whether to use historic sla in custom reports. This is currently limited to protection task reports only." + }, + "verifySlaWithOnDemandSnapshot": { + "type": "boolean", + "description": "Whether to use on demand snapshots for sla verification." + }, + "enableManagedChildObjectsForReports": { + "type": "boolean", + "description": "Whether to show reports data for objects managed by a parent. Should default to false to prevent any redundancy." + }, + "enableGlobalObjectsCaching": { + "type": "boolean", + "description": "Whether to use the optimized version of the creator cache when generating data sources." + }, + "enableBackdoorFailure": { + "type": "boolean", + "description": "flag used to make events fail to test internal error." + }, + "preventMonitoringMigration": { + "type": "boolean", + "description": "flag used to prevent job monitoring migration." + }, + "creatorCacheThreadCount": { + "type": "integer", + "format": "int32", + "description": "How many threads to use when parallelizing the computation for the report snappables." + }, + "frequentReportDataSourceAuthzThreadCount": { + "type": "integer", + "format": "int32", + "description": "How many threads to use when parallelizing the authz computation for the frequent report data source." + }, + "useFscryptTempForReportSqliteTempFileStorage": { + "type": "boolean", + "description": "Whether to use fscrypt temp directory for report temp store storage, otherwise will use /var/temp." + }, + "useInteractiveIoForApi": { + "type": "boolean", + "description": "Whether to use interactive io for API request, need to restart spray server to make the config effective." + }, + "archivedJobMonitoringRetentionDays": { + "type": "integer", + "format": "int32", + "description": "Number of days to keep entry in archived_job_mointoring_info." + }, + "archivedJobMonitoringGCJobFrequency": { + "type": "integer", + "format": "int32", + "description": "Frequency of ARCHIVED_JOB_MONITORING_GC job in minutes." 
+ }, + "archivedJobMonitoringGCJobInstancePerNode": { + "type": "integer", + "format": "int32", + "description": "Max instances per node for ARCHIVED_JOB_MONITORING_GC job." + }, + "maxReplicationSnapTimestampEntries": { + "type": "integer", + "format": "int32", + "description": "Max number of replication snapshot timestamps stored in sqlite." + }, + "jobMonitoringCountCacheLimit": { + "type": "integer", + "format": "int32", + "description": "Maximmum number of rows before cached value for monitoring count is shown in the UI." + } + } + }, + "GlobalEnvoyConfig": { + "type": "object", + "properties": { + "envoyMssqlFileTransferParallelism": { + "type": "integer", + "format": "int32", + "description": "Number of concurrent requests for transferring a file from the Rubrik cluster to a remote MSSQL host through the Envoy VM." + }, + "envoyMssqlFileRestoreParallelism": { + "type": "integer", + "format": "int32", + "description": "Number of concurrent requests for restoring a file from the Rubrik cluster to a remote MSSQL host through the Envoy VM." + } + } + }, + "GlobalFilesetConfig": { + "type": "object", + "properties": { + "rbaBulkReaderTimeoutInSec": { + "type": "integer", + "format": "int32", + "description": "Socket timeout in seconds for bulk read operations." + }, + "rbaBulkReaderTransferSize": { + "type": "integer", + "format": "int32", + "description": "Maximum size for bulk read data transfers." + }, + "rbaBulkReaderTransferSizeAix": { + "type": "integer", + "format": "int32", + "description": "Maximum size for bulk read data transfers for AIX hosts." + }, + "rbaBulkReaderBatchFiles": { + "type": "integer", + "format": "int32", + "description": "Maximum number of files to accumulate in a single batch read." + }, + "rbaBulkReaderEnableSparseFiles": { + "type": "boolean", + "description": "Whether sparse files support should be enabled for backup. Sparse file handling is only enabled if this config is true AND platform specific sparse file config is true." + }, + "rbaBulkStatTimeoutInSec": { + "type": "integer", + "format": "int32", + "description": "Socket timeout in seconds for bulk stat operations." + }, + "rbaBulkStatBatchFiles": { + "type": "integer", + "format": "int32", + "description": "Maximum number of files to accumulate in a single bulk stat batch." + }, + "rbaBulkStatBatchChildren": { + "type": "boolean", + "description": "If the Bulk stat call to a directory should batch the children for a directory in multiple calls." + }, + "rbaBulkStatChildrenBatchSize": { + "type": "integer", + "format": "int32", + "description": "Number of children to return in a single bulk stat call for a directory. This is used only if rbaBulkStatBatchChildren is set to true." + }, + "rbaBulkStatPathMaxRetryAttempts": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries to stat a path for certain scenarios. Currently this is only used for Flashblade SMB which stating mount root directory always fails in the first try and causing empty fileset." + }, + "rbaBulkStatWritebackInodeStack": { + "type": "boolean", + "description": "Whether to automatically page the inode stack to secondary storage to limit memory usage." + }, + "rbaBulkWriterTimeoutInSec": { + "type": "integer", + "format": "int32", + "description": "Socket timeout in seconds for bulk write operations." + }, + "rbaBulkWriterBatchFiles": { + "type": "integer", + "format": "int32", + "description": "Maximum number of files to accumulate in a single batch write." 
+ }, + "rbaBulkWriterTransferSize": { + "type": "integer", + "format": "int32", + "description": "Maximum size for bulk write data transfers." + }, + "rbaBulkWriterBatchDirectories": { + "type": "integer", + "format": "int32", + "description": "Maximum number of directories to accumulate in a single batch write." + }, + "rbaBulkWriterBatchSymlinks": { + "type": "integer", + "format": "int32", + "description": "Maximum number of symlinks to accumulate in a single batch write." + }, + "rbaBulkWriterBatchSetStats": { + "type": "integer", + "format": "int32", + "description": "Maximum number of set stats to accumulate in a single batch write." + }, + "rbaBulkWriterMaxLogRestoreErrors": { + "type": "integer", + "format": "int32", + "description": "Maximum number of file errors to log when restoring files." + }, + "rbaBulkWriterEnableSparseFiles": { + "type": "boolean", + "description": "Whether sparse files support should be enabled for restore. Sparse file handling is only enabled if this config is true AND platform specific sparse file config is true." + }, + "filesetDownloadMergeTimeoutPerPartialInSec": { + "type": "integer", + "format": "int32", + "description": "Max number of seconds to wait for merging each download partial file, the total timeout is value * number of partials to merge." + }, + "filesetEnablePatchCache": { + "type": "boolean", + "description": "Whether PatchCache should be enabled for filesets." + }, + "filesetIngestUseSingleReplica": { + "type": "boolean", + "description": "Whether to use single-replica (mirrored) or Reed-Solomon \\ for Fileset Ingest." + }, + "filesetLocalPatchUseFlashThresholdInGB": { + "type": "integer", + "format": "int32", + "description": "Minimum amount of flash required per node to enable creating local blobs in flash. This is enabled only for passthrough currently." + }, + "filesetIngestInteractiveIOPriority": { + "type": "boolean", + "description": "Whether to set MJF IO priority to INTERACTIVE for Fileset Ingest." + }, + "filesetByHostInitialRangeForBackOffInMs": { + "type": "integer", + "format": "int32", + "description": "Initial range for exponential backoff for FilesetByHostId insertion." + }, + "filesetByHostMaxRetryAttempts": { + "type": "integer", + "format": "int32", + "description": "Number of retries to add an entry into FilesetByHostId table." + }, + "filesetBackupScriptTimeoutSec": { + "type": "integer", + "format": "int32", + "description": "Number of seconds after which the pre-backup or post-backup scripts are killed if they have not completed execution." + }, + "filesetBackupScriptErrorHandling": { + "type": "string", + "description": "Default action if pre-backup or post-backup scripts fail." + }, + "filesetTemporaryMountPrefix": { + "type": "string", + "description": "Prefix for temporary fileset ext4 mounts." + }, + "filesetMaxParallelStreams": { + "type": "integer", + "format": "int32", + "description": "Maximum number of parallel streams we can have at the same time when fetching filesets." + }, + "filesetMaxVolumeSizeMb": { + "type": "integer", + "format": "int32", + "description": "Maximum size for a Fileset volume. This should be same as managed volume managedVolumeMaxDiskSizeMb." + }, + "filesetMinVolumeSizeMb": { + "type": "integer", + "format": "int32", + "description": "Minimum size for a Fileset volume. This should be same as managed volume managedVolumeMinDiskSizeMb." + }, + "filesetOptimalNumDisks": { + "type": "integer", + "format": "int32", + "description": "Optimal number of disks per snapshot." 
+ }, + "filesetExtraSlackPercent": { + "type": "integer", + "format": "int32", + "description": "Extra percentage to be added to physical space estimates with the purpose of accommodating estimation errors." + }, + "filesetExtraSlackPercentFirstSnapshot": { + "type": "integer", + "format": "int32", + "description": "Extra percentage to be added to the first snapshot's physical space estimate with the purpose of delaying the growth in number of disks." + }, + "filesetPartitionMassiveChangeThresholdPct": { + "type": "integer", + "format": "int32", + "description": "How much a single partition must grow to force the data to be re-fetched and the volumes to be resized. Note that with repartitioning enabled we will split partitions that go over the maximum partition limit. It can happen however that a partition is initially very small (say, in the case of a single partition fileset or in case of the last partition of the fileset). For example, if a partition is initially 10GB in size and it grows to 100GB we may not force a repartition since 100GB is below the maximum partition size limit. However we may end up with 10x10GB disks inside this partition. To avoid this extreme case in which a partition is initially very small and grows a lot suddenly we use this threshold to force it to be re-fetched even if it did not exceed the maximum partition size." + }, + "filesetEnableStaggeredFulls": { + "type": "boolean", + "description": "If set, this will enable staggered forced full across partitions to ensure blob chain length for the snapshot doesn't go above staggeredForceFullMaxChainLength." + }, + "staggeredForceFullMaxChainLength": { + "type": "integer", + "format": "int32", + "description": "For a passthrough fileset snapshot, longer snapshot chain in cloud could cause performance issues. This denotes the maximum chain length for a passthrough snapshot in archival location with consolidation disabled. We will force fulls at partition level to ensure blob chain length crosses does not cross this threshold." + }, + "staggeredForceFullMaxChainLengthWithConsolidation": { + "type": "integer", + "format": "int32", + "description": "For a passthrough fileset snapshot, longer snapshot chain in cloud could cause performance issues. This denotes the maximum chain length for a passthrough snapshot in archival location with consolidation enabled. We will force fulls at partition level to ensure blob chain length crosses does not cross this threshold." + }, + "staggeredForceFullPartitionsPct": { + "type": "integer", + "format": "int32", + "description": "Maximum percentage of partitions that is set to force full once it is identified that snapshots get close to staggeredFullMaxChainLength. If total partition is 50 and staggeredFullPartitionsPct=3, we will force full on a max of 2 partitions in a snapshot." + }, + "staggeredForceFullPartitionsPctWithConsolidation": { + "type": "integer", + "format": "int32", + "description": "Maximum percentage of partitions that is set to force full once it is identified that snapshots get close to staggeredForceFullMaxChainLengthWithConsolidation. If total partition is 50 and staggeredFullPartitionsPct=3, we will force full on a max of 2 partitions in a snapshot." + }, + "filesetPartitionExt4MinDiskSizeInMB": { + "type": "integer", + "format": "int32", + "description": "Minimum disk size to allocate for a EXT4 file system for fileset for estimating disk consumption. Modify ext4_min_disk_size if you modify this or stats calculations will get affected." 
+ }, + "filesetEnablePartitioning": { + "type": "boolean", + "description": "If set, we will enable partitioning for fileset backups and restores. Incrementals based off a non-partitioned base will still be non-partitioned." + }, + "filesetEnableRepartitioning": { + "type": "boolean", + "description": "If set, we will enable automatic re-partitioning of filesets across snapshots. If a single partition grows too much or shrinks too much we will re-partition it." + }, + "filesetMaximumPartitionSizeInMb": { + "type": "integer", + "format": "int32", + "description": "Maximum partition size hint. When possible, enforce that the partition size should be smaller than max_partition_size. Default value is 400 GB." + }, + "filesetMinimumPartitionSizeInMb": { + "type": "integer", + "format": "int32", + "description": "Minimum partition size hint. When possible, enforce that the partition size should be higher than min_partition_size. Default value is 25 GB." + }, + "filesetTargetPartitionSizeInMb": { + "type": "integer", + "format": "int32", + "description": "Partition size hint. When possible, we break up partitions in multiples of the target_partition_size. We enforce whenever possible that the partition size should be higher than min_partition_size and smaller than max_partition_size. Default value is 100 GB." + }, + "filesetScalableMaximumPartitionSizeInMb": { + "type": "integer", + "format": "int32", + "description": "Maximum partition size hint. When possible, enforce that the partition size should be smaller than max_partition_size. Default value is 400 GB. This value is deprecates filesetMaximumPartitionSizeInMb from 5.0 onwards." + }, + "filesetScalableMinimumPartitionSizeInMb": { + "type": "integer", + "format": "int32", + "description": "Minimum partition size hint. When possible, enforce that the partition size should be higher than min_partition_size. Default value is 50 GB. This value is deprecates filesetMinimumPartitionSizeInMb from 5.0 onwards." + }, + "filesetScalableTargetPartitionSizeInMb": { + "type": "integer", + "format": "int32", + "description": "Partition size hint. When possible, we break up partitions in multiples of the target_partition_size. We enforce whenever possible that the partition size should be higher than min_partition_size and smaller than max_partition_size. Default value is 200 GB. This value is deprecates filesetTargetPartitionSizeInMb from 5.0 onwards." + }, + "enableDynamicPartitionSizingForBaseSnapshots": { + "type": "boolean", + "description": "Enable dynamic fileset partitioning for fileset. Enabling this will tune partition size dynamically to keep the number of partition lower. Dynamic partitioning will be done only for snapshots with no base(full snapshots)." + }, + "minNumPartitionsForDynamicPartitioning": { + "type": "integer", + "format": "int32", + "description": "Minimum number of partitions to trigger dynamic partitioning. If the number of partitions is less than this, dynamic partitioning will not be triggered for the fileset." + }, + "targetNumPartitionsForDynamicPartitioning": { + "type": "integer", + "format": "int32", + "description": "Target number of partitions for dynamic partitioning. We adjust partition size dynamically based on fileset size to ensure the number of partitions is less than this target. However we increase the partition size to a maximum of maximumTargetPartitionSizeInMb." 
+ }, + "maxNumPartitionsForDynamicPartitioning": { + "type": "integer", + "format": "int32", + "description": "Maximum desired number of partitions for dynamic partitioning. If the number of partitions is greater than this, a warning will be logged and we will increase a stat counter. With maximumTargetPartitionSizeInMb of 2 TB, this can cover filesets upto 500 TB without warning." + }, + "maximumTargetPartitionSizeInMb": { + "type": "integer", + "format": "int32", + "description": "Maximum target partition size that can be set with dynamic partitioning." + }, + "filesetPerPartitionFetchDataRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries on a per partition basis during fetch data before failing the job." + }, + "filesetPerPartitionFetchDataUndoRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries on a per partition basis during fetch data undo for each of the pipeline stages. This is currently used only for Fileset upload undo()." + }, + "filesetEnableDataFetchThrottling": { + "type": "boolean", + "description": "Enable throttling for partitioned fileset fetch." + }, + "filesetThrottleDelay": { + "type": "string", + "description": "A pair of values that specify the minimum and maximum time, in seconds, to wait before repeating an attempt to select a node to fetch data." + }, + "filesetThrottleMaxCount": { + "type": "integer", + "format": "int32", + "description": "Maximum number of throttle events per job." + }, + "filesetThrottleEventsMinIntervalInSec": { + "type": "integer", + "format": "int32", + "description": "Minimum time interval in seconds between two of the same throttle events. It will suppress an event with the same message from being sent before the end of the interval." + }, + "filesetFetchDataInitialBackoffInMs": { + "type": "integer", + "format": "int32", + "description": "Initial amount of time to sleep while retrying a fetch data call for a specific partition." + }, + "filesetCopyDataRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries on a per patch file basis during copy data before failing the job." + }, + "filesetIgnoreRestoreErrors": { + "type": "boolean", + "description": "Tolerate failure during file write or creation during fileset restore and export jobs." + }, + "filesetNumFilesReducedWarningPercentage": { + "type": "integer", + "format": "int32", + "description": "Issues a warning if the number of files is reduced by this percentage or more." + }, + "filesetScriptStatusCheckPeriodMs": { + "type": "integer", + "format": "int32", + "description": "Number of milliseconds after which status of a script running on host is checked." + }, + "filesetScriptTerminateTimeoutMs": { + "type": "integer", + "format": "int32", + "description": "Number of milliseconds after which a script that has received SIGTERM must terminate. After this a SIGKILL will be sent." + }, + "filesetPrefixPathInSdMount": { + "type": "string", + "description": "Fileset mount root working directory." + }, + "filesetCopyDataStatusCheckMinPeriodInMs": { + "type": "integer", + "format": "int32", + "description": "Min number of millis to wait between progress status and cancellation checks during copy data stage." + }, + "filesetCopyDataStatusCheckMaxPeriodInMs": { + "type": "integer", + "format": "int32", + "description": "Max number of millis to wait between progress status and cancellation checks during copy data stage." 
+ }, + "filesetPatchConversionsPerNode": { + "type": "integer", + "format": "int32", + "description": "Number of concurrent patch conversions per node." + }, + "filesetMaxDataFetchNodes": { + "type": "integer", + "format": "int32", + "description": "Maximum number of nodes to use when fetching data. If 0 there is no limit." + }, + "filesetMaxRestoreNodes": { + "type": "integer", + "format": "int32", + "description": "Specifies the maximum number of nodes to use when restoring data. When this value is 0, there is no maximum number of nodes." + }, + "filesetDataFetchPartitionsPerNodePerJob": { + "type": "integer", + "format": "int32", + "description": "This value specifies the maximum number of cluster-wide fileset fetch job partitions that can concurrently run on a single node per job. This is throttled by the fileset local config value of filesetDataFetchPartitionsPerNode." + }, + "filesetBackupPersistMetadataDelayInMs": { + "type": "integer", + "format": "int32", + "description": "Delay after persisting of metadata to database. This is used for throttling Cassandra load for filesets with large number of partitions." + }, + "filesetBackupEnableDelayForUploadConfig": { + "type": "boolean", + "description": "Delay after persisting of metadata to database for fileset upload job configs for direct archive. This is used for throttling database load for filesets with large number of partitions." + }, + "filesetSdfsServiceSocketTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout while connecting to SDFS service." + }, + "filesetAllowBackupWithSymlinksOnly": { + "type": "boolean", + "description": "Whether to succeed backup with symlinks only, without any files. When set to false, backup will fail if there are no files." + }, + "useNfsForNasMetadata": { + "type": "boolean", + "description": "Whether to use user-level Nfs protocol to create filesystem metadata for NAS." + }, + "useUserLevelSmbForNasBackup": { + "type": "boolean", + "description": "Whether to use user-level Smb protocol to perform NAS SMB share backups." + }, + "useUserLevelSmbForNasRestore": { + "type": "boolean", + "description": "Whether to use user-level Smb protocol to perform NAS SMB share restores." + }, + "useUserLevelSmbForNasRefreshShare": { + "type": "boolean", + "description": "Whether to use user-level Smb protocol to refresh NAS SMB share. This is set to false in environments where user-level Smb protocol authentication does not work, which can cause user account lockout or other issues." + }, + "fallbackToCifsOnUserLevelSmbRestoreFailure": { + "type": "boolean", + "description": "Whether to fallback to CIFS kernel mount after failing to restore a SMB share with user-level Smb protocol. When false we will retry with libsmb2." + }, + "userLevelSmbRestoreRetries": { + "type": "integer", + "format": "int32", + "description": "How many times to retry a restore with user-level Smb from the scala side. Only important if we do not fallback to kernel CIFS upon such a failure." + }, + "filesetMke2fsOptions": { + "type": "string", + "description": "Optional parameters passed to mke2fs command line." + }, + "filesetMountExt4Options": { + "type": "string", + "description": "Optional parameters passed to mount ext4 command line." + }, + "filesetSmbMountSupportedVersions": { + "type": "string", + "description": "Current supported SMB version for fileset SMB mount Better to list from highest to lowest." 
+ }, + "filesetSmbMountNoShareSock": { + "type": "boolean", + "description": "Whether to enable the nosharesock mount option for CIFS shares." + }, + "filesetSmbMountNoMapPosix": { + "type": "boolean", + "description": "Whether to enable the nomapposix mount option for CIFS shares." + }, + "filesetSmbBackupUid": { + "type": "string", + "description": "Set backupuid option so SMB server can assign backup admin privilege to SMB client." + }, + "filesetSmbReparseDataEnabled": { + "type": "boolean", + "description": "Whether to enable reparse data (symlinks) for SMB shares during metadata fetch." + }, + "filesetSmbSigningEnabled": { + "type": "boolean", + "description": "Whether to enable signing for SMB shares, signing is used only when server requires signing." + }, + "filesetSmbSigningRequired": { + "type": "boolean", + "description": "Whether to force signing for SMB shares." + }, + "filesetSmbEncryptionEnabled": { + "type": "boolean", + "description": "Whether to enable encryption for SMB shares, encryption is used only when server requires global or share level encryption." + }, + "filesetSmbNtlmSessionKeyExchange": { + "type": "boolean", + "description": "Whether to perform NTLM session key exchange for SMB share authentication, some NAS servers fail SMB authentication or encryption when NTLM session key is not exchanged." + }, + "filesetSmbHiddenFolderAttributes": { + "type": "integer", + "format": "int32", + "description": "Bit mask of SMB file attributes for hidden folders. The attributes are defined in MS-SMB2 protocol specification and in src/cpp/code/search/fsmetadata/inodeparser/smb_utils.cpp. Sample attribute values are 0x1 (READONLY), 0x2 (HIDDEN), 0x4 (SYSTEM), 0x20 (ARCHIVE), 0x100 (TEMPORARY), 0x200 (SPARSE) 0x800 (COMPRESSED), 0x1000 (OFFLINE), 0x4000 (ENCRYPTED). The attributes 0x10 (DIRECTORY), 0x80 (NORMAL) and 0x400 (REPATSE_POINT) are ignored since they are special. When any one of these attributes is present on a folder, it will be excluded from SMB FileSet backup." + }, + "filesetSmbHiddenFolderNames": { + "type": "string", + "description": "Colon separated list of folder names treated as hidden folders, and excluded from SMB FileSet backup." + }, + "filesetSmbDfsMaxReferralversion": { + "type": "integer", + "format": "int32", + "description": "The maximum referral version used for SMB DFS Get Referral request. Use value 0 to disable DFS traversal." + }, + "filesetRunFsck": { + "type": "boolean", + "description": "Whether to run fsck before mounting the filesystem." + }, + "filesetFsckOptions": { + "type": "string", + "description": "Optional parameters to pass to the fsck command line." + }, + "filesetMdadmAssembleOptions": { + "type": "string", + "description": "Optional parameters passed to mdadm assemble command." + }, + "filesetRetryFetchData": { + "type": "boolean", + "description": "Whether to retry failed fetch data task during fileset backup." + }, + "filesetFetchDataRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for fileset fetch data task." + }, + "filesetFetchDataRetryForJobRunTime": { + "type": "boolean", + "description": "Whether to retry the fetch task even after it runs out retries. It will be retried for the duration equal to amount of time since job's start_running_time to first retry time." + }, + "filesetRestoreRetryForJobRunTime": { + "type": "boolean", + "description": "Whether to retry the restore data task even after it runs out of retries. 
It will be retried for the duration equal to the amount of time since the job's start_running_time to the first retry time." + }, + "filesetMultiplierForJobRunTime": { + "type": "integer", + "format": "int32", + "description": "Multiplier on job run time to decide if we should retry the job if filesetMultiplierForJobRunTime is set to true." + }, + "filesetRestoreDataRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for fileset execute restore task." + }, + "filesetUnmountShareRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries to unmount share during fileset backup." + }, + "filesetUnmountShareInitialTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Initial timeout value before unmounting share during fileset backup." + }, + "filesetDeleteDirectoryRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries to delete directory during fileset backup." + }, + "filesetDeleteDirectoryInitialTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Initial timeout value before deleting directory during fileset backup." + }, + "isilonApiPort": { + "type": "integer", + "format": "int32", + "description": "Port number of Isilon OneFS NAS REST API service." + }, + "isilonSnapshotExpiration": { + "type": "integer", + "format": "int32", + "description": "Expiration period of Isilon snapshot in seconds." + }, + "isilonUseSnapshotChangelist": { + "type": "boolean", + "description": "Whether to use the isilon changelist feature for backup." + }, + "netappApiPort": { + "type": "integer", + "format": "int32", + "description": "Port number of NetApp ONTAP API service." + }, + "flashbladeApiPort": { + "type": "integer", + "format": "int32", + "description": "Port number of FlashBlade API service." + }, + "netappExtraSmbOptions": { + "type": "string", + "description": "Additional options for mounting NetApp SMB shares." + }, + "filesetMaxPartitionedTasksInUploadStagePerJob": { + "type": "integer", + "format": "int32", + "description": "This value specifies the maximum number of partitioned tasks that can concurrently be in the upload stage for a fileset passthrough backup." + }, + "filesetEnableNASDAUploadThrottling": { + "type": "boolean", + "description": "Whether to enable throttling of number of partitions in upload stage per job. If enabled, only filesetMaxPartitionedTasksInUploadStagePerJob can be uploading in parallel per job." + }, + "filesetEnableNASDANetworkAndArchiveThrottle": { + "type": "boolean", + "description": "Whether to acquire network and archival throttle for fileset NAS-DA job as part job acquire phase. If this config is disabled, the total number of archival jobs (UPLOAD + NAS-DA) can exceed the throttles configured for archive operations as the throttles will be used only for UPLOAD jobs. User should consider reducing these throttles to ensure we are not overloading archival operations." + }, + "filesetSleepForUploadThrottleInMs": { + "type": "integer", + "format": "int32", + "description": "The amount of time to sleep when the upload throttle cannot be acquired." + }, + "filesetSleepForSpaceAcquisitionInMs": { + "type": "integer", + "format": "int32", + "description": "The amount of time to sleep when the required space cannot be acquired." 
+ }, + "shouldSkipFilesetArrayBackupObjectCleanup": { + "type": "boolean", + "description": "Whether we should skip cleaning up the array volume snapshots and recreated volume groups during an array-enabled fileset backup. This can be useful when we need time to to examine these objects when debugging backup failures. These objects will require manual removal afterwards." + }, + "disconnectArrayVolumeRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries to disconnect the array volumes attached to the ingest host in an array-enabled fileset backup job." + }, + "disconnectArrayVolumeSleepInMs": { + "type": "integer", + "format": "int32", + "description": "Duration in milliseconds to sleep between each retry to disconnect the array volumes attached to the ingest host in an array-enabled fileset backup job." + }, + "maxSpaceToReserveForPassthroughInGB": { + "type": "integer", + "format": "int32", + "description": "The maximum amount of space to reserve for a passthrough backup." + }, + "filesetEnableNASDALocalSpaceThrottling": { + "type": "boolean", + "description": "Whether to enable space throttling for direct archive. If enabled we will reserve a max of maxSpaceToReserveForPassthroughInGB or two largest partition size worth of local scratch space per direct archive job." + }, + "invalidateHostRootUuidRetries": { + "type": "integer", + "format": "int32", + "description": "The number of times to retry updating the HostRoot cache UUIDs." + }, + "invalidateHostRootUuidSleepInMs": { + "type": "integer", + "format": "int32", + "description": "The amount of time to sleep before trying to update the HostRoot cache UUIDs." + }, + "compareFileIdentifier": { + "type": "boolean", + "description": "Compare inode number/file id as part of determining whether or not to fetch files during backups We found under SMB and NFS, inode number/device number may not be reliable indicator of sameness of a file so this config should not be enabled under these protocols." + }, + "filesetRestoreCachingEnabled": { + "type": "boolean", + "description": "Whether to set 'caching_enabled' flag in the MJF MergedSpec for Fileset restore." + }, + "filesetDownloadCachingEnabled": { + "type": "boolean", + "description": "Whether to set 'caching_enabled' flag in the MJF MergedSpec for Fileset download." + }, + "filesetRestoreReadAheadEnabled": { + "type": "boolean", + "description": "Whether to use sequential read-ahead for Fileset restore." + }, + "filesetIngestEnableDedup": { + "type": "boolean", + "description": "Whether to use extent index and content index based deduplication for fileset ingest at partition level." + }, + "filesetIngestDedupUseRouter": { + "type": "boolean", + "description": "Whether to use content router for dedup-enabled filesets." + }, + "filesetIngestEnableSplitMjf": { + "type": "boolean", + "description": "Whether to use SplitMjf to ingest data." + }, + "filesetIngestUseMdCompatibleSplitMjf": { + "type": "boolean", + "description": "Whether to use SplitMjf to mount legacy mdadm stacks." + }, + "distributeNasPassthroughUploadAcrossNodes": { + "type": "boolean", + "description": "Flag of whether to distribute upload workloads to all nodes instead of just the one node the job is running on." + }, + "deleteChildConfigDirectory": { + "type": "boolean", + "description": "Whether to the delete the child config directory used as part of the ParallelizableReversibleTask implementation of the Fileset backup job." 
+ }, + "parallelFilesetIngestChildTaskRetries": { + "type": "integer", + "format": "int32", + "description": "The number of times to retry each child job in the parallel ingest task for the Fileset backup job." + }, + "validateNasCertificate": { + "type": "boolean", + "description": "Whether to validate the NAS server's certificate during backup." + }, + "alertPercentageThresholdForDeletedInodes": { + "type": "integer", + "format": "int32", + "description": "The percentage threshold to alert the deleted inodes between backups. When the percentage of deleted inodes exceeds the threshold, alert is sent." + }, + "failJobIfNoFilesFound": { + "type": "boolean", + "description": "If scan phase reports total number of files to be fetched as zero then fail the job." + }, + "backupJobFileFailureThreshold": { + "type": "integer", + "format": "int32", + "description": "Specifies a percentage threshold. Backup jobs fails when the percentage of failed backups for files or data exceeds the threshold." + }, + "maxBackupFileSizeInGB": { + "type": "integer", + "format": "int32", + "description": "The maximal file size in GB supported during backup. If the file size is bigger than the value, the file will be ignored during backup. To disable this parameter, set it to be zero." + }, + "filesetDedupCalculatorJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent jobs of fileset dedup calculator per node." + }, + "filesetSnapshotVerificationJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent of fileset snapshot verification jobs per node." + }, + "filesetSnapshotJobRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for the fileset snapshot job." + }, + "filesetRemoteMetadataScanParallelism": { + "type": "integer", + "format": "int32", + "description": "Parallelism to be used by agent in metadata computation during the remote metadata scan." + }, + "filesetNasRemoteMetadataScanParallelism": { + "type": "integer", + "format": "int32", + "description": "Parallelism to be used by agent in metadata scan of NAS share if kernel NFS/CIFS is used." + }, + "useNetappSnapdiff": { + "type": "boolean", + "description": "Whether to use the NetApp Snapdiff feature for backup." + }, + "filesetNasRemoteMetadataAsyncScanParallelism": { + "type": "integer", + "format": "int32", + "description": "An integer that specifies the level of parallelism the agent uses in the metadata scan of the NAS share when libnfs or libsmb2 are in use. The libnfs and libsmb2 use an asynchronous mechanism for metadata scans which requires fewer threads." + }, + "filesetRestoreWriteParallelism": { + "type": "integer", + "format": "int32", + "description": "Parallelism to be used by agent in data restore when sending data." + }, + "filesetNasRestoreWriteParallelism": { + "type": "integer", + "format": "int32", + "description": "Parallelism to be used by agent in data restore when sending data to a NAS share." + }, + "filesetRestoreReadParallelism": { + "type": "integer", + "format": "int32", + "description": "Parallelism to be used by agent in data restore when reading data from SDFS." + }, + "filesetEnableFingerprinting": { + "type": "boolean", + "description": "If set, we will enable fingerprinting for incremental backups." + }, + "filesetEnableVss": { + "type": "boolean", + "description": "If set, we will take VSS snapshots in Windows." 
+ }, + "nfsSupportedVersionsForFileset": { + "type": "string", + "description": "List of currently supported NFS versions to mount NFS for filesets. The supported versions will be tried in the list order during mount. NFS3 is listed ahead of NFS4 because NFS3 performs about 30% better than NFS4 for small files." + }, + "envoyFilesetRbaBulkStatBatchFiles": { + "type": "integer", + "format": "int32", + "description": "Maximum number of files to accumulate in a single bulk stat batch during the files' metadata scan for a Fileset backup through the Envoy VM." + }, + "enforceGlobalRbaBackupBlacklistPaths": { + "type": "boolean", + "description": "Whether to enforce the global fileset config rbaBackupBlacklistPaths. If set to true, this global config blacklist will overwrite the per-host blacklist set in the RBA flag." + }, + "rbaBackupBlacklistPaths": { + "type": "string", + "description": "Comma-separated list of blacklist paths that are skipped internally during backup." + }, + "filesetDedupCalculatorFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency of the dedup calculator job." + }, + "internalBackupAgentPerJobDirectoryCacheSize": { + "type": "integer", + "format": "int32", + "description": "The directory cache size for one backup job used by the internal backup agent." + }, + "internalBackupAgentGlobalJobDirectoryCacheSize": { + "type": "integer", + "format": "int32", + "description": "The cache capacity of the per-job directory cache used by the internal backup agent." + }, + "internalBackupAgentMaxFileDescriptors": { + "type": "integer", + "format": "int32", + "description": "The maximum number of files the internal backup agent can open." + }, + "filesetSnapshotVerificationFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in minutes. The fileset snapshot verification job for each data source runs at the specified interval." + }, + "isFilesetSnapshotVerificationEnabled": { + "type": "boolean", + "description": "A Boolean that determines whether the fileset snapshot verification job is enabled." + }, + "maxNumberOfPartitionsToVerify": { + "type": "integer", + "format": "int32", + "description": "Determines the max number of partitions that will be verified in a single snapshot verification job instance." + }, + "enableFstrimForFilesetSnapshots": { + "type": "boolean", + "description": "Determines if we forcefully need to run fstrim on every fileset." + }, + "forceFetchOnSnapshotVerificationFailure": { + "type": "boolean", + "description": "Determines if we want to force-fetch a partition if snapshot verification has failed on that partition." + }, + "forceUseSnapDiffV1": { + "type": "boolean", + "description": "A Boolean that specifies if only SnapDiff V1 is used to generate SnapDiff data." + }, + "backupAgentInternalVerificationMountRoot": { + "type": "string", + "description": "The location inside the Rubrik node where snapshot partitions will be mounted for the snapshot verification job. The mount root will be contained in the internal backup agent's root directory." + }, + "restoreNumLoopWorkers": { + "type": "integer", + "format": "int32", + "description": "Number of loop device workers to be used during restore." + }, + "refreshFailoverClusterAppNumRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for failover cluster application refresh jobs."
+ }, + "refreshFailoverClusterAppJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent failover cluster application refresh jobs per node." + }, + "refreshFailoverClusterAppJobIntervalInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval, in minutes, of the refresh failover cluster service jobs." + }, + "throttleFailoverClusterAppMaxRefCount": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent snapshots per failover cluster application." + }, + "filesetOptimizeZeroBytePartitionFetch": { + "type": "boolean", + "description": "Optimize fetch for partition by not invoking Rsync phase when the incremental bytes to be fetched is zero." + }, + "enableLightWeightPfcForFileset": { + "type": "boolean", + "description": "Whether to enable lightweight pfc for fileset snapshots This will expose patch files backed as journals - Journal VPFs as snapshots. These journal VPF's will be converted to patch files in the background." + }, + "filesetUsePipelinedRestore": { + "type": "boolean", + "description": "Whether or not to use the restore pipeline to perform fileset restore." + }, + "enableSnapshotVerificationForPublicCloudNASDAFilesets": { + "type": "boolean", + "description": "Whether to enable fileset snapshot verification job to be run for passthrough filesets which are archiving to public cloud." + }, + "useVirtualIteratorBasedChangelistProcessing": { + "type": "boolean", + "description": "Whether to use the optimised virtual iterator based changelist FMD processing." + }, + "filesetRestorePartitionsPerNodePerJob": { + "type": "integer", + "format": "int32", + "description": "The number of partitions allowed to be restored on a single node for a given job." + }, + "filesetPipelinedRestorePrepareWriteStageNumThreads": { + "type": "integer", + "format": "int32", + "description": "The number of threads used to perform the Prepare Write stage of the fileset pipeline restore." + }, + "filesetPipelinedRestoreChunkFileStageNumThreads": { + "type": "integer", + "format": "int32", + "description": "The number of threads used to perform the Chunk File stage of the fileset pipeline restore." + }, + "filesetPipelinedRestoreReadDataStageNumThreads": { + "type": "integer", + "format": "int32", + "description": "The number of threads used to perform the Read Data stage of the fileset pipeline restore." + }, + "filesetPipelinedRestoreCompressDataStageNumThreads": { + "type": "integer", + "format": "int32", + "description": "The number of threads used to perform the Compress Data stage of the fileset pipeline restore." + }, + "filesetPipelinedRestoreSetAttrStageNumThreads": { + "type": "integer", + "format": "int32", + "description": "The number of threads used to perform the Set Attr stage of the fileset pipeline restore." + }, + "enableSnapshotVerificationForPrivateCloudNASDAFilesets": { + "type": "boolean", + "description": "Whether to enable snapshot verification for NASDA filesets which are archiving to private cloud. These don't have egress costs." + }, + "saveFmdFromFailedFilesetBackup": { + "type": "boolean", + "description": "If set, saves FMDs from latest failed backup for the Fileset." + }, + "nfsSupportedOptions": { + "type": "string", + "description": "Nfs supported options for fileset operations. List of options is separated by a semicolon and the list of valid option values is comma separated." 
+ }, + "nfsDefaultOptions": { + "type": "string", + "description": "Nfs default options for fileset operations. CSV list of options." + }, + "enableSimulatedNasHosts": { + "type": "boolean", + "description": "Decides if adding simulated nas shares should be enabled." + }, + "allowConcurrentChangelistJobs": { + "type": "boolean", + "description": "Whether to allow concurrent changelist jobs on the same Isilon server. Setting this to true does not mean Isilon server will automatically run concurrent jobs. It only allows Rubrik cluster to send changelist job creation with concurrency flag. Isilon version must be >= 8.2.2 and two internal configs have to be set properly to allow concurrent changelist jobs." + }, + "deleteExtraFilesOnSnapshotVerificationFailure": { + "type": "boolean", + "description": "Determines if we want to delete extra files in a partition if snapshot verification finds leaked ext4 files." + }, + "enableLinuxSparseFileSupport": { + "type": "boolean", + "description": "Whether sparse files support should be enabled for Linux host. Sparse file support on Linux is enabled only if this config is true AND rbaBulkReaderEnableSparseFiles/ rbaBulkWriterEnableSparseFiles is true." + }, + "enableSunOSSparseFileSupport": { + "type": "boolean", + "description": "Whether sparse files support should be enabled for SunOS host. Sparse file support on SusOS host is enabled only if this config is true AND rbaBulkReaderEnableSparseFiles/ rbaBulkWriterEnableSparseFiles is true." + }, + "useSnapMirrorFullLabelDuringForceFull": { + "type": "boolean", + "description": "Determines if the SnapMirror snapshot is selected based on the configured full label during a force full backup. If false, the SnapMirror destination volume backup uses a configured incremental label to select the SnapMirror snapshot. However, the full label may match a SnapMirror snapshot that is older than the last snapshot used for backup when this is set to true. If this occurs, SnapDiff does not work, and a regular scan is performed on the tip of the volume." + }, + "useTotalsForBackupFailureThreshold": { + "type": "boolean", + "description": "Whether to use the total number of files or bytes in the fileset as a basis for calculating the backup job failure threshold instead of only the changed amount of files or bytes." + }, + "enableFileSharding": { + "type": "boolean", + "description": "If set, we will enable sharding of single file into multiple partitions." + }, + "setEnableFileShardingOnFull": { + "type": "boolean", + "description": "If true, we will set the enableFileSharding as true for filesets when they do a full snapshot." + }, + "enableSqlVssWritersExclusion": { + "type": "boolean", + "description": "If set, we will enable the exclusion of the SQL VSS writer components when taking Fileset VSS snapshot if the SQL component files are not present in the fileset." + }, + "metadataScanThrottlingIntervalSeconds": { + "type": "integer", + "format": "int32", + "description": "The frequency at which the throttling level for metadata scan is reevaluated." + }, + "enableThreadPoolThrottler": { + "type": "boolean", + "description": "Determines if the thread pool throttler will be enabled. If the sleep throttler is not enabled as well, there will be no throttling after the thread pool reaches its minimum size of 1 thread." + }, + "enableSleepThrottler": { + "type": "boolean", + "description": "Determines if the sleep throttler will be enabled. 
If the thread pool throttler is also enabled, the sleep throttler will only be active when the thread pool size reaches 1 thread." + }, + "metadataScanThrottlingHighLoadLatencyThresholdUs": { + "type": "integer", + "format": "int32", + "description": "The Host or NAS share is considered to be under high load if the average latency per request during the metadata scan phase goes above this threshold. The threshold is 100ms by default." + } + } + }, + "GlobalForgeConfig": { + "type": "object", + "properties": { + "proxyConfiguration": { + "type": "string", + "description": "Proxy Configuration." + }, + "log4jMonitorIntervalSeconds": { + "type": "integer", + "format": "int32", + "description": "monitorInterval attribute in log4j2.xml." + }, + "log4jTraceLoggers": { + "type": "string", + "description": "Comma separated list of logger names to enable TRACE level logging. For example akka.io.TcpListener,com.scaledata.metadatastore.CassandraEntityManager,com.scaledata.vmclient.vmware.impl.VmwareToolImpl,com.scaledata.util.package,com.amazonaws." + }, + "rubrikEnterpriseNumber": { + "type": "integer", + "format": "int32", + "description": "Rubrik Enterprise Number can be found here 'https://www.iana.org/assignments/enterprise-numbers/enterprise-numbers'." + }, + "restoreLegacySyslogStringComposition": { + "type": "boolean", + "description": "Earlier, we used structured syslog fields by composing fields as a string. E.g., instead of sending the enterpriseNumber as 49929, and eventName as eventDetail, we added eventDetail@49929 to the message string. In the oft chance that a customer using syslog integration saw this flaw, didn't report it to Rubrik and hacked a solution, setting this config will restore the earlier behavior. Support should educate the customer to remove this special processing and be standards compliant." + }, + "syslogAppName": { + "type": "string", + "description": "App name to use for messages sent to remote syslog servers." + }, + "syslogFormat": { + "type": "string", + "description": "Format to use for messages sent to remote syslog servers." + }, + "syslogMdcId": { + "type": "string", + "description": "MDC ID to use for messages sent to remote syslog servers." + }, + "syslogMsgId": { + "type": "string", + "description": "MSGID to use for event messages sent to remote syslog servers." + }, + "syslogAppLogMsgId": { + "type": "string", + "description": "MSGID to use for app logs sent to remote syslog servers." + }, + "syslogNewLine": { + "type": "boolean", + "description": "Whether to send a new line with each remote syslog message." + }, + "syslogGnuTlsAuthMode": { + "type": "string", + "description": "Rsyslog gtls driver authentication mode." + }, + "diskCheckerPartitionMountRate": { + "type": "integer", + "format": "int32", + "description": "Mount device partitions every this many iterations in the DiskChecker." + }, + "verifyBrikSerialsOnAdd": { + "type": "boolean", + "description": "Sanity check brik serials while adding a node." + }, + "blinkLedForDiskStatus": { + "type": "boolean", + "description": "Change LED state based on disk status." + }, + "blockCassandraForRemovedNodes": { + "type": "boolean", + "description": "Block Cassandra traffic to and from removed nodes using iptables." + }, + "autoRemoveJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of auto remove jobs per node." 
+ }, + "autoRemoveDeadNodeJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency of job to automatically remove dead node." + }, + "autoRemoveDeadNodeJobNotificationFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency at which notifications are fired for auto removed nodes." + }, + "autoRemoveMaxAllowed": { + "type": "integer", + "format": "int32", + "description": "Number of auto node removals the cluster is allowed to perform." + }, + "autoRemoveDeadNodeResurrectionWindowMultiplier": { + "type": "integer", + "format": "int32", + "description": "How stale should a node be before we auto remove it." + }, + "autoRemoveMinNodesAtStart": { + "type": "integer", + "format": "int32", + "description": "How many nodes should a cluster have for auto remove to run." + }, + "autoInstallOnAddEnabled": { + "type": "boolean", + "description": "Is auto install enabled on node add." + }, + "autoInstallMinVersion": { + "type": "string", + "description": "Minimum supported version for auto install." + }, + "autoInstallCompleteRetries": { + "type": "integer", + "format": "int32", + "description": "How many 10 second intervals to check if auto install completed." + }, + "addNodesPollingPeriodSecs": { + "type": "integer", + "format": "int32", + "description": "Polling period for AddNodes job status." + }, + "decommissionRetryAttempts": { + "type": "integer", + "format": "int32", + "description": "Max retry attempts for DecommissionNode job." + }, + "sdfsDecommissionPollingPeriodSecs": { + "type": "integer", + "format": "int32", + "description": "Polling period for DecommissionNode job." + }, + "preferForceRebootByDefault": { + "type": "boolean", + "description": "Prefer a forced reboot by default in reboot utilities." + }, + "platformsSkippingSetChassisInfoOnReplaceNode": { + "type": "string", + "description": "On appliances where there is a single node per chassis, we can skip this step. This is helpful for platforms where the current method of using ipmicfg to set the chassis serial does not work." + }, + "hardwareCheckCmd": { + "type": "string", + "description": "Script used to check hardware health." + }, + "hardwareCheckPeriod": { + "type": "integer", + "format": "int32", + "description": "Frequency of hardware health checks in milliseconds." + }, + "hardwareCheckDedupeWindow": { + "type": "integer", + "format": "int32", + "description": "specifies an interval in milliseconds. Multiple errors of the same type that occur in this interval generate a single notification." + }, + "diskUnhealthyDedupeWindow": { + "type": "integer", + "format": "int32", + "description": "specifies an interval in milliseconds. Multiple errors of the same type that occur in this interval generate a single notification." + }, + "networkInterfaceForIpv6Config": { + "type": "string", + "description": "The interface on which Ipv6 address is configured on." + }, + "stalenessThresholdInMillisForLowSeverityNodeDownNotification": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. Rubrik CDM generates a low severity 'node down' notification after this interval. The default interval is ten minutes." + }, + "stalenessThresholdInMillisForHighSeverityNodeDownNotification": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. Rubrik CDM generates a high severity 'node down' notification after this interval. The default interval is two hours." 
+ }, + "stalenessThresholdInMillisForRepeatedNodeDownNotifications": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. Rubrik CDM begins to repeat 'node down' notifications after this interval. The default interval is 24 hours." + }, + "repeatedNodeDownNotificationFrequencyInMillis": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. Rubrik CDM repeats 'node down' notifications every time this interval elapses. The default interval is 24 hours." + }, + "performNodeHealthChecksFromBadNodes": { + "type": "boolean", + "description": "Whether node health checks are performed from nodes in BAD state." + }, + "nodeUpNotificationsEnabled": { + "type": "boolean", + "description": "Specifies whether node up notifications are enabled." + }, + "lowSeverityNodeDownNotificationsEnabled": { + "type": "boolean", + "description": "Specifies whether low severity node down notifications are enabled." + }, + "highSeverityNodeDownNotificationsEnabled": { + "type": "boolean", + "description": "Specifies whether high severity node down notifications are enabled." + }, + "defaultPageSize": { + "type": "integer", + "format": "int32", + "description": "Default size for pagination in Cassandra; a smaller value should be used for tables with large rows." + }, + "maxRandomSleepBetweenPagesInMs": { + "type": "integer", + "format": "int32", + "description": "Maximum sleep time used for picking the random duration when pacing paginated iterate queries." + }, + "floatingIpAtomicActionRetry": { + "type": "integer", + "format": "int32", + "description": "Number of retries to perform when a certain atomic action, for example locking on a floating IP, has failed." + }, + "floatingIpAtomicActionRetryDelayMillis": { + "type": "integer", + "format": "int32", + "description": "Retry delay for floating IP atomic action." + }, + "floatingIpLockGracePeriodMillis": { + "type": "integer", + "format": "int32", + "description": "Grace period for a floating IP lock attempt to time out." + }, + "snmpMibUpdateFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency in minutes at which the SNMP MIB updater runs periodically." + }, + "ignoreOpenStripesForSingleNodeDecommissions": { + "type": "boolean", + "description": "Ignore open stripes with replicas on nodes being decommissioned, while decommissioning a single node." + }, + "ipHostsMappings": { + "type": "string", + "description": "A JSON-serialized map between IP addresses and hostnames. Values will be updated in /etc/hosts by the node monitor." + }, + "dnsPreferIpv4OverIpv6": { + "type": "boolean", + "description": "When DNS can resolve a hostname into IPv4 and IPv6, it prefers IPv4." + }, + "tcpSackValue": { + "type": "integer", + "format": "int32", + "description": "Value to use for TCP selective acknowledgment." + }, + "ipRouteGetCommandTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout in seconds when executing 'ip route get hostIpAddress'." + }, + "decommissionNotificationDedupeWindowMillis": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. Used as a dedupe window for recipients to receive updates on node decommission status. Can be updated to increase the frequency of notifications, but defaults to 1 day." + }, + "decommissionProgressCheckFrequencySecs": { + "type": "integer", + "format": "int32", + "description": "Period in seconds to check if decommission has progressed.
Defaults to 24 hours." + }, + "cdmUpgradeTarballUtilCopyBufferSizeInKB": { + "type": "integer", + "format": "int32", + "description": "Buffer size, in KiB, to be used while distributing the CDM tarball across nodes." + }, + "numRetriesForStageCdmSoftwareJob": { + "type": "integer", + "format": "int32", + "description": "Number of retries for the job to stage CDM software." + }, + "stageCdmSoftwareDownloadJobProgressUpdateThresholdInBytes": { + "type": "integer", + "format": "int32", + "description": "Minimum number of bytes that must be downloaded from a remote URL before updating stage cdm software job progress." + }, + "periodicUpgradePrechecksTableSizeLimit": { + "type": "integer", + "format": "int32", + "description": "The upper limit on the number of rows to store in the periodic_upgrade_prechecks table." + }, + "periodicUpgradePrechecksJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency of periodic upgrade prechecks job in minutes." + }, + "periodicUpgradePrechecksJobMaxInstancesPerNode": { + "type": "integer", + "format": "int32", + "description": "Maximum number of instances per node for the periodic upgrade prechecks job." + }, + "maxWaitTimeForRunPrechecksTaskToTerminateInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time in seconds to wait for a run prechecks task to terminate." + }, + "sleepTimeForCheckingPeriodicPrechecksStatusInMilliSeconds": { + "type": "integer", + "format": "int32", + "description": "Frequency in milliseconds to check the periodic prechecks status." + }, + "disableRubrikIpmiUser": { + "type": "boolean", + "description": "Disable the IPMI username RUBRIK." + }, + "disableResetNode": { + "type": "boolean", + "description": "Disable the reset command to Admin in the CLI." + }, + "disablePoweroffCluster": { + "type": "boolean", + "description": "Disable the poweroff cluster command to Admin in the CLI." + }, + "skipUrlValidationForDownloadSoftwareJob": { + "type": "boolean", + "description": "Skip URL validation while downloading a CDM software package." + }, + "downloadSoftwareConnectionTimeoutMillis": { + "type": "integer", + "format": "int32", + "description": "Connection timeout in milliseconds while downloading a CDM software package." + }, + "enablePreStageUpgrade": { + "type": "boolean", + "description": "Feature flag to turn on pre-stage upgrade." + }, + "disablePeriodicPrechecksWithoutTarball": { + "type": "boolean", + "description": "When upgrade tarball is absent, disable the periodic prechecks." + }, + "upgradeEndBufferTimeInSec": { + "type": "integer", + "format": "int32", + "description": "After upgrade ends, the time in seconds it should wait before issuing another upgrade command." + }, + "maxWaitTimeForStageSoftwareTaskToTerminateInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time in seconds to wait for stage software task to complete before the job terminates." + }, + "sleepTimeForUpgradeStatusInMilliSeconds": { + "type": "integer", + "format": "int32", + "description": "Frequency in milliseconds to check the upgrade status." + }, + "maxWaitTimeForUpgradeStatusInMilliSeconds": { + "type": "integer", + "format": "int32", + "description": "Time in milliseconds to wait for upgrade status before stopping retries." + }, + "upgradeVersionCheckBinaryPath": { + "type": "string", + "description": "Location of upgrade version check binary." 
+ }, + "rollbackableFilePath": { + "type": "string", + "description": "Location of file whose existence denotes the cluster is running in rollbackable mode." + }, + "maxDaysToRollbackAfterSuccess": { + "type": "integer", + "format": "int32", + "description": "The number of days after successful upgrade CDM Software rollback to previous version is allowed. Value of 0 indicates the feature is disabled." + }, + "parallelNodeAddEnabled": { + "type": "boolean", + "description": "Determines whether to run node addition in parallel." + }, + "parallelNodeAddPollingPeriodSecs": { + "type": "integer", + "format": "int32", + "description": "Polling period for parallel node add operations." + }, + "parallelNodeAddNumJobsPerNode": { + "type": "integer", + "format": "int32", + "description": "Determines how many node additions a node in the cluster can drive. Should be increased if more nodes are being added than are present in the cluster." + }, + "prestageMinCpuPct": { + "type": "integer", + "format": "int32", + "description": "The amount of CPU shares to be provisioned for the PreStage upgrade workflow. eg. Value of 25 implies 25% of CPU resources. If other processes are not using the CPUs, then upgrade workflow can potentially utilise other CPU resources in the system." + }, + "enableParallalPrecheck": { + "type": "boolean", + "description": "Enables running prechecks in parallel when upgrading." + }, + "prestageImagingNodesInParallelPct": { + "type": "integer", + "format": "int32", + "description": "The percentage of nodes that will do imaging in parallel, in the pre-stage part of upgrade workflow." + }, + "enableRollingUpgrade": { + "type": "boolean", + "description": "Feature flag to turn on rolling upgrade." + }, + "quiesceTimeInMinutes": { + "type": "integer", + "format": "int32", + "description": "The quiesce time in minutes during rolling upgrade. Upgrade service reads this configuration and set the node status to UPGRADE when quiesce phase ends." + }, + "upgradeLockFilePath": { + "type": "string", + "description": "Location of the upgrade lock file." + }, + "bulkNodeRemovalEnabled": { + "type": "boolean", + "description": "Determines whether to allow bulk node removal." + }, + "bulkNodeRemovalRetries": { + "type": "integer", + "format": "int32", + "description": "Max number of retries for bulk node removal job." + }, + "bulkNodeRemovalChildRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for bulk node removal child jobs." + }, + "enableVlanSegregation": { + "type": "boolean", + "description": "Feature flag to enable Vlan segregation feature." + }, + "enableCustomHostname": { + "type": "boolean", + "description": "Feature flag to enable custom hostname feature." + } + } + }, + "GlobalGalactusConfig": { + "type": "object", + "properties": { + "refreshCloudNativeSourceJobIntervalInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval of refresh cloud native source jobs in minutes." + }, + "gcCloudNativeResourcesJobIntervalInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval of Gc cloud native resources jobs in minutes." + }, + "cloudNativeVmSnapshotJobRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for the cloud native vm snapshot job." 
+ }, + "maxAwsCloudNativeCreateSnapshotJobClusterWide": { + "type": "integer", + "format": "int32", + "description": "The maximum number of cloud native virtual machine snapshot jobs that can run on a cluster at a given time." + }, + "maxAwsCloudNativeCreateSnapshotJobPerSource": { + "type": "integer", + "format": "int32", + "description": "The maximum number of cloud native virtual machine snapshot jobs for a given source that can run on a cluster at a given time." + }, + "maxCloudNativeGhostVmCount": { + "type": "integer", + "format": "int32", + "description": "Max allowed count for ghost cloud native virtual machines." + }, + "nativeSnapshotCreationPollFrequencyInMs": { + "type": "integer", + "format": "int32", + "description": "Poll frequency to check for native snapshot to be available after triggering snapshot creation. Higher frequency can cause too many API calls in a short duration." + }, + "snapshotIntegrityFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency to check if all cloud native snapshots in brik actually exist in AWS." + }, + "isConnectivityCheckForAddSourceEnabled": { + "type": "boolean", + "description": "Flag for check connectivity to try requests for the add cloud native source task." + }, + "isConnectivityCheckForRefreshEnabled": { + "type": "boolean", + "description": "Flag for check connectivity to try requests for the refresh task." + }, + "isConnectivityCheckForGCLeakedResourcesEnabled": { + "type": "boolean", + "description": "Flag for check connectivity to try requests for the gc leaked resources task." + }, + "isConnectivityCheckForGCGlobalEnabled": { + "type": "boolean", + "description": "Flag for check connectivity to try requests for the gc global task." + }, + "isConnectivityCheckForDeleteSourceEnabled": { + "type": "boolean", + "description": "Flag for check connectivity to try requests for the delete source task." + }, + "isConnectivityCheckForInplaceRestoreEnabled": { + "type": "boolean", + "description": "Flag for check connectivity to try requests for the inplace restore task." + }, + "isConnectivityCheckForExportVmEnabled": { + "type": "boolean", + "description": "Flag for check connectivity to try requests for the export vm task." + }, + "isConnectivityCheckForCreateVmSnapshotEnabled": { + "type": "boolean", + "description": "Flag for check connectivity to try requests for the create vm snapshot task." + }, + "isConnectivityCheckForIndexingEnabled": { + "type": "boolean", + "description": "Flag for check connectivity to try requests for an indexing task." + }, + "maxWaitTimeForCloudNativeVmImageCreationInMins": { + "type": "integer", + "format": "int32", + "description": "Time to wait for the CloudNativeVm native snapshot to be created in the cloud, in minutes. (native snapshot should be in available state)." + }, + "nativeDiskStatePollFrequencyInMs": { + "type": "integer", + "format": "int32", + "description": "Poll frequency in milliseconds to get the CloudNativeDisk state." + }, + "maxWaitTimeForCloudNativeDiskAttachmentInSecs": { + "type": "integer", + "format": "int32", + "description": "Time in minutes to wait for a CloudNativeDisk to get attached to a CloudNativeVm." + }, + "maxWaitTimeForCloudNativeDiskToAvailableInSecs": { + "type": "integer", + "format": "int32", + "description": "Time in minutes to wait for a CloudNativeDisk to become available." 
+ }, + "maxWaitTimeToAcquireDiskAttachmentThrottleInMins": { + "type": "integer", + "format": "int32", + "description": "Time in minutes to wait for acquiring the throttle to attach disks to Cloud Native Vm." + }, + "awsCloudNativeVmsToUploadInParallel": { + "type": "integer", + "format": "int32", + "description": "Number of aws native vms to upload in parallel for migration." + }, + "awsCloudNativeIndexToUploadInParallel": { + "type": "integer", + "format": "int32", + "description": "Number of aws native indexes to upload in parallel for migration." + }, + "sleepTimeToAcquireDiskAttachmentThrottleInMs": { + "type": "integer", + "format": "int32", + "description": "Frequency to try to acquire throttle to attach disks to Cloud Native Vm." + }, + "nativeVmStatePollFrequencyInMs": { + "type": "integer", + "format": "int32", + "description": "Poll frequency in milliseconds to get the CloudNativeVm state." + }, + "maxWaitTimeForCloudNativeVmToStopInMins": { + "type": "integer", + "format": "int32", + "description": "Time in minutes to wait for a CloudNativeVm to stop." + }, + "maxWaitTimeForCloudNativeVmToStartInMins": { + "type": "integer", + "format": "int32", + "description": "Time in minutes to wait for a CloudNativeVm to start." + }, + "maxWaitTimeForCloudNativeVmToTerminateInMins": { + "type": "integer", + "format": "int32", + "description": "Time in minutes to wait for a CloudNativeVm to terminate." + }, + "maxWaitTimeForRemoteIndexJobToTerminateInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time in seconds to wait for a RemoteIndexCloudNativeDisks job to terminate." + }, + "sleepTimeForCheckingRemoteIndexJobInstanceInMilliSeconds": { + "type": "integer", + "format": "int32", + "description": "Frequency to check the remote job instance status." + }, + "maxWaitTimeForRemoteDownloadJobToTerminateInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time in seconds to wait for a RemoteCloudNativeCreateDownloadable job to terminate." + }, + "sleepTimeForCheckingRemoteJobInstanceInMilliSeconds": { + "type": "integer", + "format": "int32", + "description": "Frequency to check the remote job instance status." + }, + "remoteIndexFileChunkSizeInMB": { + "type": "integer", + "format": "int32", + "description": "Chunk size used to read remote FMD file, in MB. Max allowed size is 15 MB." + }, + "remoteDownloadableFileChunkSizeInMB": { + "type": "integer", + "format": "int32", + "description": "Chunk size used to read remote FMD file, in MB. Max allowed size is 15 MB." + }, + "maxWaitTimeForRemoteJobToCancelInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time in seconds to wait for a remote cloud native job to cancel." + }, + "sleepTimeForCheckingRemoteJobInstanceCancelInSeconds": { + "type": "integer", + "format": "int32", + "description": "Frequency to check if remote job is cancelled." + }, + "gracePeriodForRemoteJobLogCollectionInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time in seconds to subtract from the remote job start time while collecting logs. This is necessary to account for the possible skewing of clocks of primary Rubrik Cluster and remote storm instance." + }, + "batchingTimeoutForIndexJobInMinutes": { + "type": "integer", + "format": "int32", + "description": "Time to wait for batching before launching a storm for the request." + }, + "shouldIndexCloudNativeSnapshots": { + "type": "boolean", + "description": "Boolean value to indicate if cloud native snapshots to be indexed." 
+ }, + "shouldCopyTagsToCloudNativeSnapshots": { + "type": "boolean", + "description": "Boolean value to indicate if tags from cloud native vm should be copied to cloud native snapshot. This Boolean also defines if the the tags from EBS volume should be copied to disk snapshots." + }, + "maxRetryableExceptionCountForCloudNativeSnapshotCompletion": { + "type": "integer", + "format": "int32", + "description": "The job-task for cloud native vm snapshot completion should be retried in the case of retryable exceptions. This parameter specifies the number of retries of the task." + }, + "maxGenericExceptionCountForCloudNativeSnapshotCompletion": { + "type": "integer", + "format": "int32", + "description": "The job-task for cloud native vm snapshot completion should be retried in the case of fatal exceptions. This parameter specifies the number of retries of the task." + }, + "retrySleepInSecondsForCloudNativeSnapshotCompletion": { + "type": "integer", + "format": "int32", + "description": "Threshold in seconds for the cloud native vm snapshot completion job thread to sleep before retrying again." + }, + "persistAtomicallyRetryAttempts": { + "type": "integer", + "format": "int32", + "description": "Number of retries to persist atomically." + }, + "minTimeLapseForCloudNativeVmGcInMins": { + "type": "integer", + "format": "int32", + "description": "Minimum time lapse for cloud native virtual machine to be garbage collected in minutes." + }, + "minTimeLapseForCloudNativeDiskGcInMins": { + "type": "integer", + "format": "int32", + "description": "Minimum time lapse for cloud native virtual disks to be garbage collected in minutes." + }, + "minTimeLapseForCloudNativeVmSnapshotGcInMins": { + "type": "integer", + "format": "int32", + "description": "Minimum time lapse for cloud native virtual machine snapshot to be garbage collected in minutes." + }, + "minTimeLapseForCloudNativeDiskSnapshotGcInMins": { + "type": "integer", + "format": "int32", + "description": "Minimum time lapse for cloud native virtual disk snapshot to be garbage collected in minutes." + }, + "maxWaitTimeForSystemPauseInMins": { + "type": "integer", + "format": "int32", + "description": "Max wait time for the system pause to take effect for the given sourceId. System pause implies that all running jobs for this source come to a pause." + }, + "stormNotFoundNotificationArbitrationTimeInMinutes": { + "type": "integer", + "format": "int32", + "description": "Threshold time for sending notification if storm image is not found." + }, + "maxConcurrentChunksDownload": { + "type": "integer", + "format": "int32", + "description": "Number of file chunks to be downloaded in parallel while downloading a file." + }, + "defaultIndexSizePerDiskInGB": { + "type": "integer", + "format": "int32", + "description": "Assumed default size of the index for a cloud native virtual machine per disk in GBs." + }, + "indexSizeFactorPerDisk": { + "type": "number", + "format": "double", + "description": "Size factor for the index file compared with the logical size of a cloud native disk." + }, + "maxPossibleIndexSizeInGB": { + "type": "integer", + "format": "int32", + "description": "Upper limit on possible size of index file. Used in providing storage estimate to storm manager." + }, + "shouldDisableCloudNativeAdoption": { + "type": "boolean", + "description": "Boolean value to indicate if cloud native adoption should be disabled. 
If this value is set, then the cloud native feature will be disabled for all customers except those who either have at least one active AWS account added or who are C2S customers (governed by the config enableDcaArchivalLocationCreation)." + } + } + }, + "GlobalGcpConfig": { + "type": "object", + "properties": { + "googleUploadStreamSizeInMB": { + "type": "integer", + "format": "int32", + "description": "For Google we upload files in chunks. This specifies the size of that chunk. Previously the Google SDK had a size limit of less than 2GB, which was fixed subsequently." + }, + "googleStreamWriteChunkSizeInMB": { + "type": "integer", + "format": "int32", + "description": "For every resumable upload to a Google storage bucket, the file is split into multiple chunks and each chunk is sent in one PUT request. Google supports chunk sizes from 256KB to 32MB. The default value set by Google is 10MB." + }, + "googleResumableMultipartUploadEnabled": { + "type": "boolean", + "description": "Flag indicating whether to use resumable multipart upload for Google." + }, + "gpnUserAgentString": { + "type": "string", + "description": "Google user agent string used to track data uploaded by Rubrik on the Google side." + } + } + }, + "GlobalHawkeyeConfig": { + "type": "object", + "properties": { + "indexSnappableJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval in minutes for the snappable index job." + }, + "indexMaxDirectoriesToDelete": { + "type": "integer", + "format": "int32", + "description": "Maximum number of index directories to delete stored in the snappable." + }, + "indexSnappableJobMaxSnapshotsPerRun": { + "type": "integer", + "format": "int32", + "description": "Maximum number of snapshots without an index processed per index run." + }, + "indexSnappableJobMaxSnapshotsWithIndexPerRun": { + "type": "integer", + "format": "int32", + "description": "Maximum number of snapshots with an index processed per index run." + }, + "indexSnappableJobMaxAttemptsPerSnapshot": { + "type": "integer", + "format": "int32", + "description": "Maximum number of attempts to index a snapshot before we quit." + }, + "indexSnappableJobUseUML": { + "type": "boolean", + "description": "Whether to use UML for Indexing." + }, + "indexSnappableUmlGuestMemoryInMb": { + "type": "integer", + "format": "int32", + "description": "Amount of memory for the UML guest in MB." + }, + "indexSnappableThrottleDelayInMs": { + "type": "integer", + "format": "int32", + "description": "Delay in microseconds for a job to wait in the queue in case there is more than one job running for a given snappable." + }, + "indexSnappableJobUseCache": { + "type": "boolean", + "description": "Whether to enable the SDFS cache for indexing jobs." + }, + "indexSnappableJobCacheNumDiskLimit": { + "type": "integer", + "format": "int32", + "description": "The number of disks of a VM that the index cache supports; any additional disks in the VM will not use the index cache." + }, + "sprayDownloadCleanupInDays": { + "type": "integer", + "format": "int32", + "description": "Remove files from the spray download folder that are older than this many whole days; fractional days are rounded down." + }, + "indexSnappableEnablePatchCache": { + "type": "boolean", + "description": "Whether PatchCache should be enabled for Indexing." + }, + "analyzeSnappableJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval in minutes for the analyze snappable job."
+ }, + "incompressibleFileTypes": { + "type": "string", + "description": "A list of file extensions representing files that are incompressible." + }, + "snappableSearchDefaultLimit": { + "type": "integer", + "format": "int32", + "description": "Number of results to return from snappable search query." + }, + "globalSearchPrefixPath": { + "type": "string", + "description": "Search root snapshot directory." + }, + "globalSearchNumShards": { + "type": "integer", + "format": "int32", + "description": "Number of shards to be created in the file system." + }, + "globalSearchResultSize": { + "type": "integer", + "format": "int32", + "description": "Number of results to return from global search query." + }, + "userSearchAuditDebounceDelay": { + "type": "integer", + "format": "int32", + "description": "Number of milliseconds of user inactivity before publishing a user audit event for a search." + }, + "indexAlertEnabled": { + "type": "boolean", + "description": "Whether to alert on index failures based on the lookback and failure threshold rules." + }, + "indexAlertLookBackPeriodDays": { + "type": "integer", + "format": "int32", + "description": "Number of days to look back to determine if a snappable is valid for index alerts. If a snappable is consistently unindexable and does not have any indexed snapshots within the look back period, index failure will not be alerted." + }, + "indexAlertFailureThresholdDays": { + "type": "integer", + "format": "int32", + "description": "Number of days of unindexed snapshots to tolerate before raising index alert for a snappable. Index failure alert is raised if the snappable has indexed snapshots within the look back period, but has failed to index all recent snapshots within the threshold days." + }, + "indexSnapshotsUsingBlockDevices": { + "type": "boolean", + "description": "Boolean flag to determine if block device based indexing of snapshots is enabled or not." + }, + "filesystemsForBlockDeviceBasedIndexing": { + "type": "string", + "description": "Comma Separated Value of file systems for which block device based indexing is supported." + }, + "mountFailoverOnBlockDeviceFailure": { + "type": "boolean", + "description": "Boolean flag to determine if we should failover to mount based indexing if block device based indexing fails." + } + } + }, + "GlobalHypervConfig": { + "type": "object", + "properties": { + "hypervSupportedConfigurationVersions": { + "type": "string", + "description": "List of supported configuration versions for hyperv virtual machines." + }, + "hypervExportJobTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout for disk copy during create, export, restore snapshot jobs." + }, + "hypervCheckJobProgressRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries to check copy job progress." + }, + "hypervCheckJobProgressDelayInMs": { + "type": "integer", + "format": "int32", + "description": "Delay between retries of check copy job progress." + }, + "hypervWmiJobProgressRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries to check WMI job progress." + }, + "hypervWmiJobProgressDelayInMs": { + "type": "integer", + "format": "int32", + "description": "Delay between retries of check WMI job progress." + }, + "hypervIngestUseSingleReplica": { + "type": "boolean", + "description": "Whether to use single-replica (mirrored) or Reed-Solomon \\ for Hyper-V Ingest." 
+ }, + "numParallelHypervRefresh": { + "type": "integer", + "format": "int32", + "description": "Number of parallel threads to get details of servers and deploy connectors to the hosts." + }, + "numParallelHypervExport": { + "type": "integer", + "format": "int32", + "description": "Number of parallel threads to export VM snapshot disks." + }, + "exportCleanupDisabled": { + "type": "boolean", + "description": "Disable cleanup of export disks that have been restored if the export job fails." + }, + "hypervMountPrefetchEnabled": { + "type": "boolean", + "description": "Whether to use Mjf prefetch for Hyperv mount snapshot." + }, + "hypervVmRestoreReadAheadEnabled": { + "type": "boolean", + "description": "Whether to use Sequential Read-Ahead in the Hyperv export jobs." + }, + "hypervExportCachingEnabled": { + "type": "boolean", + "description": "Whether enable caching for mergedSpec." + }, + "sdfsServiceSocketTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout while connecting to SDFS service." + }, + "hypervBackupDisknameCaseSensitive": { + "type": "boolean", + "description": "Whether to make VHDX filenames case-sensitive when performing backups." + }, + "hypervVmCheckpointLimit": { + "type": "integer", + "format": "int32", + "description": "Maximum number of checkpoints for one Hyper-V VM." + }, + "hypervExportedConfigDeletionRetries": { + "type": "integer", + "format": "int32", + "description": "Number of times to retry when deleting exported Hyper-V config files." + }, + "hypervExportedDiskDeletionRetries": { + "type": "integer", + "format": "int32", + "description": "Number of times to retry when deleting exported Hyper-V disks." + }, + "hypervExportedConfigDeletionInitialSleepTimeInMs": { + "type": "integer", + "format": "int32", + "description": "Initial sleep time in milliseconds when deleting exported Hyper-V config files." + }, + "hypervExportedDiskDeletionInitialSleepTimeInMs": { + "type": "integer", + "format": "int32", + "description": "Initial sleep time in milliseconds when deleting exported Hyper-V disks." + }, + "hypervLiveMountCleanupRetries": { + "type": "integer", + "format": "int32", + "description": "Number of times to retry cleaning up a Live Mount when undoing MOUNT_HYPERV_SNAPSHOT." + }, + "hypervLiveMountCleanupInitialSleepTimeInMs": { + "type": "integer", + "format": "int32", + "description": "Initial sleep time in milliseconds for cleaning up a Live Mount when undoing MOUNT_HYPERV_SNAPSHOT." + }, + "hypervDestroyPlannedVirtualMachineRetries": { + "type": "integer", + "format": "int32", + "description": "Number of times to retry when destroying the planned virtual machine, while undoing a Hyper-V export." + }, + "hypervDestroyPlannedVirtualMachineInitialSleepTimeInMs": { + "type": "integer", + "format": "int32", + "description": "Initial sleep time in milliseconds when destroying the planned virtual machine, when undoing a Hyper-V export." + }, + "hypervEnableCorruptionFix": { + "type": "boolean", + "description": "When enabled, fixes corruptions using fulls or fingerprint based ingest based on hypervUseFingerprintingForCorruptionFix config." + }, + "hypervUseFingerprintingForCorruptionFix": { + "type": "boolean", + "description": "When enabled, use disk fingerprint based ingest for hyperv fulls taken to fix snapshot chain corruption. Uses full ingest when disabled." + }, + "hypervSmbNameLength": { + "type": "integer", + "format": "int32", + "description": "Length of Samba share name for hyperv backup." 
+ }, + "hypervNumberRetriesForPersist": { + "type": "integer", + "format": "int32", + "description": "Number of retries when trying to update Hyper-V cluster metadata." + }, + "hypervVirtualDiskIdsToExclude": { + "type": "string", + "description": "A comma-separated list of Hyper-V virtual disk IDs that are excluded from backup." + }, + "enableHypervFastVirtualDiskBuild": { + "type": "boolean", + "description": "Feature flag for creating virtual disk images directly using the fast virtual disk builder, for Hyper-V virtual machine backup jobs." + }, + "migrateFastVirtualDiskBuild": { + "type": "boolean", + "description": "A boolean flag that controls the use of the fast VHDX builder during Hyper-V virtual machine migration. When the flag is 'true', the Hyper-V VM uses the fast VHDX builder the next time the VM is backed up. A value of false disables the fast VHDX builder. This flag is used in combination with the maxFullMigrationStoragePercentage value." + }, + "maxFullMigrationStoragePercentage": { + "type": "integer", + "format": "int32", + "description": "Specifies a percentage of the total available storage space. When performing a full Hyper-V VM backup operation would bring the total used storage space above this threshold, the cluster takes incremental backups instead. This value is used in combination with the migrateFastVirtualDiskBuild flag." + }, + "hypervVerifySnapshotPercentage": { + "type": "integer", + "format": "int32", + "description": "Percentage of the data to verify in a disk. NOTE - The snapshot verification functionality is not currently supported for Hyper-V; setting this to a non-zero value would lead to backup failures." + }, + "hypervEnableParallelFetch": { + "type": "boolean", + "description": "Enables parallel fetching of snapshot data for Hyper-V." + }, + "hypervRemoveUnknownCheckpoints": { + "type": "boolean", + "description": "Indicates whether to remove unknown checkpoints." + } + } + }, + "GlobalInfinityConfig": { + "type": "object", + "properties": { + "vmwareSnapshotJobInitialSleepInMs": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. Operations on vSphere objects through a session initially sleep for the specified interval between attempts." + }, + "vmwareSnapshotJobMaxSleepInMs": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. Operations on vSphere objects through a session sleep for no longer than the length of this interval between attempts." + }, + "esxCbtBuildBlackList": { + "type": "string", + "description": "These are ESX6 build numbers that we have collected from Cassandra dumps that we collect from our customers and host in our EC2 archive2 machine. To get to it, one can run deployment/infrastructure.sh ssh archive2." + }, + "maxNestedVsphereSnapshots": { + "type": "integer", + "format": "int32", + "description": "Maximum number of nested vSphere snapshots to allow VMs to have." + }, + "minVsphereDatastoreFreeSpaceRatio": { + "type": "number", + "format": "double", + "description": "TODO." + }, + "vcenterWsClientConnectTimeoutInMilliseconds": { + "type": "integer", + "format": "int32", + "description": "Connect timeout for vCenter web services client, in milliseconds." + }, + "vcenterWsClientReadTimeoutInMilliseconds": { + "type": "integer", + "format": "int32", + "description": "Read timeout for vCenter web services client, in milliseconds."
+ }, + "backupScriptStatusCheckPeriodMs": { + "type": "integer", + "format": "int32", + "description": "Number of milliseconds after which status of a script running on vm is checked." + }, + "vsphereSnapshotRemovalTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout for vSphere tasks to remove virtual machine snapshots, in seconds." + }, + "vsphereSnapshotTaskTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout for vSphere snapshot related tasks, in seconds." + }, + "vsphereVmManageTaskTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout for vSphere virtual machine management tasks, in seconds." + }, + "vsphereRelocateVmTaskTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout for vSphere virtual machine storage vmotion tasks, in seconds." + }, + "perfMetricTimeAggregationPeriodInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes to aggregate performance metrics over." + }, + "vSphereMetricDatastoreTotalLatencyCounterId": { + "type": "integer", + "format": "int32", + "description": "Metric ID in vSphere API for datastore total latency counter." + }, + "vSphereMetricMaxTotalLatencyCounterId": { + "type": "integer", + "format": "int32", + "description": "Metric ID in vSphere API for max total latency counter." + }, + "vSphereDatastoreTotalReadLatencyCounterId": { + "type": "integer", + "format": "int32", + "description": "Metric ID in vSphere API for datastore total read latency counter." + }, + "shouldUseRealTimeDatastoreMetrics": { + "type": "boolean", + "description": "Whether or not to use the real-time datastore metric method to get the maximum datastore latency." + }, + "vSphereHttpsRequestTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout in seconds for HTTPS requests sent to vSphere." + }, + "nfsExportsOverride": { + "type": "string", + "description": "Override the contents of /etc/exports with the value specified in this parameter. An empty string means no override." + }, + "esxSubnets": { + "type": "string", + "description": "Preferred subnets to reach to esx hosts. Comma separated list e.g. 192.168.2.10/24,10.255.0.2/16." + }, + "bootstrapServicePort": { + "type": "integer", + "format": "int32", + "description": "Port used by Rubrik Bootstrap Service on all platforms." + }, + "backupAgentPort": { + "type": "integer", + "format": "int32", + "description": "Port used by Rubrik Backup Service on all platforms." + }, + "envoyBootstrapServicePort": { + "type": "integer", + "format": "int32", + "description": "Port used by Rubrik Bootstrap Service on Envoy VM." + }, + "envoyBackupAgentPort": { + "type": "integer", + "format": "int32", + "description": "Port used by Rubrik Backup Service on Envoy VM." + }, + "envoyHttpProxyPort": { + "type": "integer", + "format": "int32", + "description": "Port used by HTTP proxy on Envoy VM." + }, + "backupAgentInternalPort": { + "type": "integer", + "format": "int32", + "description": "Port used by Rubrik Backup Service internally to the cluster." + }, + "backupAgentInternalShareMountRoot": { + "type": "string", + "description": "Location inside the Rubrik node where shares will be mounted." + }, + "backupAgentChroot": { + "type": "string", + "description": "Location inside the Rubrik node where the agent chroots." 
+ }, + "enableBackupAgentUpgrade": { + "type": "boolean", + "description": "(For internal use only) Whether to enable automatic upgrade of Rubrik Backup Service." + }, + "enableBackupAgentRollingUpgrade": { + "type": "boolean", + "description": "(For internal use only) Whether to enable automatic rolling upgrade of Rubrik Backup Service." + }, + "enableBackupAgentMulticlusterCertificatesUpgrade": { + "type": "boolean", + "description": "(For internal use only) Whether to enable updating supplemental certificates of Rubrik Backup Service. This lets bootstrap agent talk to multiple clusters." + }, + "generateConnectorJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency of job generating the agent connector." + }, + "enablePackageAixBackupAgent": { + "type": "boolean", + "description": "Whether to enable packaging AIX backup agent." + }, + "enablePackageSunosBackupAgent": { + "type": "boolean", + "description": "Whether to enable packaging Solaris backup agent." + }, + "enablePackageHpuxBackupAgent": { + "type": "boolean", + "description": "Whether to enable packaging HPUX backup agent." + }, + "defaultMssqlLogBackupFrequencyInSeconds": { + "type": "integer", + "format": "int32", + "description": "The default log backup frequency for MSSQL databases, in seconds." + }, + "mssqlRecoverableChainReadBackoffDurationInMs": { + "type": "integer", + "format": "int32", + "description": "Backoff duration between failed read attempts of a MSSQL recoverable chain." + }, + "mssqlRecoverableChainReadAttempts": { + "type": "integer", + "format": "int32", + "description": "Number of times to try reading a MSSQL recoverable chain." + }, + "mssqlMaximumLogsPerLogChainPage": { + "type": "integer", + "format": "int32", + "description": "The maximum number of serialized log metadata tuples to be stored in a MSSQL log chain page." + }, + "mssqlRecoverableChainMaximumErrors": { + "type": "integer", + "format": "int32", + "description": "The maximum number of serialized log backup error metadata to be stored in one MSSQL recoverable chain." + }, + "mssqlExpireRehydratedRangesInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent jobs per node to expire rehydrated MSSQL recoverable ranges." + }, + "windowsX64AgentDir": { + "type": "string", + "description": "Directory where we store backup agent files for Windows 64-bit platform." + }, + "windowsX64AgentVersionFileName": { + "type": "string", + "description": "Name of the backup agent version file for Windows 64-bit platform." + }, + "windowsX64AgentExeFileName": { + "type": "string", + "description": "Name of the backup agent executable for Windows 64-bit platform." + }, + "unixLikeAgentVersionFileName": { + "type": "string", + "description": "File containing the agent version for Linux 64-bit and AIX." + }, + "agentNumericVersionFileName": { + "type": "string", + "description": "File containing the three digit numeric product version." + }, + "envoyAgentVersionFileName": { + "type": "string", + "description": "File containing the agent version for Envoy." + }, + "envoyBackupAgentBinaryPath": { + "type": "string", + "description": "Location of the Envoy Backup Agent binary on the cluster." + }, + "envoyUpgradeTarPath": { + "type": "string", + "description": "Location of the Envoy Upgrade tarball on the cluster." + }, + "refreshEnvoyStatusJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent jobs per node." 
+ }, + "refreshEnvoyStatusJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Refresh envoy status job frequency in minutes." + }, + "windowsClusterUpdateRetryAttempts": { + "type": "integer", + "format": "int32", + "description": "Maximum number of attempts to update a WindowsCluster during discovery or host removal." + }, + "managedObjectApiCacheTtlSecs": { + "type": "integer", + "format": "int32", + "description": "Cache TTL for /managed_object endpoint (in seconds)." + }, + "vmwareRestoreWriteParallelism": { + "type": "integer", + "format": "int32", + "description": "Parallelism to be used by agent in data restore when sending data to a VMWare VM." + }, + "vmwareRestoreReadParallelism": { + "type": "integer", + "format": "int32", + "description": "Parallelism to be used by agent in data restore when reading data from SDFS and writing to a VMWare VM." + }, + "restoreFileTransferTimeoutFactorInMBps": { + "type": "number", + "format": "double", + "description": "We calculate the file transfer to guest timeout using this value. For example, if we want to send a 1MB file and the value is 0.05 (MBps), then the timeout is 20s. This value is essentially the lowest MBps transfer rate tolerated when we are pushing a file to the guest VM. Be aware of VMware KB 2144004 - the transfer rate to guest VMs is limited to 2MBps." + }, + "minRestoreFileTransferTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "The minimum file transfer timeout in seconds during the restore file job. This is to protect against too small of timeouts for small files." + }, + "restoreFileCreateDirOrLinkTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "The amount of time to wait for creating a guest directory or symlink in the restore file job before timing out." + }, + "enableHostNameOverrideForPushFileToGuest": { + "type": "boolean", + "description": "Whether we allow overriding the API-returned host IP with the VMware host's name when we push a file to guest." + }, + "enableHostNameOverrideForDownloadFileFromGuest": { + "type": "boolean", + "description": "Whether we allow overriding the API-returned host IP with the VMware host's name when we download a file from guest." + }, + "vssSnapshotTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "The timeout for a VSS snapshot operation." + }, + "jobPingIntervalInSeconds": { + "type": "integer", + "format": "int32", + "description": "Interval at which job sends heartbeat requests to the windows agent." + }, + "jobPingShutdownTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time up to which the job waits for the agent ping request to complete, before terminating the thread forcibly." + }, + "guestOperationRetries": { + "type": "integer", + "format": "int32", + "description": "How many times to retry guest operations to handle transient guest errors." + }, + "guestOperationRetryTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Number of seconds to wait before retrying guest operation after a transient guest error." + }, + "mssqlEnableRecoveryCompatibilityCheck": { + "type": "boolean", + "description": "Whether to check the export/restore target instance is compatible with the source database." + }, + "downloadVsphereDiagnosticsLogOnFailure": { + "type": "boolean", + "description": "Whether to download vSphere log diagnostics during undo of vSphere tasks." 
+ }, + "maxBytesToDownloadFromVmwareLog": { + "type": "integer", + "format": "int32", + "description": "The maximum bytes we download from the vmware.log, from the end of file. If it is larger than the log size, it will retrieve the complete file." + }, + "collectAclsDuringSnapshotForWindowsVms": { + "type": "boolean", + "description": "Whether we should collect file permissions for Windows VMs during snapshot." + }, + "supportedESXHostVersion": { + "type": "string", + "description": "The supported vmware ESX HOST version list." + }, + "vmwareProxyVmNamePrefix": { + "type": "string", + "description": "The prefix prepended to names of proxy VMs created by Rubrik in user vCenter. Proxy VMs are created for storage array based ingest. This prefix contains a magic number which serves as a unique identifier. Such VMs should not be refreshed during VmwareRefresh. This prefix will also be used to cleanup leftover proxy VMs." + }, + "vmwareBackupCleanupArraySnapshotsFromHost": { + "type": "boolean", + "description": "Whether to cleanup array snapshot datastores from vmware host after the backup is done. Cleanup requires an hba rescan so we might want to skip it in cases where the penalty of rescan outweighs the annoyance of having unused datastores lying around." + }, + "shouldDeleteRestoreFileGuestWorkingDir": { + "type": "boolean", + "description": "Whether we should delete the guest VM temporary working dir created by the RESTORE_FILE job. This toggle is useful when we want to examine the files created in the tmp dir while debugging customer cases." + }, + "removeVmwareSnapshotRetryDurationInSeconds": { + "type": "integer", + "format": "int32", + "description": "The time period during which removing VMware snapshot operations should be tried again." + }, + "removeVmwareSnapshotMaxSleepInSeconds": { + "type": "integer", + "format": "int32", + "description": "The maximum amount of time to sleep before trying to remove the VMware snapshot again." + }, + "resolveVmwareSnapshotIssuesDurationInSeconds": { + "type": "integer", + "format": "int32", + "description": "The time period during which resolving VMware snapshot issue operations should be tried again." + }, + "resolveVmwareSnapshotIssuesMaxTries": { + "type": "integer", + "format": "int32", + "description": "The maximum times that resolving VMware snapshot issue operation should be tried." + }, + "mssqlLogBackupTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "The timeout for a SQL Server log backup operation." + }, + "mssqlDefaultMaxDataStreamsPerDatabase": { + "type": "integer", + "format": "int32", + "description": "The default value of maximum number of data streams per database." + }, + "hostPublicCertificateFolder": { + "type": "string", + "description": "A folder where the public certificates for known hosts will be stored." + }, + "allAuditHostsCerts": { + "type": "string", + "description": "A file that holds all of the host certs that have auditing enabled." + }, + "vmPublicCertificateFolder": { + "type": "string", + "description": "A folder where the public certificates for known virtual machine will be stored." + }, + "envoyPublicCertificateFolder": { + "type": "string", + "description": "A folder where the public certificates for known envoy vms will be stored." + }, + "agentMulticlusterCertificatesFolder": { + "type": "string", + "description": "A folder where cluster public certificates will be stored to sync to agents, to enable multicluster agent support." 
+ }, + "vssThreadTimeOutInMinute": { + "type": "integer", + "format": "int32", + "description": "The number of minutes that windows background thread that wait if there is no status update. This will prevent the thread that holds vss mutex hang there forever and not release mutex." + }, + "dataTransferNotificationIntervalInMInute": { + "type": "integer", + "format": "int32", + "description": "The interval in minute that cluster notify the windows client that transfer is still in progress." + }, + "windowsMsiPath": { + "type": "string", + "description": "The path where we store the msi for windows agent." + }, + "scvmmDeployAgentScriptPath": { + "type": "string", + "description": "Name of script for deploying window agent to hyperv host from SCVMM custom resource." + }, + "scvmmDeploymentReadMePath": { + "type": "string", + "description": "Name of read me for deploying window agent to hyperv host from SCVMM custom resource." + }, + "certificateFileNameForWindows": { + "type": "string", + "description": "The cluster certificate file name for windows agent." + }, + "vmwareFileRestoreUseZip": { + "type": "boolean", + "description": "Should zip fle restoration method be attempted (unconditionally fallback to file-by-file on False)." + }, + "vmwareFileRestoreUseNtfsSecAudit": { + "type": "boolean", + "description": "Should restoring Windows ACLs from raw block device via `ntfs-3g.secaudit` be attempted (unconditionally fallback to the list generated during backup on False)." + }, + "guestOsWithVssSupport": { + "type": "string", + "description": "Guest Operating Systems for which VSS consistent snapshots are supported." + }, + "sambaShareNameLength": { + "type": "integer", + "format": "int32", + "description": "Recommended length for the name of a Samba share. A name of this length composed of random lowercase alphanumeric characters should be resistant to brute-force guessing." + }, + "vmwareVixDefaultTransportModes": { + "type": "string", + "description": "Priority order of transport modes to use by default for vix disk connections." + }, + "tlsNameVerificationForVmware": { + "type": "boolean", + "description": "Whether to verify name in HTTPS session when connecting to Vmware hosts." + }, + "vmwareVerifyIncrementalsMaxBytes": { + "type": "integer", + "format": "int32", + "description": "Maximum number of bytes to verify for a Vmware Incremental." + }, + "vmwareVerifyIncrementalsMaxPercent": { + "type": "integer", + "format": "int32", + "description": "Maximum % of disk size to verify for a Vmware Incremental. 100 means full verification, and vmwareVerifyIncrementalsMaxBytes is ignored; otherwise, number of bytes verified equals to minimal of vmwareVerifyIncrementalsMaxBytes and % of disk size. 0 means verification is disabled." + }, + "sequenceNumberGenerationMaxAttempts": { + "type": "integer", + "format": "int32", + "description": "The number of times to retry generating a sequence number." + }, + "vmwareMountNfsCommonOptions": { + "type": "string", + "description": "Comma separated list of common NFS share options used by all VMware virtual machine live mounts." + }, + "vmwareEnableCbtFailureAlertGracePeriodInHours": { + "type": "integer", + "format": "int32", + "description": "Number of hours to wait between notifications that VMware CBT cannot be enabled." + }, + "esxDatastoreAccessibleTimeoutInSecs": { + "type": "integer", + "format": "int32", + "description": "The number of seconds to wait a new NAS datastore to become accessible." 
+ }, + "vmLookforHostRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries if we can't find the host of virtual machine." + }, + "vmLookforHostSleepTimeInMs": { + "type": "integer", + "format": "int32", + "description": "The time to sleep if we fail previous try to retrieve a host object for a given virtual machine." + }, + "vSphereSessionEnableSimulation": { + "type": "boolean", + "description": "If set, vSpheres added with vSphereSimulatorUsername as the username will connect to a simulator instead. Used for testing." + }, + "vSphereSimulatorUsername": { + "type": "string", + "description": "If simulation is enabled, this username indicates that a vSphere is a simulator when added through the UI." + }, + "vSphereSimulatorDataStoreName": { + "type": "string", + "description": "Name prefix used for simulation datacenters. Vix uses this to discriminate between simulated and real virtual disks." + }, + "vSphereMinimunHardwareVersion": { + "type": "integer", + "format": "int32", + "description": "The minimum vSphere hardware version that we will support." + }, + "vmwareAdvancedConfigsToExclude": { + "type": "string", + "description": "The list of VMware virtual machine advanced configs to be excluded when mount or export a new virtual machine." + }, + "autoInstallAgentOnVms": { + "type": "boolean", + "description": "Determines whether to automatically install the Rubrik Backup Service on virtual machines. Default is false, which means automatic installation is disabled. Set to true to enable automatic installation." + }, + "iscsiDiskIdMappingSleepMs": { + "type": "integer", + "format": "int32", + "description": "Time to sleep after attaching to iSCSI target before mapping disk ids to block device paths using entries in /dev/disk/by-id which may take a few seconds to populate." + }, + "vsphereDeleteNasDatastoreRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for deleting a NAS datastore from vSphere." + }, + "vsphereDeleteNasDatastoreRetryIntervalSecs": { + "type": "integer", + "format": "int32", + "description": "Number of seconds to sleep between retries when deleting a NAS daatastore from vSphere." + }, + "windowsAgentStartupPeriodSecs": { + "type": "integer", + "format": "int32", + "description": "Number of seconds taken by windows agent to start listening at the port after being installed." + }, + "secureAgentServerSocketTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout while waiting for Agent server response." + }, + "agentServerSocketTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout while waiting for Agent server response." + }, + "agentServerVerifyFlatFileTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout while waiting for agent server response when verifying flat file fingerprints." + }, + "resetCbtOnValidationFailure": { + "type": "boolean", + "description": "If VMware incremental snapshot fails on CBT validation, force the next snapshot to be a full." + }, + "saveVmwareCbtFile": { + "type": "boolean", + "description": "Whether to save the CBT file for VMware incremental snapshots." + }, + "logRetentionDays": { + "type": "integer", + "format": "int32", + "description": "Number of days the log files are retained on the cluster." + }, + "hostLogsDir": { + "type": "string", + "description": "Directory where log files and folders are present." 
+ }, + "stagingBase": { + "type": "string", + "description": "Base path where the log directory is present." + }, + "unmanagedSnapshotCountThreshold": { + "type": "integer", + "format": "int32", + "description": "Number of an object's non-gc-ed snapshots over which we return stale cached values for the object's snapshots in unmanaged objects query." + }, + "vmwareIgnoreRestoreErrors": { + "type": "boolean", + "description": "Tolerate failure during file write or creation for vSphere file restore jobs." + }, + "useFloatingIpForMounts": { + "type": "boolean", + "description": "Use floating ips for VMware Live Mounts." + }, + "envoyHostPublicCertificate": { + "type": "string", + "description": "Public certificate of the envoy host." + }, + "storageArrayScriptStatusCheckPeriodMs": { + "type": "integer", + "format": "int32", + "description": "Number of milliseconds after which status of a script running on host is checked." + }, + "envoyVersionFileName": { + "type": "string", + "description": "Use the version mentioned in the file name for Envoy Agent & Ova." + }, + "vsphereConnectionTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "vSphere Connection timeout limit in milliseconds." + }, + "vCenterUUIDCacheExpireTimeInMs": { + "type": "integer", + "format": "int32", + "description": "How long we cache a vCenter UUID in cache to avoid frequent logins to vCenter." + }, + "envoyVmScriptStatusCheckPeriodMs": { + "type": "integer", + "format": "int32", + "description": "Number of milliseconds after which status of a script running on VM is checked." + }, + "envoyVmScriptTerminateTimeoutMs": { + "type": "integer", + "format": "int32", + "description": "Number of milliseconds after which a script that has received SIGTERM must terminate. After this a SIGKILL will be sent." + }, + "vmwareDatastoreMaxRefCount": { + "type": "integer", + "format": "int32", + "description": "This value specifies the maximum number of VMware backup jobs that can run concurrently on the same datastore. This maximum only applies to backup jobs based on storage arrays." + }, + "nonBackupHostThrottleMaxRefCount": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent non-backup operations per ESXi host." + }, + "allowAutoProtectForExportedVm": { + "type": "boolean", + "description": "A boolean value that specifies whether the exported virtual machine inherits the SLA assigned to the vSphere parent. When this value is set to false, the exported virtual machine is marked as unprotected. When this value is set to true, the exported virtual machine inherits the SLA assigned to the vSphere parent." + }, + "ignoreVappConfigWhenRestore": { + "type": "boolean", + "description": "Boolean value that determines whether the Rubrik cluster ignores the vAppConfig values of the source virtual machine when creating a virtual machine from a snapshot for Instant Recovery, Live Mount, or Export. When true, the Rubrik cluster ignores the vAppConfig values of the source virtual machine. When false, the Rubrik cluster applies the vAppConfig values of the source virtual machine." + }, + "forceArchiveVsphereObjectsInRefresh": { + "type": "boolean", + "description": "A Boolean value that determines whether to force the archiving of vSphere objects that are not visible to the Rubrik service account due to path permissions. When 'true', vSphere objects that are not visible but still detectable as existing are archived. 
When 'false', vSphere objects that are not visible but still detectable as existing are left unarchived." + }, + "rescanStorageDuringFullVcenterRefresh": { + "type": "boolean", + "description": "Boolean value that determines whether to instruct all ESXi hosts to rescan for and retire unused VMFS datastores and LUN storage devices during a full vCenter refresh. Set to true to trigger a rescan. Set to false to prevent a rescan." + }, + "downloadVmxFileDuringSnapshot": { + "type": "boolean", + "description": "Boolean value that specifies whether we should save the vmx path and download the file content when we take a snapshot." + }, + "sapLogsRelativePathInSdScratch": { + "type": "string", + "description": "SAP logs scratch directory." + }, + "sapBackintStatusPersistRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of attempts in order to persist a SAP backint status to SerializedMetadata." + }, + "useRouteBasedIpSelectionForHyperv": { + "type": "boolean", + "description": "Use route-based IP selection for hyperv." + }, + "vmwareLinkedCloneCreationTimeoutInMilliseconds": { + "type": "integer", + "format": "int32", + "description": "Timeout for vsphere task to create a linked clone." + }, + "vmDeletionRetriesSleepInMilliseconds": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. Attempts to delete a VMware virtual machine wait for the specified interval before retrying." + }, + "backupJobTimeoutSeconds": { + "type": "integer", + "format": "int32", + "description": "Specifies the backup job timeout value before failing the job." + }, + "backupJobHeartbeatTimeoutSeconds": { + "type": "integer", + "format": "int32", + "description": "Specifies the timeout value before removing the snapshot." + }, + "execCommandTimeoutSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout in seconds for generic exec commands." + }, + "hostRegisterAndRefreshRequestTimeoutSeconds": { + "type": "integer", + "format": "int32", + "description": "The host register and host refresh request timeout in seconds." + }, + "oracleDiscoveryTimeoutSeconds": { + "type": "integer", + "format": "int32", + "description": "Oracle discovery request timeout in seconds." + }, + "sleepTimeForCheckingRbaUpgradeJobStatusInMs": { + "type": "integer", + "format": "int32", + "description": "Sleep time between checks for an rba upgrade job execution status." + }, + "waitForRbaUpgradeJobThresholdInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes to wait for an rba upgrade job to finish before logging a warning message." + }, + "enableBackupAgentCoreDumps": { + "type": "boolean", + "description": "The flag to enable internal backup agent core dump." + }, + "enableAgentServerCoreDumps": { + "type": "boolean", + "description": "The flag to enable agent server core dump." + }, + "solarisCpuArchitectureI386": { + "type": "string", + "description": "CPU architecture name for Solaris on i386." + }, + "solarisCpuArchitectureSparc": { + "type": "string", + "description": "CPU architecture name for Solaris on sparc." + }, + "solarisAgentPathPrefix": { + "type": "string", + "description": "Path prefix of the Solaris backup agent." + }, + "solarisAgentFileName": { + "type": "string", + "description": "File name of the Solaris backup agent." + }, + "nfsExportTimeoutInSec": { + "type": "integer", + "format": "int32", + "description": "Timeout for NFS export/unexport."
+ }, + "waitForGuestShutdownTimeoutInMillis": { + "type": "integer", + "format": "int32", + "description": "This represents the total wait time for guest shutdown." + }, + "waitForGuestShutdownCheckIntervalInMillis": { + "type": "integer", + "format": "int32", + "description": "This represents the time interval when waiting for guest shutdown." + }, + "hpuxAgentBinaryPath": { + "type": "string", + "description": "HPUX backup agent main binary path." + }, + "hpuxAgentFlagsPath": { + "type": "string", + "description": "HPUX backup agent flags path." + }, + "ignoreSplitMjfSetJobContextErrors": { + "type": "boolean", + "description": "Whether to ignore error when setting JobContext on SplitMjfs." + }, + "timeoutAixBinaryPath": { + "type": "string", + "description": "File path to the AIX timeout binary on the cluster." + }, + "oracleAgentLinuxBinaryPath": { + "type": "string", + "description": "File path to the Linux Oracle binary on the cluster." + }, + "oracleAgentAixBinaryPath": { + "type": "string", + "description": "File path to the AIX Oracle binary on the cluster." + }, + "timeoutBinaryPathOnRemote": { + "type": "string", + "description": "File path to the timeout binary on the remote host." + }, + "oracleAgentBinaryPathOnRemote": { + "type": "string", + "description": "File path to the Oracle binary on the remote host." + }, + "agentVersionPathOnRemote": { + "type": "string", + "description": "File path to the agent version file on the remote host." + }, + "deleteBinaryRetries": { + "type": "integer", + "format": "int32", + "description": "An integer that specifies the number of times to retry deleting a binary file on a remote host." + }, + "deleteBinarySleepInMs": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. The Rubrik cluster sleeps for the specified interval before attempting to delete a binary file on a remote host." + }, + "copyBinaryParallelism": { + "type": "integer", + "format": "int32", + "description": "An integer that specifies the maximum number of concurrent requests for transferring a binary file from the Rubrik cluster to a remote host." + }, + "copyBinaryRetries": { + "type": "integer", + "format": "int32", + "description": "An integer that specifies the number of times to retry the copy of a binary file from the Rubrik cluster to a remote host." + }, + "copyBinarySleepInMs": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. The Rubrik cluster sleeps for the specified interval before attempting to copy a binary file to a remote host." + }, + "chmodBinaryRetries": { + "type": "integer", + "format": "int32", + "description": "An integer that specifies the number of times to retry changing the mode of a binary file on a remote host." + }, + "chmodBinarySleepInMs": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. The Rubrik cluster sleeps for the specified interval before attempting to change the mode of a binary file on a remote host." + }, + "makePrimaryRetries": { + "type": "integer", + "format": "int32", + "description": "Integer specifying the number of times to retry the operation for RPC to become the primary cluster for the agent." + }, + "makePrimarySleepInMs": { + "type": "integer", + "format": "int32", + "description": "Specifies the interval, in milliseconds, the Rubrik cluster sleeps before attempting to become the primary cluster for the agent." 
+ }, + "remoteFileMode": { + "type": "integer", + "format": "int32", + "description": "An integer in decimal that specifies what mode to use when changing the permissions of a remote file. For example, a value of 493 corresponds to 0755 permissions." + }, + "vmwareHydrationJobMaxThrottledDueToVcenterIssuesCount": { + "type": "integer", + "format": "int32", + "description": "An integer representing the maximum number of times that a hydration job throttles if it finds something wrong with the vCenter resources provided to it." + }, + "vmwareHydrationJobMinJobsFailedBeforeThrowingValidationError": { + "type": "integer", + "format": "int32", + "description": "An integer representing the minimum number of hydration jobs that must fail before the validation API throws an error. This helps avoid throwing errors due to flakiness." + }, + "shouldClearManagedObjectInfo": { + "type": "boolean", + "description": "Boolean value indicating whether we should clear the managedBy field of the VM." + }, + "ipv4RouteSourceTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout for request to get source IPv4 address for route to destination IPv4 address." + }, + "shouldCleanUpEnvoyOvaWorkingDir": { + "type": "boolean", + "description": "Boolean indicating whether we should clean up the directory where we generate the Envoy OVA to help debug OVA generation failures." + }, + "beginCreateWithRetriesForTranscodeSleepMs": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. Attempts to beginCreate wait for the specified interval before retrying when there are too many JournalVPFs." + }, + "beginCreateWithRetriesMaxWaitTimeMs": { + "type": "integer", + "format": "int32", + "description": "Maximum time to wait for beginCreate retries to succeed." + }, + "enableDbLogBackupDelayNotification": { + "type": "boolean", + "description": "Boolean value indicating whether we should check the database log backup delay and send an email notification when the delay is longer than the threshold." + }, + "dbLogBackupDelayThresholdInMin": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in minutes. The Rubrik cluster creates an email notification when a log backup is delayed longer than the specified interval." + }, + "dbLogBackupDelayNotificationFrequencyInMin": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in minutes. When a log backup is delayed for longer than the threshold, the Rubrik cluster sends an email notification at the specified interval." + }, + "dbLogBackupDelayThresholdMinValueInMin": { + "type": "integer", + "format": "int32", + "description": "Specifies a value in minutes. The value specifies the minimum duration of delay for a log backup before the Rubrik cluster creates an email notification." + }, + "dbLogBackupDelayThresholdMaxValueInMin": { + "type": "integer", + "format": "int32", + "description": "Specifies a value in minutes. The value specifies the maximum duration of delay for a log backup before the Rubrik cluster creates an email notification." + }, + "dbLogBackupDelayNotificationFrequencyMinValueInMin": { + "type": "integer", + "format": "int32", + "description": "Specifies a value in minutes. The frequency of emails sent by the Rubrik cluster about log backup delays cannot be less than the specified value." 
+ }, + "dbLogBackupDelayNotificationFrequencyMaxValueInMin": { + "type": "integer", + "format": "int32", + "description": "Specifies a value in minutes. The frequency of emails sent by the Rubrik cluster about log backup delays cannot be more than the specified value." + }, + "dbLogBackupDelayNotificationJobRetries": { + "type": "integer", + "format": "int32", + "description": "Specifies the maximum number of retries for the database log backup delay notification job." + }, + "snappableFileRestoreParallelism": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent files to restore." + }, + "linuxAgentBuildVersionPath": { + "type": "string", + "description": "Location of the Linux agent build version path on the cluster." + }, + "aixAgentBuildVersionPath": { + "type": "string", + "description": "Location of the AIX agent build version path on the cluster." + }, + "agentMaxMulticlusterReplicationCerts": { + "type": "integer", + "format": "int32", + "description": "Maximum number of replication certificates to copy to agent. While the certificates are small, this limit defends against pathological cases that would use too much space or take a long time to deploy." + }, + "solarisSparcAgentBuildVersionPath": { + "type": "string", + "description": "Location of the Solaris Sparc agent build version path on the cluster." + }, + "solarisI386AgentBuildVersionPath": { + "type": "string", + "description": "Location of the Solaris i386 agent build version path on the cluster." + }, + "hpuxAgentBuildVersionPath": { + "type": "string", + "description": "Location of the HPUX agent build version path on the cluster." + }, + "vmwareEnablePipelinedFileRestore": { + "type": "boolean", + "description": "Whether to enable the agent to use the Fileset restore pipeline when performing VMware file restore." + }, + "enableVmwareInPlaceRecovery": { + "type": "boolean", + "description": "Indicates whether in-place recovery should be enabled for VMware virtual machines or not." + }, + "hostConfigurationCacheLimit": { + "type": "integer", + "format": "int32", + "description": "the maximum number of entries of cached host configurations." + } + } + }, + "GlobalInfraConfig": { + "type": "object", + "properties": { + "gcIdempotentOperationFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for GC idempotent operation jobs in minutes." + }, + "gcIdempotentOperationTtlInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time in seconds after which the idempotent operation entries will be GCed." + } + } + }, + "GlobalJarvisConfig": { + "type": "object", + "properties": { + "clusterwideStatsUpdaterRunCountPerJob": { + "type": "integer", + "format": "int32", + "description": "Number of times stats updater runs in a job." + }, + "clusterwideStatsUpdaterJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent clusterwide stats updater jobs per node." + }, + "logUploadRegion": { + "type": "string", + "description": "S3 bucket region where the data is uploaded." + }, + "productMetricsUploadRealm": { + "type": "string", + "description": "Specifies the realm which is used to determine which S3 bucket product metrics bundles will be uploaded to." + }, + "productMetricsUploadBucket": { + "type": "string", + "description": "S3 bucket where metrics metadata is uploaded." + }, + "logUploadBucket": { + "type": "string", + "description": "S3 bucket where support bundle data is uploaded." 
+ }, + "logUploadAccessKey": { + "type": "string", + "description": "S3 Access Keys for uploading support bundle and metadata. These keys only have write permission to the bucket." + }, + "logUploadSecretKey": { + "type": "string", + "description": "S3 Secret Keys for uploading support bundle and metadata." + }, + "systemStats": { + "type": "string", + "description": "Specifies whether the sending of the system stats (metrics) through InfluxStack is enabled." + }, + "capacityBundleUploadEnabled": { + "type": "boolean", + "description": "True if capacity bundle upload is enabled." + }, + "capacityBundleUploaderFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for stats collecting jobs in minutes." + }, + "capacityBundleNotificationDay": { + "type": "integer", + "format": "int32", + "description": "Day on which we send capacity bundle notifixation." + }, + "errorLogs": { + "type": "string", + "description": "Specifies whether the sending of the error logs through heka is enabled." + }, + "uploads": { + "type": "string", + "description": "Specifies whether the uploads (support_bundle, report_bundle, metadata tables upload) are enabled." + }, + "traces": { + "type": "string", + "description": "Specifies whether the sending of the traces is enabled." + }, + "telemetryLevel": { + "type": "string", + "description": "Cluster telemetry level." + }, + "enableReportBundleUpload": { + "type": "boolean", + "description": "Whether to enable global report bundle upload job." + }, + "enableMetricMetadataUpload": { + "type": "boolean", + "description": "Whether to enable global metric metadata upload job." + }, + "uploadReportBundleFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for report bundle upload job." + }, + "uploadMetricsMetadataFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for metrics metadata upload job." + } + } + }, + "GlobalJobIoPriorityConfig": { + "type": "object", + "properties": { + "API_INTERACTIVE_REQUEST": { + "type": "string", + "description": "IO priority for API generated interactive query." + }, + "BACKUP_FAILURE_REMEDIATION_RETRY": { + "type": "string", + "description": "IO priority for BACKUP_FAILURE_REMEDIATION_RETRY job." + }, + "BACKUP_INTEGRITY_VERIFICATION": { + "type": "string", + "description": "IO priority for Backup verification job." + }, + "DOWNLOAD_CLOUD_NATIVE_FILE": { + "type": "string", + "description": "IO priority for DOWNLOAD_CLOUD_NATIVE_FILE jobs." + }, + "DOWNLOAD_MSSQL_BACKUP_FILES": { + "type": "string", + "description": "IO priority for DOWNLOAD_MSSQL_BACKUP_FILES jobs." + }, + "DOWNLOAD_SNAPPABLE_FILE": { + "type": "string", + "description": "IO priority for DOWNLOAD_SNAPPABLE_FILE jobs." + }, + "DOWNLOAD_SNAPSHOT_BLOB_CHAIN_FROM_ARCHIVE": { + "type": "string", + "description": "IO priority for DOWNLOAD_SNAPSHOT_BLOB_CHAIN_FROM_ARCHIVE jobs." + }, + "DOWNLOAD_SNAPSHOT_FROM_ARCHIVE": { + "type": "string", + "description": "IO priority for DOWNLOAD_SNAPSHOT_FROM_ARCHIVE jobs." + }, + "EXPORT_APP_BLUEPRINT_SNAPSHOT": { + "type": "string", + "description": "IO priority for EXPORT_APP_BLUEPRINT_SNAPSHOT jobs." + }, + "EXPORT_CLOUD_NATIVE_VM_SNAPSHOT": { + "type": "string", + "description": "IO priority for EXPORT_CLOUD_NATIVE_VM_SNAPSHOT jobs." + }, + "EXPORT_HYPERV_SNAPSHOT": { + "type": "string", + "description": "IO priority for EXPORT_HYPERV_SNAPSHOT jobs." 
+ }, + "EXPORT_NUTANIX_SNAPSHOT": { + "type": "string", + "description": "IO priority for EXPORT_NUTANIX_SNAPSHOT jobs." + }, + "EXPORT_ORACLE_SNAPSHOT": { + "type": "string", + "description": "IO priority for EXPORT_ORACLE_SNAPSHOT jobs." + }, + "EXPORT_ORACLE_TABLESPACE": { + "type": "string", + "description": "IO priority for EXPORT_ORACLE_TABLESPACE jobs." + }, + "EXPORT_STORAGE_ARRAY_SNAPSHOT": { + "type": "string", + "description": "IO priority for EXPORT_STORAGE_ARRAY_SNAPSHOT jobs." + }, + "EXPORT_VMWARE_SNAPSHOT": { + "type": "string", + "description": "IO priority for EXPORT_VMWARE_SNAPSHOT jobs." + }, + "INPLACE_EXPORT_VMWARE_SNAPSHOT": { + "type": "string", + "description": "IO priority for INPLACE_EXPORT_VMWARE_SNAPSHOT jobs." + }, + "EXPORT_VMWARE_SNAPSHOT_TO_ESXHOST": { + "type": "string", + "description": "IO priority for EXPORT_VMWARE_SNAPSHOT_TO_ESXHOST jobs." + }, + "FILESET_RESTORE_FILE": { + "type": "string", + "description": "IO priority for FILESET_RESTORE_FILE jobs." + }, + "HDFS_RESTORE_FILE": { + "type": "string", + "description": "IO priority for HDFS_RESTORE_FILE jobs." + }, + "INPLACE_RESTORE_CLOUD_NATIVE_VM": { + "type": "string", + "description": "IO priority for INPLACE_RESTORE_CLOUD_NATIVE_VM jobs." + }, + "KVSNAPSHOT_FILE_RESTORE": { + "type": "string", + "description": "IO priority for data read from KVSnapshot Service." + }, + "MANAGED_VOLUME_DOWNLOAD_SNAPPABLE_FILE": { + "type": "string", + "description": "IO priority for operations done on mounts created by DOWNLOAD_SNAPPABLE_FILE jobs for Managed Volume snapshots." + }, + "MANAGED_VOLUME_EXPORT_SNAPSHOT": { + "type": "string", + "description": "IO priority for MANAGED_VOLUME_EXPORT_SNAPSHOT jobs." + }, + "MANAGED_VOLUME_MAIN_MOUNT_READ_ONLY": { + "type": "string", + "description": "IO priority for operations done on read-only Managed Volume mounts." + }, + "MANAGED_VOLUME_RESIZE": { + "type": "string", + "description": "IO priority for MANAGED_VOLUME_RESIZE jobs." + }, + "MANAGED_VOLUME_SNAPSHOT_MOUNTER": { + "type": "string", + "description": "IO priority for operations done on mounts created by snpashot mounter tool for managed volumes." + }, + "MANAGED_VOLUME_SNAPSHOT_MOUNT_READ_ONLY": { + "type": "string", + "description": "IO priority for operations done on Managed Volume snapshot-exported mounts." + }, + "MOUNT_APP_BLUEPRINT_SNAPSHOT": { + "type": "string", + "description": "IO priority for MOUNT_APP_BLUEPRINT_SNAPSHOT jobs." + }, + "MOUNT_HYPERV_SNAPSHOT": { + "type": "string", + "description": "IO priority for MOUNT_HYPERV_SNAPSHOT jobs." + }, + "MOUNT_ORACLE_SNAPSHOT": { + "type": "string", + "description": "IO priority for MOUNT_ORACLE_SNAPSHOT jobs." + }, + "MOUNT_SNAPSHOT": { + "type": "string", + "description": "IO priority for MOUNT_SNAPSHOT jobs." + }, + "MOUNT_VOLUME_GROUP_SNAPSHOT": { + "type": "string", + "description": "IO priority for MOUNT_VOLUME_GROUP_SNAPSHOT jobs." + }, + "MSSQL_DB_MOUNT": { + "type": "string", + "description": "IO priority for MSSQL_DB_MOUNT jobs." + }, + "RESTORE_FILE": { + "type": "string", + "description": "IO priority for RESTORE_FILE jobs." + }, + "RESTORE_HYPERV_FILE": { + "type": "string", + "description": "IO priority for RESTORE_HYPERV_FILE jobs." + }, + "RESTORE_MSSQL_DB": { + "type": "string", + "description": "IO priority for RESTORE_MSSQL_DB jobs." + }, + "RESTORE_NUTANIX_FILE": { + "type": "string", + "description": "IO priority for RESTORE_NUTANIX_FILE jobs." 
+ }, + "RESTORE_STORAGE_ARRAY_FILE": { + "type": "string", + "description": "IO priority for RESTORE_STORAGE_ARRAY_FILE jobs." + }, + "RESTORE_VOLUME_GROUP_FILE": { + "type": "string", + "description": "IO priority for RESTORE_VOLUME_GROUP_FILE jobs." + }, + "SDFS_INTERNAL_INTERACTIVE": { + "type": "string", + "description": "IO priority for SDFS_INTERNAL_INTERACTIVE jobs." + }, + "SNAPSHOT_MOUNTER": { + "type": "string", + "description": "IO priority for SNAPSHOT_MOUNTER jobs." + }, + "UNMOUNT_ORACLE_SNAPSHOT": { + "type": "string", + "description": "IO priority for UNMOUNT_ORACLE_SNAPSHOT jobs." + }, + "UNMOUNT_SNAPSHOT": { + "type": "string", + "description": "IO priority for UNMOUNT_SNAPSHOT jobs." + }, + "VMWARE_VM_POWER_ON": { + "type": "string", + "description": "IO priority for VMWARE_VM_POWER_ON jobs." + }, + "API_LATENCY_SENSITIVE_REQUEST": { + "type": "string", + "description": "IO priority for API generated latency sensitive query." + }, + "CDP_INGEST": { + "type": "string", + "description": "IO priority for CDP_INGEST jobs." + }, + "SDFS_INTERNAL_LATENCY_SENSITIVE": { + "type": "string", + "description": "IO priority for SDFS_INTERNAL_LATENCY_SENSITIVE jobs." + }, + "ANALYZE_SNAPPABLE": { + "type": "string", + "description": "IO priority for ANALYZE_SNAPPABLE jobs." + }, + "API_SLA_DRIVEN_REQUEST": { + "type": "string", + "description": "IO priority for API generated sla driven request." + }, + "CDP_REPLICATION": { + "type": "string", + "description": "IO priority for CDP_REPLICATION jobs." + }, + "CLOUD_IMAGE_CONVERSION": { + "type": "string", + "description": "IO priority for CLOUD_IMAGE_CONVERSION jobs." + }, + "CONVERT_DISK_FORMAT": { + "type": "string", + "description": "IO priority for CONVERT_DISK_FORMAT jobs." + }, + "CREATE_APP_BLUEPRINT_SNAPSHOT": { + "type": "string", + "description": "IO priority for CREATE_APP_BLUEPRINT_SNAPSHOT jobs." + }, + "CREATE_FILESET_SNAPSHOT": { + "type": "string", + "description": "IO priority for CREATE_FILESET_SNAPSHOT jobs." + }, + "CREATE_FILESET_SNAPSHOT_FROM_ARRAY": { + "type": "string", + "description": "IO priority for CREATE_FILESET_SNAPSHOT_FROM_ARRAY jobs." + }, + "CREATE_HDFS_SNAPSHOT": { + "type": "string", + "description": "IO priority for CREATE_HDFS_SNAPSHOT jobs." + }, + "CREATE_HYPERV_SNAPSHOT": { + "type": "string", + "description": "IO priority for CREATE_HYPERV_SNAPSHOT jobs." + }, + "CREATE_NUTANIX_SNAPSHOT": { + "type": "string", + "description": "IO priority for CREATE_NUTANIX_SNAPSHOT jobs." + }, + "CREATE_ORACLE_LOG_SNAPSHOT": { + "type": "string", + "description": "IO priority for CREATE_ORACLE_LOG_SNAPSHOT jobs." + }, + "CREATE_ORACLE_SNAPSHOT": { + "type": "string", + "description": "IO priority for CREATE_ORACLE_SNAPSHOT jobs." + }, + "CREATE_SNAPMIRRORCLOUD_SNAPSHOT": { + "type": "string", + "description": "IO priority for CREATE_SNAPMIRRORCLOUD_SNAPSHOT jobs." + }, + "CREATE_STORAGE_ARRAY_SNAPSHOT": { + "type": "string", + "description": "IO priority for CREATE_STORAGE_ARRAY_SNAPSHOT jobs." + }, + "CREATE_VMWARE_MULTI_NODE_SNAPSHOT": { + "type": "string", + "description": "IO priority for CREATE_VMWARE_MULTI_NODE_SNAPSHOT jobs." + }, + "CREATE_VMWARE_SNAPSHOT": { + "type": "string", + "description": "IO priority for CREATE_VMWARE_SNAPSHOT jobs." + }, + "CREATE_VMWARE_SNAPSHOT_FROM_ARRAY": { + "type": "string", + "description": "IO priority for CREATE_VMWARE_SNAPSHOT_FROM_ARRAY jobs." 
+ }, + "CREATE_VOLUME_GROUP_SNAPSHOT": { + "type": "string", + "description": "IO priority for CREATE_VOLUME_GROUP_SNAPSHOT jobs." + }, + "CUSTOM_TOOL_SLA_DRIVEN": { + "type": "string", + "description": "IO priority for sla driven customer tool." + }, + "DISTRIBUTE_CDM_SOFTWARE": { + "type": "string", + "description": "IO priority for DISTRIBUTE_CDM_SOFTWARE jobs." + }, + "EMAIL_REPORTS": { + "type": "string", + "description": "IO priority for EMAIL_REPORTS jobs." + }, + "EMAIL_REPORT_ONE_TIME": { + "type": "string", + "description": "IO priority for EMAIL_REPORT_ONE_TIME jobs." + }, + "EMAIL_SUBSCRIPTION": { + "type": "string", + "description": "IO priority for EMAIL_SUBSCRIPTION jobs." + }, + "EXPIRE_MSSQL_DOWNLOADED_RANGES": { + "type": "string", + "description": "IO priority for EXPIRE_MSSQL_DOWNLOADED_RANGES jobs." + }, + "EXPIRE_MSSQL_LOGS": { + "type": "string", + "description": "IO priority for EXPIRE_MSSQL_LOGS jobs." + }, + "GENERATE_CONNECTORS": { + "type": "string", + "description": "IO priority for GENERATE_CONNECTORS jobs." + }, + "GENERATE_CUSTOM_REPORT": { + "type": "string", + "description": "IO priority for GENERATE_CUSTOM_REPORT jobs." + }, + "GENERATE_EVENT_SQLITE_FILE": { + "type": "string", + "description": "IO priority for GENERATE_EVENT_SQLITE_FILE jobs." + }, + "GENERATE_REPORT_DATA_SOURCE": { + "type": "string", + "description": "IO priority for GENERATE_REPORT_DATA_SOURCE jobs." + }, + "GENERATE_REPORT_DATA_SOURCE_CSV": { + "type": "string", + "description": "IO priority for GENERATE_REPORT_DATA_SOURCE_CSV jobs." + }, + "GENERATE_REPORT_DATA_SOURCE_FREQUENT": { + "type": "string", + "description": "IO priority for GENERATE_REPORT_DATA_SOURCE_FREQUENT jobs." + }, + "GLOBAL_SEARCH": { + "type": "string", + "description": "IO priority for global search api query reads." + }, + "INDEX_SNAPPABLE_SNAPSHOTS": { + "type": "string", + "description": "IO priority for INDEX_SNAPPABLE_SNAPSHOTS jobs." + }, + "INSTANTIATE_SNAPSHOT_ON_CLOUD": { + "type": "string", + "description": "IO priority for INSTANTIATE_SNAPSHOT_ON_CLOUD jobs." + }, + "KVSNAPSHOT_FILE_BACKUP": { + "type": "string", + "description": "IO priority for backups taken by KVSnapshot Service." + }, + "LAMBDA_ANALYZE_CONTENT": { + "type": "string", + "description": "IO priority for LAMBDA_ANALYZE_CONTENT job." + }, + "LAMBDA_ANALYZE_CONTENT_PARALLEL": { + "type": "string", + "description": "IO priority for LAMBDA_ANALYZE_CONTENT_PARALLEL job." + }, + "MANAGED_VOLUME_BACKUP": { + "type": "string", + "description": "IO priority for MANAGED_VOLUME_BACKUP jobs." + }, + "MANAGED_VOLUME_BEGIN_SNAPSHOT": { + "type": "string", + "description": "IO priority for MANAGED_VOLUME_BEGIN_SNAPSHOT jobs." + }, + "MANAGED_VOLUME_END_SNAPSHOT": { + "type": "string", + "description": "IO priority for MANAGED_VOLUME_END_SNAPSHOT jobs." + }, + "MANAGED_VOLUME_EXPORT": { + "type": "string", + "description": "IO priority for MANAGED_VOLUME_EXPORT jobs." + }, + "MANAGED_VOLUME_INDEX_SNAPPABLE_SNAPSHOTS": { + "type": "string", + "description": "IO priority for operations done on mounts created by INDEX_SNAPPABLE_SNAPSHOTS jobs for managed volumes." + }, + "MANAGED_VOLUME_MAIN_MOUNT_WRITABLE": { + "type": "string", + "description": "IO priority for operations done on writable Managed Volume mounts." + }, + "MANAGED_VOLUME_MAINTAIN": { + "type": "string", + "description": "IO priority for MANAGED_VOLUME_MAINTAIN jobs." + }, + "MANAGED_VOLUME_RESET": { + "type": "string", + "description": "IO priority for MANAGED_VOLUME_RESET jobs." 
+ }, + "MANAGED_VOLUME_SNAPSHOT": { + "type": "string", + "description": "IO priority for MANAGED_VOLUME_SNAPSHOT jobs." + }, + "MANAGED_VOLUME_SNAPSHOT_SLA_MV": { + "type": "string", + "description": "IO priority for MANAGED_VOLUME_SNAPSHOT_SLA_MV jobs." + }, + "MANAGED_VOLUME_UNEXPORT": { + "type": "string", + "description": "IO priority for MANAGED_VOLUME_UNEXPORT jobs." + }, + "MOVE_FINGER_PRINTS_AFTER_UPGRADE": { + "type": "string", + "description": "IO priority for MOVE_FINGER_PRINTS_AFTER_UPGRADE jobs." + }, + "MSSQL_APPLY_LOGS": { + "type": "string", + "description": "IO priority for MSSQL_APPLY_LOGS jobs." + }, + "MSSQL_DB_BACKUP": { + "type": "string", + "description": "IO priority for MSSQL_DB_BACKUP jobs." + }, + "MSSQL_DB_BATCH_BACKUP": { + "type": "string", + "description": "IO priority for MSSQL_DB_BATCH_BACKUP jobs." + }, + "MSSQL_LOG_BACKUP": { + "type": "string", + "description": "IO priority for MSSQL_LOG_BACKUP jobs." + }, + "MSSQL_LOG_REPLICATE": { + "type": "string", + "description": "IO priority for MSSQL_LOG_REPLICATE jobs." + }, + "MSSQL_LOG_UPLOAD": { + "type": "string", + "description": "IO priority for MSSQL_LOG_UPLOAD jobs." + }, + "MSSQL_MULTI_HOST_BATCH_BACKUP": { + "type": "string", + "description": "IO priority for MSSQL_MULTI_HOST_BATCH_BACKUP jobs." + }, + "OPEN_MJF": { + "type": "string", + "description": "IO priority inside MJF::Open()." + }, + "OPEN_SMJF": { + "type": "string", + "description": "IO priority inside SplitMJFFile::Open()." + }, + "OPTIMIZE_REPRESENTATION": { + "type": "string", + "description": "IO priority for OPTIMIZE_REPRESENTATION jobs." + }, + "ORACLE_LOG_REPLICATE": { + "type": "string", + "description": "IO priority for ORACLE_LOG_REPLICATE jobs." + }, + "ORACLE_LOG_UPLOAD": { + "type": "string", + "description": "IO priority for ORACLE_LOG_UPLOAD jobs." + }, + "ORACLE_VALIDATE_BACKUP": { + "type": "string", + "description": "IO priority for ORACLE_VALIDATE_BACKUP jobs." + }, + "POLARIS_PULL_REPLICATE": { + "type": "string", + "description": "IO priority for POLARIS_PULL_REPLICATE job." + }, + "PULL_REPLICATE": { + "type": "string", + "description": "IO priority for PULL_REPLICATE jobs." + }, + "REPLICATE_TO_CLOUD": { + "type": "string", + "description": "IO priority for REPLICATE_TO_CLOUD jobs." + }, + "SDFS_INTERNAL_SLA_DRIVEN": { + "type": "string", + "description": "IO priority for SDFS_INTERNAL_SLA_DRIVEN jobs." + }, + "SDFS_TRY_REDUCE_MEMORY_CONSUMPTION": { + "type": "string", + "description": "Minimum IO priority used for doing IOs to reduce SDFS memory consumption when it is under memory pressure." + }, + "STAGE_CDM_SOFTWARE": { + "type": "string", + "description": "IO priority for STAGE_CDM_SOFTWARE jobs." + }, + "UPLOAD": { + "type": "string", + "description": "IO priority for UPLOAD jobs." + }, + "UPLOAD_FILE": { + "type": "string", + "description": "IO priority for UPLOAD_FILE jobs." + }, + "UPLOAD_INDEX": { + "type": "string", + "description": "IO priority for UPLOAD_INDEX jobs." + }, + "VALIDATE_UPLOADED_PATCH_FILE": { + "type": "string", + "description": "IO priority for VALIDATE_UPLOADED_PATCH_FILE jobs." + }, + "API_BACKGROUND_REQUEST": { + "type": "string", + "description": "IO priority for API generated background request." + }, + "CAPACITY_BUNDLE_UPLOADER": { + "type": "string", + "description": "IO priority for CAPACITY_BUNDLE_UPLOADER job." + }, + "CLEANUP_OLD_SD_SCRATCH_BLOBSTORE": { + "type": "string", + "description": "IO priority for CLEANUP_OLD_SD_SCRATCH_BLOBSTORE job." 
+ }, + "CLEANUP_REPORTS": { + "type": "string", + "description": "IO priority for CLEANUP_REPORTS job." + }, + "CLEANUP_SUBSCRIPTIONS": { + "type": "string", + "description": "IO priority for CLEANUP_SUBSCRIPTIONS job." + }, + "CONSOLIDATE_DC_BLOB_GROUP": { + "type": "string", + "description": "IO priority for CONSOLIDATE_DC_BLOB_GROUP jobs." + }, + "CONSOLIDATE_DT_BLOB_GROUP": { + "type": "string", + "description": "IO priority for CONSOLIDATE_DT_BLOB_GROUP jobs." + }, + "CONSOLIDATE_PATCH_FILES": { + "type": "string", + "description": "IO priority for CONSOLIDATE_PATCH_FILES jobs." + }, + "CROSS_DC_BLOB_GROUP": { + "type": "string", + "description": "IO priority for CROSS_DC_BLOB_GROUP jobs." + }, + "EXPIRE_ORACLE_LOGS": { + "type": "string", + "description": "IO priority for EXPIRE_ORACLE_LOGS jobs." + }, + "EXPIRE_SNAPPABLE_SNAPSHOT": { + "type": "string", + "description": "IO priority for EXPIRE_SNAPPABLE_SNAPSHOT jobs." + }, + "FILESET_DEDUP_CALCULATOR": { + "type": "string", + "description": "IO priority for FILESET_DEDUP_CALCULATOR jobs." + }, + "FILESET_SNAPSHOT_VERIFICATION": { + "type": "string", + "description": "IO priority for FILESET_SNAPSHOT_VERIFICATION jobs." + }, + "GC_DC_BLOB_GROUP": { + "type": "string", + "description": "IO priority for GC_DC_BLOB_GROUP jobs." + }, + "GC_STREAM_LOGS": { + "type": "string", + "description": "IO priority for GC_STREAM_LOGS jobs." + }, + "LOG_UPLOADER": { + "type": "string", + "description": "IO priority for LOG_UPLOADER jobs." + }, + "MATERIALIZE_FULL": { + "type": "string", + "description": "IO priority for MATERIALIZE_FULL jobs." + }, + "REFRESH_FAILOVER_CLUSTER_APP_METADATA": { + "type": "string", + "description": "IO priority for REFRESH_FAILOVER_CLUSTER_APP_METADATA jobs." + }, + "REFRESH_HOST_METADATA": { + "type": "string", + "description": "IO priority for REFRESH_HOST_METADATA jobs." + }, + "REFRESH_SIM_HASH_CACHE_DC_BLOB_STORE": { + "type": "string", + "description": "IO priority for REFRESH_SIM_HASH_CACHE_DC_BLOB_STORE jobs." + }, + "REPLICATION_SEEDING_EXPORT": { + "type": "string", + "description": "IO priority for REPLICATION_SEEDING_EXPORT jobs." + }, + "REPLICATION_SEEDING_IMPORT": { + "type": "string", + "description": "IO priority for REPLICATION_SEEDING_IMPORT jobs." + }, + "REVERSE_DC_BLOB_GROUP": { + "type": "string", + "description": "IO priority for REVERSE_DC_BLOB_GROUP jobs." + }, + "REVERSE_DT_BLOB_GROUP": { + "type": "string", + "description": "IO priority for REVERSE_DT_BLOB_GROUP jobs." + }, + "SDFS_INTERNAL_BACKGROUND": { + "type": "string", + "description": "IO priority for SDFS_INTERNAL_BACKGROUND jobs." + }, + "SNAPSHOT_INTEGRITY": { + "type": "string", + "description": "IO priority for SNAPSHOT_INTEGRITY jobs." + }, + "SUPPORT_BUNDLE_GENERATOR": { + "type": "string", + "description": "IO priority for SUPPORT_BUNDLE_GENERATOR jobs." + }, + "UPGRADE_HOST_RBA": { + "type": "string", + "description": "IO priority for UPGRADE_HOST_RBA jobs." + }, + "UPLOAD_METRICS_METADATA": { + "type": "string", + "description": "IO priority for UPLOAD_METRICS_METADATA jobs." + }, + "UPLOAD_REPORT_BUNDLE": { + "type": "string", + "description": "IO priority for UPLOAD_REPORT_BUNDLE jobs." + }, + "VERIFY_FULL_REPRESENTATION": { + "type": "string", + "description": "IO priority for VERIFY_FULL_REPRESENTATION." + }, + "VERIFY_REPRESENTATION": { + "type": "string", + "description": "IO priority for VERIFY_REPRESENTATION." 
+ }, + "ARCHIVAL_MIGRATE_SNAPPABLE": { + "type": "string", + "description": "IO priority for ARCHIVAL_MIGRATE_SNAPPABLE job." + }, + "LOG_HYDRATION": { + "type": "string", + "description": "IO priority for LOG_HYDRATION job which runs as part of customer driven disaster recovery." + } + } + }, + "GlobalKvsnapshotConfig": { + "type": "object", + "properties": { + "kvSnapshotServerNumLogFiles": { + "type": "integer", + "format": "int32", + "description": "Number of log files to keep for KVSnapshotService." + }, + "kvSnapshotServerPort": { + "type": "integer", + "format": "int32", + "description": "Port for the snapshot server." + }, + "kvSnapshotServerInternalPort": { + "type": "integer", + "format": "int32", + "description": "Port for the internal snapshot server." + }, + "maxSubGroupsPerSuperGroup": { + "type": "integer", + "format": "int32", + "description": "Maximum number of blobstore subgroups per supergroup." + }, + "maxContentsPerSubGroup": { + "type": "integer", + "format": "int32", + "description": "Maximum number of blobstore contents per subgroup." + }, + "minSubGroupPerSuperGroup": { + "type": "integer", + "format": "int32", + "description": "Minimum number of subgroups per supergroup in kvsnapshot service." + }, + "kvSnapshotServerMaxRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum retry for any KV snapshot server thrift call." + } + } + }, + "GlobalLambdaConfig": { + "type": "object", + "properties": { + "enableAutomaticFmdUpload": { + "type": "boolean", + "description": "Whether to enable automatic upload of Filesystem Metadata for newly indexed snapshots." + }, + "uploadFilesystemMetadataJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval in minutes for the snappable lambda upload FMD job." + }, + "enableLambdaParserService": { + "type": "boolean", + "description": "Whether to enable the lambda parser service." + }, + "lambdaParserServiceLimitInBytes": { + "type": "integer", + "format": "int32", + "description": "Parsed output size limit in bytes." + }, + "analyzeSnappableUmlGuestMemoryInMb": { + "type": "integer", + "format": "int32", + "description": "Amount of memory for UML guest in MB." + }, + "tikaForkParserWorkerMemoryInMb": { + "type": "integer", + "format": "int32", + "description": "Amount of memory for Tika fork parser worker in MB." + }, + "tikaForkParserAcquireWaitDurationMs": { + "type": "integer", + "format": "int32", + "description": "Amount of time to wait to acquire a ForkClient." + }, + "useLiteTikaParserConfig": { + "type": "boolean", + "description": "Use Tika config that has lighter resource usage." + }, + "tikaForkParserWorkerMaxFilesProcessed": { + "type": "integer", + "format": "int32", + "description": "Maximum number of files a tika fork worker can process before it is forcibly shut down." + }, + "ransomwareAnalysisTimeLimitInSeconds": { + "type": "integer", + "format": "int32", + "description": "Maximum duration, in seconds, of a ransomware analysis job." + }, + "lambdaAnalysisJobRetryLimit": { + "type": "integer", + "format": "int32", + "description": "Maximum number of times to retry any lambda analysis job." + }, + "contentAnalysisTimeLimitInSeconds": { + "type": "integer", + "format": "int32", + "description": "Maximum duration, in seconds, of a content analysis job." + }, + "lambdaPrefixPathInSdScratch": { + "type": "string", + "description": "Lambda root scratch directory." 
+ }, + "enableFmdUploadForAllResources": { + "type": "boolean", + "description": "Whether to enable upload of filesystem metadata for all resources." + }, + "defaultDiffFmdUploadPrefix": { + "type": "string", + "description": "Prefix that is prepended to uploaded differential filesystem metadata." + }, + "defaultFullFmdUploadPrefix": { + "type": "string", + "description": "Prefix that is prepended to uploaded full filesystem metadata." + }, + "maxSnapshotsToUploadAutomatically": { + "type": "integer", + "format": "int32", + "description": "Maximum number of snapshots to upload at once when the filesystem metadata is uploaded automatically." + }, + "contentAnalysisPathBatchSize": { + "type": "integer", + "format": "int32", + "description": "Maximum number of paths to iterate over in a single call to LambdaServer. This prevents too much time spent in any single call; instead the call will return with a path cursor for the start of the next batch." + }, + "contentAnalysisAnalyzableBatchSize": { + "type": "integer", + "format": "int32", + "description": "Maximum number of files to analyze in a single call to LambdaServer. This prevents too much time spent in any single call; instead the call will return with a path cursor for the start of the next batch." + }, + "contentAnalysisMaxFilesBeforeMerge": { + "type": "integer", + "format": "int32", + "description": "Number of SSTables to keep around before preemptively issuing a merge prior to job completion. This intends to limit the number of files in a directory so that SDFS is not impacted. This number is also not a hard limit, so we could possibly exceed this value temporarily." + }, + "contentAnalysisJobIntervalInMinutes": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in minutes. Content analysis jobs will run at the specified interval for any snappables that have periodic content analysis policies configured." + }, + "enableContentAnalysisChildJobRetry": { + "type": "boolean", + "description": "Enable child job retries. Allows us to force fail a child job if we need to in production without losing the progress." + }, + "dataGovTargetVMPartitionFilesAnalyzableSizeInGb": { + "type": "integer", + "format": "int32", + "description": "Target size of analyzable files in vm partitions. Default 100GB." + }, + "opentracingSamplingStrategy": { + "type": "string", + "description": "Jaeger Opentracing strategy. The default strategy samples at 0.000001 probability." + }, + "opentracingSamplingDurationMsec": { + "type": "integer", + "format": "int32", + "description": "Frequency by which new Sampling strategies are polled by Jaeger." + }, + "windowsAuditDefaultPowershellScriptsJson": { + "type": "string", + "description": "List of powershell commands that will enable auditing (as a JSON list)." + }, + "windowsAuditDefaultXPathSubscription": { + "type": "string", + "description": "XPath query to subscribe to windows audit events." + }, + "windowsAuditDefaultBlacklistStrings": { + "type": "string", + "description": "Strings to blacklist from Windows events as a JSON list." + }, + "sendAuditConfigJobIntervalInMinutes": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in minutes. SendAuditConfig job will periodically send the AuditConfig to all Windows Hosts that have Auditing enabled." + } + } + }, + "GlobalManagedVolumeConfig": { + "type": "object", + "properties": { + "NFSClosedExportOptions": { + "type": "string", + "description": "Export options for managed volume in read-only mode." 
+ }, + "NFSOpenedExportOptions": { + "type": "string", + "description": "Export options for managed volume in read-write mode." + }, + "NFSOpenedExportOptionsSyncMode": { + "type": "string", + "description": "Export options for MVs to be set to sync in read-write mode." + }, + "NFSAlwaysOpenExportOptions": { + "type": "string", + "description": "Export options for managed volume with 'alwaysOpen' attribute." + }, + "managedVolumeCleanupReadLockWaitMs": { + "type": "integer", + "format": "int32", + "description": "Maximum time waiting for a the managed volume cleanup read write lock in read mode." + }, + "managedVolumeCleanupReadLockSleepMs": { + "type": "integer", + "format": "int32", + "description": "Sleep time between checks for the managed volume cleanup read write lock in read mode." + }, + "managedVolumeCleanupWriteLockWaitMs": { + "type": "integer", + "format": "int32", + "description": "Maximum time waiting for a the managed volume cleanup read write lock in write mode." + }, + "managedVolumeCleanupFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes between managed volume cleanup jobs." + }, + "managedVolumeCleanupJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent managed volume cleanup jobs per node." + }, + "managedVolumeCleanupWriteLockSleepMs": { + "type": "integer", + "format": "int32", + "description": "Sleep time between checks for the managed volume cleanup read write lock in write mode." + }, + "managedVolumeCleanupDiscoverTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Maximum time to wait for the managed volume cleanup discovery to scan all devices in a single node." + }, + "managedVolumeResetUnexportTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Maximum time to wait for reset to unexport NFS on a node." + }, + "managedVolumeDestroyNumThreads": { + "type": "integer", + "format": "int32", + "description": "Number of threads to use for destroying managed volumes." + }, + "managedVolumeSnapshotJobRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for the managed volume snapshot job." + }, + "managedVolumeSnapshotFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes between managed volume snapshot jobs." + }, + "managedVolumeExportJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent managed volume export jobs per node." + }, + "managedVolumeUnexportJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent managed volume unexport jobs per node." + }, + "managedVolumeGenerateScriptJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Specifies the maximum number of managed volume script generation jobs that can run concurrently on a node." + }, + "managedVolumePatchConversionsPerNode": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent managed volume patch file conversions per node." + }, + "managedVolumePatchFileCompressionType": { + "type": "string", + "description": "Compression type for managed volume patch files." + }, + "managedVolumePatchCreationStatusCheckMinPeriodMs": { + "type": "integer", + "format": "int32", + "description": "Minimum time period between patch file creation checks for managed volumes." 
+ }, + "managedVolumePatchCreationStatusCheckMaxPeriodMs": { + "type": "integer", + "format": "int32", + "description": "Maximum time period between patch file creation checks for managed volumes." + }, + "managedVolumeMaxDiskSizeMb": { + "type": "integer", + "format": "int32", + "description": "Maximum size for a managed volume disk. This should be same as fileset filesetMaxVolumeSizeMb." + }, + "managedVolumeMinDiskSizeMb": { + "type": "integer", + "format": "int32", + "description": "Minimum size for a managed volume disk. This should be same as fileset filesetMinVolumeSizeMb." + }, + "managedVolumeMaxChannels": { + "type": "integer", + "format": "int32", + "description": "Maximum number of channels per managed volume." + }, + "managedVolumeSdfsMountPath": { + "type": "string", + "description": "Directory where managed volumes are mounted in Sdfs." + }, + "managedVolumeMountPath": { + "type": "string", + "description": "Directory where managed volumes are mounted for export." + }, + "managedVolumeSnapshotAcquireTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Maximum wait time when acquiring the semaphore while taking snapshots." + }, + "managedVolumeMaintainJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent managed volume maintain jobs per node." + }, + "managedVolumeMaintainJobIntervalInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for managed volume maintainer jobs in minutes." + }, + "managedVolumeMaintainJobRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for the managed volume maintain job." + }, + "managedVolumeExportJobRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for the managed volume export job." + }, + "managedVolumeUnexportJobRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for the managed volume unexport job." + }, + "managedVolumeResetJobRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for the managed volume reset job." + }, + "managedVolumeResizeJobRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for the managed volume resize job." + }, + "managedVolumeEndSnapshotMaxAttempts": { + "type": "integer", + "format": "int32", + "description": "Number of attempts at ending a managed volume snapshot." + }, + "managedVolumeSdfsSnapshotFileTimeout": { + "type": "integer", + "format": "int32", + "description": "SDFS RPC timeout for snapshotFile() during end snapshot job." + }, + "managedVolumeResetDiskMaxAttempts": { + "type": "integer", + "format": "int32", + "description": "Number of attempts at resetting a disk." + }, + "managedVolumeResetDiskSleepInMs": { + "type": "integer", + "format": "int32", + "description": "Sleep time between attempts for the managed volume reset disk." + }, + "managedVolumeDropCacheOnReset": { + "type": "boolean", + "description": "If set to true, explicitly drop the buffer cache on managed volume reset." + }, + "managedVolumeSleepAfterPoison": { + "type": "integer", + "format": "int32", + "description": "Number of milliseconds to sleep after poisoning the MJF on managed volume reset." + }, + "managedVolumeNumParallelSnapshotDisk": { + "type": "integer", + "format": "int32", + "description": "Number of parallel SDFS snapshot disk calls per channel." 
+ }, + "managedVolumeGenerateScriptJobRetries": { + "type": "integer", + "format": "int32", + "description": "Specifies the number of retries to make when attempting to generate a script." + }, + "managedVolumeStatePersistRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries when trying to transition managed volume state." + }, + "managedVolumeConfigPersistRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries when trying to persist managed volume config." + }, + "managedVolumeReportJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent managed volume report jobs per node." + }, + "managedVolumeReportJobRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for the managed volume report job." + }, + "managedVolumeReportJobIntervalInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes between refreshes of managed volume stats." + }, + "managedVolumeMaxChannelsPerManagedVolume": { + "type": "integer", + "format": "int32", + "description": "Maximum number of channels per managed volume." + }, + "managedVolumeEnablePatchCache": { + "type": "boolean", + "description": "Whether PatchCache should be enabled for managed volumes." + }, + "managedVolumeIngestUseReedSolomon": { + "type": "boolean", + "description": "Whether to use Reed-Solomon or Mirrored (2-way) \\ for Managed Volume Ingest." + }, + "managedVolumeIngestUserFacingIOPriority": { + "type": "boolean", + "description": "Whether to set MJF IO priority to USER_FACING for \\ ManagedVolume Ingest." + }, + "enableFloatingIpFailover": { + "type": "boolean", + "description": "Whether to use nodes with floating IP to distribute managed volume channels. if set to false, all nodes in the cluster will be used to distribute the channels. To correctly use floating IPs for managed volume, user has to configure as many floating IP as nodes in cluster and they should be configured on bond0 (10G) interfaces. However there are units in field which have only one floating IP per cluster (for replication and GUI failover purposes) and this single floating IP is configured on the management interface (1G) If only a single floating IP is configured, all channels will be mapped to the single node that currently owns this floating IP severly hobbling parallel ingest using RMAN channels." + }, + "cleanupResourcesRemotelyAtReset": { + "type": "boolean", + "description": "Whether to cleanup managed volume resources remotely during reset. This is important to avoid leaks after floating ip failover. We put the cleanup behind a flag in case it takes too long and slow down resets after upgrade." + }, + "cleanupExt4ResourcesRemotelyAtReset": { + "type": "boolean", + "description": "Whether to cleanup managed volume ext4 stack resources remotely during reset. This option needs 'cleanupResourcesRemotelyAtReset' to be set to true. This is important to avoid leaks after floating ip failover. We put the cleanup behind a flag in case it takes too long and slows down resets after upgrade." + }, + "enableLightweightPfc": { + "type": "boolean", + "description": "Whether to enable lightweight PFC to speed up exposing MV snapshots to the user. Lightweight PFC generates a virtual patch file backed by journals - JournalVPF, and is much faster to create. JournalVPFs will be converted to patch files in the background. 
JournalVPFs can potentially have a degraded read performance compared to patch files till they are converted to patch files. This config is a hint and does not guarantee a JournalVPF will be generated. SDFS may still decide to create a Patch file based on the write pattern of the ingested data." + }, + "sdfsServiceSocketTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout while connecting to Sdfs service." + }, + "sdfsResetTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout while connecting to Sdfs service for reset." + }, + "checkProcessUptimeTimeoutInSecs": { + "type": "integer", + "format": "int32", + "description": "Timeout while validating channels in agent server." + }, + "validateChannelsSocketTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Socket timeout waiting for agent server validate call." + }, + "validateChannelsTimeoutInSecs": { + "type": "integer", + "format": "int32", + "description": "Timeout while validating channels in agent server." + }, + "managedVolumeNameLengthMax": { + "type": "integer", + "format": "int32", + "description": "Maximum length of managed volume name." + }, + "validateChannelsMaxAttempts": { + "type": "integer", + "format": "int32", + "description": "Maximum attempts to make before declaring channel bad." + }, + "validateChannelsInitialBackOffInSecs": { + "type": "integer", + "format": "int32", + "description": "Initial back off during verification failures." + }, + "managedVolumeWarnCapacityPercent": { + "type": "integer", + "format": "int32", + "description": "Send warning when above this percent of channel capacity." + }, + "managedVolumeEnableWarnCapacityPercent": { + "type": "integer", + "format": "int32", + "description": "Re-enable warning when below this percent of channel capacity." + }, + "managedVolumeFloatingIpAcquireTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Maximum wait time when acquiring the semaphore while updating managed volume channel spec during floating ip failover." + }, + "managedVolumeExportChannelSpecPersistRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries when trying to update managed volume channel spec." + }, + "managedVolumeSnapshotExportJobRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for the managed volume snapshot export job." + }, + "managedVolumeSnapshotExportInitialTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Initial timeout value for the managed volume snapshot export job." + }, + "managedVolumeSnapshotRebaseThreshold": { + "type": "integer", + "format": "int32", + "description": "Number of empty snapshot runs after which the managed volume blobs are rebased." + }, + "managedVolumeMaxQueuedSnapshots": { + "type": "integer", + "format": "int32", + "description": "Maximum number of queued snapshots for a managed volume." + }, + "managedVolumeMke2fsOptions": { + "type": "string", + "description": "Optional parameters passed to mke2fs command line." + }, + "managedVolumeMountExt4Options": { + "type": "string", + "description": "Optional parameters passed to mount ext4 command line. Adding any option here will allow managed volumes to get mounted with that option during creation, reset and resize operations." + }, + "managedVolumeRunFsck": { + "type": "boolean", + "description": "Whether to run fsck before mounting the filesystem." 
+ }, + "managedVolumeFsckOptions": { + "type": "string", + "description": "Optional parameters to pass to the fsck command line." + }, + "managedVolumeForceFsckOptions": { + "type": "string", + "description": "Parameters to pass to the fsck command line when it is forced." + }, + "managedVolumeUseVariableSegmentation": { + "type": "boolean", + "description": "Whether to use variable segmentation by default if dedup is enabled. Depending on the workload we may decide to override this value on a specific snappable." + }, + "managedVolumeSegmentSizeInKb": { + "type": "integer", + "format": "int32", + "description": "Default segment size in case dedup is enabled. Depending on the workload we may decide to override this value on a specific snappable." + }, + "managedVolumeEnableDedup": { + "type": "boolean", + "description": "Whether to use extent index and content index based \\ deduplication for Managed Volume ingest." + }, + "managedVolumeEnableSplitMjf": { + "type": "boolean", + "description": "Whether to use SplitMjf to ingest data." + }, + "managedVolumeUseMdCompatibleSplitMjf": { + "type": "boolean", + "description": "Whether to use SplitMjf to mount legacy mdadm stacks." + }, + "maxDirtyPagesSumLimit": { + "type": "integer", + "format": "int32", + "description": "Maximum percentage of the system's dirty pages that can can be used in managed volume mounts. This is the sum of the dirty pages for all mounts." + }, + "maxDirtyPagesIndividualLimit": { + "type": "integer", + "format": "int32", + "description": "Maximum percentage of the system's dirty pages that can can be used by a single managed volume mount." + }, + "snapshotReferenceOpsNumRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries when doing atomic updates to snapshot references." + }, + "snapshotReferenceOpsInitialSleepTimeMillis": { + "type": "integer", + "format": "int32", + "description": "Min millis to sleep between retries of snapshot reference operations." + }, + "snapshotReferenceOpsMaxSleepTimeMillis": { + "type": "integer", + "format": "int32", + "description": "Max millis to sleep between retries of snapshot reference operations." + }, + "minTimeBetweenSnapshotsInSeconds": { + "type": "integer", + "format": "int32", + "description": "Minimum time between two consecutive begin snapshot calls." + }, + "maxDelaySnapshotDurationInSeconds": { + "type": "integer", + "format": "int32", + "description": "Maximum delay that can be requested in the end snapshot call." + }, + "managedVolumeSnapshotExportCachingEnabled": { + "type": "boolean", + "description": "Flag to toggle caching for snapshot exports." + }, + "managedVolumeEnableOracleMarkerRemoval": { + "type": "boolean", + "description": "If this this true, oracle marker removal will be enabled based \\ on the application tag. If this is false, oracle marker will \\ not be enabled (even if application tag says we should)." + }, + "internalMountLimitPerManagedVolume": { + "type": "integer", + "format": "int32", + "description": "Maximum number of live internal mounts at a time per Managed Volume." + }, + "unifiedViewScriptTemplatePath": { + "type": "string", + "description": "Location of unified view script template." + }, + "unifiedViewScriptFileNamePrefix": { + "type": "string", + "description": "Prefix to unified view script name. Part of the export id will be appended as well as the file extension." 
+ }, + "unifiedViewScriptGenerationTimeoutInSec": { + "type": "integer", + "format": "int32", + "description": "Timeout for unified view script generation calls." + }, + "managedVolumeSnapshotExt4Options": { + "type": "string", + "description": "Ext4 options when we start the end snapshot call. This guarantees there are no open files in read-write mode on top of the mount point." + }, + "managedVolumeExt4Options": { + "type": "string", + "description": "Ext4 options after the end snapshot call finishes and after the ext4 is first mounted by any job." + }, + "managedVolumeExt4OptionsSyncMode": { + "type": "string", + "description": "Ext4 options set to sync by default after the end snapshot call finishes and after the ext4 is first mounted by any job." + }, + "managedVolumeExt4LegacyOptions": { + "type": "string", + "description": "Ext4 options after the end snapshot call finishes and after the ext4 is first mounted by any job. This are used in legacy stacks only." + }, + "managedVolumeForceFsckOnResetAndResize": { + "type": "boolean", + "description": "Whether to run fsck before mounting the filesystem during reset." + }, + "managedVolumeBlobStoreOperationsMaxThreads": { + "type": "integer", + "format": "int32", + "description": "Determines max number of blobstore operations such as open, beginCreate, etc that can run at once in managed volume jobs." + }, + "managedVolumeNonDedupMvSizePhysicalFootprintRatio": { + "type": "number", + "format": "double", + "description": "Ratio of physical to logical size of non dedup managed volumes." + }, + "managedVolumeDedupMvSizePhysicalFootprintRatio": { + "type": "number", + "format": "double", + "description": "Ratio of physical to logical size of dedup managed volumes." + }, + "managedVolumeSyncSplitMjfBeforeDelete": { + "type": "boolean", + "description": "Whether a Managed Volume SplitMjf is synced before being deleted." + }, + "managedVolumeForceResetOnReferenceExpired": { + "type": "boolean", + "description": "If set to true, managed volume is always reset when snapshot reference expires." + }, + "managedVolumeReplaceMjfsPerNode": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent managed volume replace snapshot calls per node." + }, + "failMaintainSuccessOnUnexpectedState": { + "type": "boolean", + "description": "If true, we will fail if we get an unexpected state in VERIFY_CHANNEL Stask of maintain job. This config is added mainly to help us detect failures in testing, but can also be used on erratic clusters." + }, + "invalidateManagedVolumeRootUuidRetries": { + "type": "integer", + "format": "int32", + "description": "The number of times to retry updating the ManagedVolumeRoot cache UUIDs." + }, + "invalidateManagedVolumeRootUuidSleepInMs": { + "type": "integer", + "format": "int32", + "description": "The amount of time to sleep before trying to update the ManagedVolumeRoot cache UUIDs." + }, + "managedVolumeBalanceChannelsDuringCreation": { + "type": "boolean", + "description": "If set to true, then try to balance the number of channels per floating IP for the newly created managed volume based on the current number of channels per floating IP." + }, + "managedVolumeMaxChannelsPerFloatingIp": { + "type": "integer", + "format": "int32", + "description": "The maximum number of allowed channels per floating IP. If the limit is crossed then export of newly created managed volume will fail. Setting it to 0 disables the limit." 
+ }, + "managedVolumeMaxLiveMountChannelsPerFloatingIp": { + "type": "integer", + "format": "int32", + "description": "The maximum number of allowed live mount channels per floating IP. If the limit is crossed then the snapshot export will fail. Setting it to 0 disables the limit." + }, + "managedVolumeStrictEnforceFloatingIpBalancing": { + "type": "boolean", + "description": "Currently we allow MV creation to succeed if there is no floating IP configured on the cluster. IF this flag is true then this will not be allowed." + }, + "managedVolumeResetOnThriftTransportException": { + "type": "boolean", + "description": "If this is set to False, we will not reset MVs on thrift timeout and connection failures in Maintain jobs when the node is in OK state." + }, + "managedVolumeMaxMjfSnapshotsToMerge": { + "type": "integer", + "format": "int32", + "description": "Maximum number of MJF snapshots in snapshotRequestQueue which can be merged and converted into a single blobstore snapshot." + }, + "managedVolumeEnableLocklessCleanup": { + "type": "boolean", + "description": "If True, MANAGED_VOLUME_CLEANUP job will not take a lock." + }, + "managedVolumeHistoricalLeakedResourcesCount": { + "type": "integer", + "format": "int32", + "description": "How many historical managed volume leaked resources to record in metadata, to facilitate lockless cleanup." + }, + "managedVolumeResetThreadsPerNode": { + "type": "integer", + "format": "int32", + "description": "Determines max number of resetFileToSnapshot calls on a node that can run at once in a reset job." + }, + "managedVolumeReadDuringValidation": { + "type": "boolean", + "description": "If True, MANAGED_VOLUME_MAINTAIN job validation will consist of one random read to the underlying loop device." + }, + "managedVolumeBackupJobRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for the Managed Volume backup job operations." + }, + "managedVolumeNumLoopWorkersMainExport": { + "type": "integer", + "format": "int32", + "description": "Number of workers to instantiate the loop device with, while mounting ext4 for MV main exports. TODO(vaibhav.gosain) [CDM-201723] Enable multiple loop workers for MV main exports." + }, + "managedVolumeNumLoopWorkersSnapshotExport": { + "type": "integer", + "format": "int32", + "description": "Number of workers to instantiate the loop device with, while mounting ext4 for MV snapshot exports." + }, + "slaManagedVolumeResetBeforeBackup": { + "type": "boolean", + "description": "If set to true, reset the SLA Managed Volume before every backup." + }, + "slaManagedVolumeExportRetryDelayInMinutes": { + "type": "integer", + "format": "int32", + "description": "The time interval after which to schedule the next export job for the same SLA Managed Volume, in case the current export job fails." + }, + "slaManagedVolumeClientNfsMountOptions": { + "type": "string", + "description": "NFS Mount options used by SLA Managed Volumes on Client host while mounting a managed volume share." + }, + "slaManagedVolumeClientAixMountOptions": { + "type": "string", + "description": "NFS Mount options used by SLA Managed Volumes on Client host while mounting a managed volume share." + }, + "slaManagedVolumeClientSolarisMountOptions": { + "type": "string", + "description": "NFS Mount options used by SLA Managed Volumes on Client host while mounting a managed volume share." 
+ }, + "slaManagedVolumeClientSmbMountOptions": { + "type": "string", + "description": "SMB Mount options used by SLA Managed Volumes on Client host while mounting a managed volume share." + }, + "slaManagedVolumeClientNfsUmountOptions": { + "type": "string", + "description": "Options for the unmount command on the Linux SLA Managed Volume host." + }, + "slaManagedVolumeClientAixUmountOptions": { + "type": "string", + "description": "Options for the unmount command on the AIX SLA Managed Volume host." + }, + "slaManagedVolumeClientSolarisUmountOptions": { + "type": "string", + "description": "Options for the unmount command on the Solaris SLA Managed Volume host." + }, + "slaManagedVolumeClientSmbUmountOptions": { + "type": "string", + "description": "Options for the unmount command on the Windows SLA Managed Volume host." + }, + "slaManagedVolumeClientMountTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Maximum wait time for mount / unmount calls to succeed on client host during SLA Managed Volumes backup jobs." + }, + "slaManagedVolumeClientDefaultScriptTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Maximum wait time for SLA Managed Volume pre and post backup scripts to execute, unless overridden by client." + }, + "slaManagedVolumeClientScriptPollTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Duration in seconds for polling status of client scripts running during SLA Managed Volume backup jobs." + }, + "slaManagedVolumeOngoingBackupChannelValidationFrequencyInSeconds": { + "type": "integer", + "format": "int32", + "description": "Duration in seconds for polling status of client scripts running during SLA Managed Volume backup jobs." + }, + "slaManagedVolumeClientScriptTerminateSignalTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout in seconds for sending kill signal to client scripts if terminate signal does not terminate client scripts running during SLA Managed Volume jobs." + }, + "slaManagedVolumeUnmountExt4TimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout in seconds for unmouting ext4 for SLA Managed Volumes exports during backup job." + }, + "managedVolumeResetViaSplitMJF": { + "type": "boolean", + "description": "If True, MV reset will be done via the resetSplitMJFToSnapshot API which atomically resets all the backing MJFs. Otherwise, individual backing MJFs will be reset separately via the resetFileToSnapshot API." + }, + "managedVolumeSplitMjfDeletionMaxAttempts": { + "type": "integer", + "format": "int32", + "description": "Number of attempts when deleting a SplitMjf." + }, + "managedVolumeSplitMjfResetMaxAttempts": { + "type": "integer", + "format": "int32", + "description": "Number of attempts when resetting a SplitMjf." + }, + "slaManagedVolumeExportToIngestFactor": { + "type": "number", + "format": "double", + "description": "Factor by which to divide the maximum number of SLA MV exports to obtain the maximum number of SLA MV ingests, for a given node." + }, + "slaManagedVolumeFrequentIngestFactor": { + "type": "number", + "format": "double", + "description": "Factor by which to divide the maximum number of SLA MV ingests to obtain the maximum number of slots reserved for ingests on SLA MVs with a more frequent SLA, for a given node." 
+ }, + "slaManagedVolumeFrequentIngestMaxTimePeriodInMinutes": { + "type": "integer", + "format": "int32", + "description": "Time period beyond which, an SLA MV backup job will be classified as infrequent." + }, + "slaManagedVolumeFrequentIngestAdditionalExportSlots": { + "type": "integer", + "format": "int32", + "description": "The number of additional SLA Managed Volume channels which can be created on a node, for frequent backups." + }, + "managedVolumeAvailableBackupSpaceThresholdPercentage": { + "type": "integer", + "format": "int32", + "description": "Threshold, as a percentage, for the minimum amount of space available on a cluster before a Managed Volume backup starts. The Rubrik cluster cancels backups if the amount of available space is less than this threshold." + }, + "useStaticAllocationOfNodesForSlaManagedVolumeChannels": { + "type": "boolean", + "description": "If set to true, the nodes to export the SLA MV channels will be selected uniformly at random and not depending on the corresponding semaphore value and the load on that node." + }, + "checkSlaManagedVolumeExportSemaphoreConsistency": { + "type": "boolean", + "description": "If set to true, an additional check will be performed to ensure that the individual node wide export semaphore values do not exceed the cluster wide export semaphore value, on accumulation." + }, + "slaManagedVolumeSemaphoreOperationRetryMinPeriodMs": { + "type": "integer", + "format": "int32", + "description": "Minimum time period between retries to perform SLA MV semaphore operations." + }, + "slaManagedVolumeSemaphoreOperationRetryMaxPeriodMs": { + "type": "integer", + "format": "int32", + "description": "Maximum time period between retries to perform SLA MV semaphore operations." + }, + "refreshSplitMJFMetadataBinTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout for SDFS Service refreshSplitMJFMetadataBin RPC." + }, + "useMetadataDiskForOldManagedVolumes": { + "type": "boolean", + "description": "If True, we will automatically transition old MVs to use the new metadata disk to speedup their SplitMJF Open times. In the process, during reset job, new metadata disk will be created and it will be initialized using metadata from existing MJFs. Hence the first reset for older MVs after enabling this flag will be slower, but subsequent ones are expected to be much faster." + }, + "useMetadataDiskForOldSlaMVs": { + "type": "boolean", + "description": "If True, we will automatically transition SLA based MVs to use the new metadata disk, in case they dont have one already. In the process, during backup job, new metadata disk will be created and initialized using metadata from existing MJFs. Hence the first backup job for these older MVs after enabling this flag will be slower, but later ones are expected to be faster." + }, + "enablePeriodicUpdateOfManagedVolumeSemaphores": { + "type": "boolean", + "description": "If set to true, the ManagedVolumeUpdateSemaphoreJob will be enabled and will periodically run to update the number of AlwaysMounted managed volume exports in the export semaphore corresponding to the cluster as well as the nodes." + }, + "managedVolumeUpdateSemaphoreJobIntervalInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval between periodic update of number of AlwaysMounted MV exports in the export semaphores." 
+ }, + "managedVolumeUpdateSemaphoreJobIntervalOnFailureInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval between periodic update of number of AlwaysMounted MV exports in the export semaphores table, if the current ManagedVolumeUpdateSemaphoreJob has failed." + }, + "managedVolumeUpdateSemaphoreJobRetriesOnFailure": { + "type": "integer", + "format": "int32", + "description": "Number of retries for the managed volume update semaphore job in case the current job instance fails." + }, + "managedVolumeUpdateSemaphoreJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent managed volume update semaphore jobs per node." + }, + "managedVolumeEnableMetadataDisk": { + "type": "boolean", + "description": "If disabled, we will not create and operate on metadata disk for ALL MVs (irrespective enableMetadataDiskForAlwaysMountedMV). If enabled, we will create and operate on metadata disk for SLA-based MVs, and the metadata disk usage in always-mounted MVs will depend on the value of enableMetadataDiskForAlwaysMountedMV." + }, + "enableMetadataDiskForAlwaysMountedMV": { + "type": "boolean", + "description": "If disabled, we selectively disable the RouterFileMetadataBin while creating regular style (i.e. non SLA-based) MVs." + }, + "allowedReferenceAdditionWindowForLogSec": { + "type": "integer", + "format": "int32", + "description": "Timelimit for which reference can be added for log backups after MV is opened for writes. Value -1 disables the limit." + }, + "allowedReferenceAdditionWindowForDataSec": { + "type": "integer", + "format": "int32", + "description": "Timelimit for which reference can be added for log backups after MV is opened for writes. Value -1 disables the limit." + }, + "enableTranscodeCheckInSnapshotJob": { + "type": "boolean", + "description": "If enabled, the managed volume snapshot job will throttle if the transcode queue for any sharded blob group of the base snapshot is full." + }, + "splitMjfCleanupThresholdInSeconds": { + "type": "integer", + "format": "int32", + "description": "During the managed volume cleanup, in case a SplitMjf was last modified within this threshold time, it will not be deleted." + }, + "cleanupSplitMjfOnStatFailure": { + "type": "boolean", + "description": "If enabled, the splitMjf will be cleaned up in case of a failure while calling stat on the splitMjf path." + }, + "refreshAllExportsDuringSlaManagedVolumeBackups": { + "type": "boolean", + "description": "If enabled, all the NFS exports are refreshed by running exportfs -ra while creating channels during an SLA MV backup." + }, + "tryUnexportingExt4OnNfsOrSmbUnexportFailure": { + "type": "boolean", + "description": "If enabled, we will swallow any exception thrown on NFS or SMB unexport failure and will try to unexport the remaining ext4 stack as well." + }, + "removePotentiallyDuplicateHostPatterns": { + "type": "boolean", + "description": "If enabled, we try and identify potentially duplicate host patterns in MV config when calling exportfs." + }, + "refreshAllExportsDuringManagedVolumeOperations": { + "type": "boolean", + "description": "If enabled, all the NFS exports are refreshed by running exportfs -ra while running managed volume operations." + }, + "slaManagedVolumeDebugPollFrequencyInSeconds": { + "type": "integer", + "format": "int32", + "description": "Duration in seconds for polling status of debug running during SLA Managed Volume backup jobs." 
+ }, + "slaManagedVolumeDebugTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Maximum allowed duration in seconds of debug running during SLA Managed Volume backup jobs." + }, + "failOnLogicalSpaceReductionPostExport": { + "type": "boolean", + "description": "When this value is true, the Rubrik cluster runs a check after the resize or reset operation to confirm that the logical space used by the Managed Volume is at least equal to the amount of logical space used before the resize or reset operation." + }, + "percentageThresholdForLogicalSpaceReductionDuringResize": { + "type": "integer", + "format": "int32", + "description": "The percentage reduction in the logical size of a managed volume main mount that we are willing to tolerate during a resize." + } + } + }, + "GlobalMssqlConfig": { + "type": "object", + "properties": { + "physicalHostLogBackupThrottleMaxRefCount": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent MSSQL log backup jobs per physical host." + }, + "mssqlAtomicUpdateRetryAttempts": { + "type": "integer", + "format": "int32", + "description": "Maximum number of attempts to update a mssql object atomically, such as a database." + }, + "restoreMssqlDbJobRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of times to retry a mssql restore job in case of a failure." + }, + "logBackupJobRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of times to retry a mssql log backup job in case of a failure." + }, + "fileTransferParallelism": { + "type": "integer", + "format": "int32", + "description": "Number of concurrent requests for transferring a file from a remote host to the Rubrik cluster." + }, + "fileTransferBlockSizeForFetchInMB": { + "type": "integer", + "format": "int32", + "description": "Data block size in MB used in transferring files from remote hosts to the Rubrik cluster." + }, + "fileTransferBlockSizeForCopyInKB": { + "type": "integer", + "format": "int32", + "description": "Data block size in KB used in transferring files from Rubrik cluster during restore to the remote host." + }, + "fileRestoreReadParallelMinChunkSizeInKB": { + "type": "integer", + "format": "int32", + "description": "During buffered copy, this denotes the minimum size in KB that each part should be, when different threads read from different parts of a file." + }, + "fileRestoreWriteBlockSizeInKB": { + "type": "integer", + "format": "int32", + "description": "Data block size in KB for writing files on the host during buffered copy for restore." + }, + "queueSizePerWriterForRestore": { + "type": "integer", + "format": "int32", + "description": "Number of maximum queued entries per writer thread, during buffered copy in a restore." + }, + "fileRestoreParallelism": { + "type": "integer", + "format": "int32", + "description": "Number of concurrent requests for restoring a file from the Rubrik cluster to a remote host." + }, + "fileRestoreReadParallelism": { + "type": "integer", + "format": "int32", + "description": "Number of concurrent read requests for restoring a file from the Rubrik cluster to a remote host." + }, + "fileRestoreWriteParallelism": { + "type": "integer", + "format": "int32", + "description": "Number of concurrent write requests for restoring a file from the Rubrik cluster to a remote host." + }, + "enableVdi": { + "type": "boolean", + "description": "Enable mssql log backup and restore via VDI." 
+ }, + "enableVdiDb": { + "type": "boolean", + "description": "Enable mssql db backup and restore via VDI." + }, + "enableGroupFetch": { + "type": "boolean", + "description": "Enable mssql group fetch of files." + }, + "skipZeroByteTransfer": { + "type": "boolean", + "description": "Enable mssql db backup to skip zero byte transfers on fetch." + }, + "vdiStreamFetchParallelism": { + "type": "integer", + "format": "int32", + "description": "Number of concurrent requests for loading vdi stream data from a windows host to the Rubrik cluster." + }, + "vdiBufferPageSizeInKb": { + "type": "integer", + "format": "int32", + "description": "The buffer queue element size in KB." + }, + "vdiBufferPageCount": { + "type": "integer", + "format": "int32", + "description": "The buffer queue element count." + }, + "vdiTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout for VDI operations in seconds." + }, + "vdiRestoreTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout for VDI restore operation to finish in seconds." + }, + "vdiDataTransferTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout for VDI stream data transfer in seconds." + }, + "groupFetchTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout for GroupFetch RPC operation in seconds." + }, + "vdiDbVSSTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout for VDI DB Backup VSS operation in milliseconds." + }, + "vdiDbVSSSleepTimeInMs": { + "type": "integer", + "format": "int32", + "description": "Sleep time after VDI DB VSS snapshot operation in milliseconds." + }, + "maxExtentsPerFetch": { + "type": "integer", + "format": "int32", + "description": "The maximum number of extents read in a group fetch iteration." + }, + "groupFetchPingIntervalInSeconds": { + "type": "integer", + "format": "int32", + "description": "Interval at which group fetch sends heartbeat requests to the windows agent." + }, + "maxMetadataStreams": { + "type": "integer", + "format": "int32", + "description": "Max number of parallel blobstore ops done by MSSQL snappable." + }, + "availabilityDbStatusWaitSecs": { + "type": "integer", + "format": "int32", + "description": "Number of seconds to wait for responses to calls to getAvailabilityDbStatus for selecting the proper sourcing replica for an availability db backup job. A value of zero indicates to wait indefinitely. Note that a nonzero value can lead to temporary thread leakage (where a thread lives past its spawning job, but will eventually finish)." + }, + "availabilityDbCallsPerInstance": { + "type": "integer", + "format": "int32", + "description": "Number of calls per instance to make when trying to select the proper sourcing replica for an availability db backup job." + }, + "availabilityDbCallStaggerSecs": { + "type": "integer", + "format": "int32", + "description": "Seconds to stagger calls for the same instance when trying to select the proper sourcing replica for an availability db backup job." + }, + "hostForClusterInstanceWaitSecs": { + "type": "integer", + "format": "int32", + "description": "Number of seconds to wait for responses for the calls to isHostActiveForClusterInstance for selecting the active host for a cluster instance. A value of zero indicates to wait indefinitely. Note that a nonzero value can lead to temporary thread leakage (where a thread lives past its spawning job, but will eventually finish)." 
+ }, + "hostForClusterInstanceCallsPerHost": { + "type": "integer", + "format": "int32", + "description": "Number of calls per host to make when trying to select the active host for a cluster instance." + }, + "hostForClusterInstanceCallStaggerSecs": { + "type": "integer", + "format": "int32", + "description": "Seconds to stagger calls for the same host when trying to select the active host for a cluster instance." + }, + "logChainMakeupLimit": { + "type": "integer", + "format": "int32", + "description": "Maximum number of makeup snapshot for log chain breakage between scheduled snapshots." + }, + "scheduleBackupOnRecoveryModelChange": { + "type": "boolean", + "description": "A Boolean value that determines whether to schedule an on-demand snapshot of a database when a change in the recovery model of the database is detected. When this value is 'true,' the on-demand snapshot of the database is scheduled." + }, + "scheduleBackupOnNewSnapshotNeeded": { + "type": "boolean", + "description": "A Boolean value that determines whether to schedule an on-demand snapshot of a database when the last_log_backup_lsn of the database changes to NULL. When this value is 'true,' the on-demand snapshot of the database is scheduled." + }, + "verifyIncrementalsPercentage": { + "type": "integer", + "format": "int32", + "description": "Percentage of the data in a SQL Server database snapshot to verify. The Rubrik cluster verifies the specified percentage of the data before accepting the snapshot as valid. Set to 100 to verify the entire snapshot, or set to a lower number for a faster validation process. For less that 100 percent, the Rubrik cluster performs verification on randomly selected data from the snapshot. When set to 0, the Rubrik cluster does not validate the snapshot data." + }, + "verifyIncrementalsMaxBytes": { + "type": "integer", + "format": "int32", + "description": "Maximum number of bytes to verify when validating a SQL Server database incremental snapshot. The Rubrik cluster performs verification on randomly selected data from the snapshot up to the specified number of bytes." + }, + "restoreDbTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "The timeout for a mssql db snapshot restore operation." + }, + "logFileRestoreTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "The timeout for restoring a mssql transaction log backup file." + }, + "mountDbVssRestoreTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "The timeout for mounting a mssql db snapshot and get it to RESTORING state." + }, + "mountDbFinishRecoveryTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "The timeout for finishing the recovery on the mounted database." + }, + "noBufferFileAlignmentBlockSize": { + "type": "integer", + "format": "int32", + "description": "Alignment block size to be used in no buffer file operations when physical sector size is not available. The value should be a multiple of the disk physical sector size." + }, + "mssqlMountPrefetchEnabled": { + "type": "boolean", + "description": "Whether to enable prefetch in mergedSpec." + }, + "keepMountUponFailure": { + "type": "boolean", + "description": "If a mount job fails, do not clean up or delete any existing state. The mount will need to be unmounted manually later." + }, + "unmountRetryAttempts": { + "type": "integer", + "format": "int32", + "description": "During the unmount job, the number of times to retry the unmounting process." 
+ }, + "unmountRetryDelaySecs": { + "type": "integer", + "format": "int32", + "description": "During the unmount job, the number of seconds to wait between retries of the unmounting process." + }, + "mssqlScriptStatusCheckPeriodMs": { + "type": "integer", + "format": "int32", + "description": "Number of milliseconds after which status of a script running on host is checked." + }, + "supportedMssqlVersions": { + "type": "string", + "description": "The supported list of Mssql server versions." + }, + "enableCbtBackup": { + "type": "boolean", + "description": "Do database file backup with Changing Block Tracking, which finds the file blocks changed during 2 snapshots." + }, + "cbtTrackingSizeInBytes": { + "type": "integer", + "format": "int32", + "description": "The size of each data block in bytes that a bit in cbt bitmap represents." + }, + "cbtMaxMemoryUsageInMb": { + "type": "integer", + "format": "int32", + "description": "The max memory size in MB that a cbt driver can use." + }, + "cbtMinSnappableSizeMB": { + "type": "integer", + "format": "int32", + "description": "Minimum size of snappable, to use CBT during backup." + }, + "cbtSkipFingerprintCompute": { + "type": "boolean", + "description": "Skip fingerprint calculation on the Windows host for cbt based backup." + }, + "logBackupJobThrottlingDelayInSeconds": { + "type": "integer", + "format": "int32", + "description": "Determines how long, in seconds, to delay the log backup job, if necessary." + }, + "logBackupJobRetryTimes": { + "type": "integer", + "format": "int32", + "description": "How many times to retry after the first scheduled log backup job fails." + }, + "logBackupFailureNotificationFreqInMinutes": { + "type": "integer", + "format": "int32", + "description": "Maximum frequency in minutes for which log backup failure notifications should be sent." + }, + "logChainMakeupNotificationFreqInMinutes": { + "type": "integer", + "format": "int32", + "description": "Maximum frequency in minutes for which log chain makeup notifications should be sent." + }, + "logShippingApplyStaleNotificationFreqInMinutes": { + "type": "integer", + "format": "int32", + "description": "Maximum frequency in minutes for which stale log shipping configuration notificaions should be sent." + }, + "logApplyFreqProportionToPrimaryLogBackupFreq": { + "type": "number", + "format": "double", + "description": "Specifies the proportion of the secondary's log apply job frequency compared to the primary's log backup frequency. A value smaller than 1.0 means that the log apply job will run more frequently." + }, + "enableFilestreamPartitioning": { + "type": "boolean", + "description": "Enable partitions for filestream files." + }, + "batchSnapshotMaxSnappableCount": { + "type": "integer", + "format": "int32", + "description": "Max number of databases that can be taken snapshot of, in a single batch." + }, + "batchSnapshotMaxTotalSnappableSizeInGB": { + "type": "integer", + "format": "int32", + "description": "Max size of all the databases combined, that can be fit in a single batch." + }, + "enableDatabaseBatchSnapshots": { + "type": "boolean", + "description": "Specifies whether MSSQL batch snapshots are enabled." + }, + "batchSnapshotJobSpawnPollingDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "The periodicity at which the MSSQL uber jobs will poll for the completion of child batch snapshot jobs." 
+ }, + "batchSnapshotOnDemandMaxDatabaseCount": { + "type": "integer", + "format": "int32", + "description": "The maximum number of databases allowed to be attempted in one on demand batch snapshot." + }, + "batchSnapshotOnDemandErrorMsgMaxLength": { + "type": "integer", + "format": "int32", + "description": "The maximum length allowed for an error message describing why an individual database failed during a batch snapshot. A message longer than this length should get truncated." + }, + "delayOfflineDbBackupMinutes": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in minutes. When database is offline, database backup job are delayed by the specified interval." + }, + "dbRestoreCachingEnabled": { + "type": "boolean", + "description": "Whether to enable caching during restore." + }, + "dbMountCachingEnabled": { + "type": "boolean", + "description": "Whether to enable caching during mount." + }, + "dbApplyLogsCachingEnabled": { + "type": "boolean", + "description": "Whether to enable caching during applying logs." + }, + "maxFileDownloadFailureEvents": { + "type": "integer", + "format": "int32", + "description": "The maximum number of events that we send for file download failure." + }, + "mountSnapshotSdfsServiceSocketTimeoutMs": { + "type": "integer", + "format": "int32", + "description": "Timeout for SDFS Service calls when taking a SQL Server Live Mount snapshot." + }, + "useRouteBasedIpSelectionForMount": { + "type": "boolean", + "description": "Use route-based IP selection during Live Mount." + }, + "enableMountSnapshot": { + "type": "boolean", + "description": "Whether to enable snapshots of live mounted databases." + }, + "shardsWriteParallelism": { + "type": "integer", + "format": "int32", + "description": "Number of shards to write concurrently." + }, + "vdiLogRestoreMaxTransferSizeInBytes": { + "type": "integer", + "format": "int32", + "description": "The MAXTRANSFERSIZE to use in VDI log restore." + }, + "cbtSessionExpirationHours": { + "type": "integer", + "format": "int32", + "description": "Number of hours (default to 7 days) before a cbt session expires." + }, + "cbtSessionExpirationSlaMultiples": { + "type": "integer", + "format": "int32", + "description": "Time expressed in the number of SLA frequencies before a cbt session expires." + }, + "includeDescendantSlaDomainsInHierarchyApi": { + "type": "boolean", + "description": "A Boolean value that specifies whether to include the list of descendant SLA Domains for Host and Windows Cluster objects in the hierarchy." + }, + "sourceSelectorMaxInvalidReplicasToDisplay": { + "type": "integer", + "format": "int32", + "description": "The number of invalid replicas we display when finding the sourcing instance for an availability database backup." + }, + "vdiSessionExpirationInSeconds": { + "type": "integer", + "format": "int32", + "description": "Number of seconds before a VDI session expires without any read/write access to the session." + }, + "maxDataFilesForBackup": { + "type": "integer", + "format": "int32", + "description": "The maximum number of files allowed in a protected database." + }, + "logReplicationMaxLogCountPerRequest": { + "type": "integer", + "format": "int32", + "description": "Max number of logs to replicate per single replication request. This setting is read by the target cluster and sent to the source cluster, as a part of the replication request." 
+ }, + "logReplicationMaxSnapshotCountToConsider": { + "type": "integer", + "format": "int32", + "description": "This setting is read by the target cluster, to determine how many snapshot ids to send to the source cluster." + }, + "defaultMssqlLogRetentionInHours": { + "type": "integer", + "format": "int32", + "description": "Default log backup retention time, in hours, for MSSQL databases." + }, + "waitTimeBetweenSecondaryMakeupReseedInMinutes": { + "type": "integer", + "format": "int32", + "description": "Wait time in minutes between makeup reseed attempts for a broken log chain on the primary database." + }, + "logBackupEventsTTLInDays": { + "type": "integer", + "format": "int32", + "description": "Number of days MSSQL log backup events are kept." + }, + "physicalHostDatabaseRestoreThrottleMaxRefCount": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent MSSQL database restore jobs per physical host." + } + } + }, + "GlobalNfsConfig": { + "type": "object", + "properties": { + "useNfsUploadInBlocks": { + "type": "boolean", + "description": "This is a feature toggle for NFS block uploading. When false, NFS will upload files in a single part. When true, NFS will use the same codepath as Azure for block uploading." + }, + "useNfsUploadInBlocksWithFileChannel": { + "type": "boolean", + "description": "This is a feature toggle for an improvement over NFS block uploading. NFS upload in single part uses FileChannel to copy the file, whereas the Azure codepath does not. This feature uses FileChannel together with block uploading." + }, + "defaultChunkSizeForEncryptedFileChannelTransferInMiB": { + "type": "integer", + "format": "int32", + "description": "While sending files over NFS, we use FileChannels to send data instead of using java InputStreams as FileChannels are much faster. In order to use FileChannel, we either need a file or a byte array. When we have encrypted streams, we do not want to instantiate a file from the encrypted stream. That leaves us with the option of instantiating byte arrays out of the encrypted stream. As the stream can be huge, it is not possible to simply convert the entire stream to a byte array, so we instantiate the byte arrays one chunk at a time. This config dictates the size of such a chunk." + }, + "nfsUploadStreamSizeInMB": { + "type": "integer", + "format": "int32", + "description": "For NFS we upload files in chunks. This specifies the size of that chunk. For prod it is set to 5 Gigs." + }, + "ignoreEncryptionPasswordAndCreateUnencryptedNfsLocation": { + "type": "boolean", + "description": "We no longer allow creation of NFS archival locations without encryption. But to test backward compatibility, we still need a way to create such unencrypted locations in tests. This flag, when enabled, makes the NFS location creation code simply ignore the `encryptionPassword` REST parameter." + }, + "nfsResumableMultipartUploadEnabled": { + "type": "boolean", + "description": "Flag whether to use resumable multipart upload for nfs." + }, + "nfsSupportedOptions": { + "type": "string", + "description": "Nfs supported options for Nfs archival operations. List of options is separated by a semicolon and the list of valid option values is comma separated." + }, + "nfsArchivalRequiredOptions": { + "type": "string", + "description": "List of options that must be present for every mount for archival to NFS, which we will check whenever we request an NFS archival storage handler. 
Other options may be present, and can differ from parameters initially used to mount the NFS target." + }, + "nfsDefaultOptions": { + "type": "string", + "description": "Nfs default options for Nfs archival operations. CSV list of options." + }, + "mountCmdTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout to use while executing NFS mount related commands." + }, + "umountCmdTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "A conservative timeout to use while executing NFS unmount related commmands. This value is set to be so based on the fact that some umount has taken up to 15 mintues (900 seconds)." + }, + "staleCmdTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout to use while executing NFS staleness check commmands." + }, + "numRetriesForMountOperations": { + "type": "integer", + "format": "int32", + "description": "Number of retries in case of failure for mount and umount operations." + }, + "sleepBetweenRetriesForMountOperationsInSeconds": { + "type": "integer", + "format": "int32", + "description": "Duration to sleep between retries in seconds for mount and umount operations." + }, + "bufferedInputStreamSizeInKBForNfsDownload": { + "type": "integer", + "format": "int32", + "description": "BufferedInputStream buffer size for NFS downloads. This option only applies if useBufferedInputStreamForNfsDownload is set to true." + }, + "nfsCheckMountFrequencyInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time duration to force full validation for an NFS or Qstar archival location mount point. The operations to check for a valid mount will perform basic existence of a mount point within this time window. This helps reduce contention and avoids too frequent validation checks on mount points." + }, + "calculateChecksumOnNfsUpload": { + "type": "boolean", + "description": "Calculate and persist a checksum (an MD5 hash by default) when uploading files to NFS archival target when set to true and the locationId is available." + }, + "validateChecksumOnNfsDownload": { + "type": "boolean", + "description": "Calculate the checksum (an MD5 hash by default) when downloading a file from NFS archival if set to true, and an existing checksum for the file exists from uploading. Additionally, validate against the previously persisted checksum if calculated." + }, + "failDownloadOnChecksumMismatch": { + "type": "boolean", + "description": "If checksum validation is performed, throws an exception on a mismatch." + }, + "shouldValidateEntireNFSUpload": { + "type": "boolean", + "description": "When true, NFS file upload validations will validate the entire file." + } + } + }, + "GlobalNutanixConfig": { + "type": "object", + "properties": { + "nutanixSnapshotJobRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for the Nutanix snapshot job." + }, + "removeNutanixSnapshotRetryDurationInSeconds": { + "type": "integer", + "format": "int32", + "description": "The time period during which removing Nutanix snapshot operations should be tried again." + }, + "removeNutanixSnapshotSleepInSeconds": { + "type": "integer", + "format": "int32", + "description": "The amount of time to sleep before trying to remove the Nutanix snapshot again." + }, + "removeNutanixObjectsRetries": { + "type": "integer", + "format": "int32", + "description": "The number of times to retry removing Nutanix objects in a job (e.g. create/export snapshot)." 
+ }, + "removeNutanixObjectsSleepInSeconds": { + "type": "integer", + "format": "int32", + "description": "The amount of time to sleep before trying to remove Nutanix objects in a job (e.g. create/export snapshot)." + }, + "nutanixClientPageSize": { + "type": "integer", + "format": "int32", + "description": "Page size to request on \"list\" queries." + }, + "nutanixIscsiQualifiedName": { + "type": "string", + "description": "The Nutanix iSCSI Qualifed Name (IQN). It is generally fixed. It's encouraged to have 1 IQN for the entire company." + }, + "shouldAllowNonValidatingNutanixClient": { + "type": "boolean", + "description": "Whether the Nutanix client allows non-validating connections. Users who want secure connections will need to generate valid certificates for their Nutanix cluster and import them to the Rubrik nodes." + }, + "tlsProtocolForNutanixClient": { + "type": "string", + "description": "TLS protocol to be used when connecting to Nutanix cluster." + }, + "shouldVerifyHostnameForNutanixClient": { + "type": "boolean", + "description": "Whether to verify name in HTTPS session when connecting to Nutanix cluster." + }, + "nutanixApiCallRetries": { + "type": "integer", + "format": "int32", + "description": "How many times to retry a Nutanix API rest call. This aims to alleviate problems from flaky networks." + }, + "nutanixApiCallRetryTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Number of seconds before retrying a Nutanix API rest call." + }, + "nutanixApiServiceUnavailableRetryTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Number of seconds before retrying a Nutanix API rest call. after we received a 503 (Service Unavailable) response code. Nutanix uses 503 to signal API usage overload." + }, + "enableNutanixChangedRegions": { + "type": "boolean", + "description": "Whether we should enable the use of changed regions in Nutanix snapshot jobs. Useful for disabling CBT code path if there happens to be a CBT-related bug." + }, + "shouldUseChangedRegionsForNutanixFulls": { + "type": "boolean", + "description": "Whether we should query changed regions when creating Nutanix full snapshots. Using the changed regions we can identify and drop ZEROED regions. Can be overridden if enableNutanixChangedRegions is set to false." + }, + "nutanixCbtPerNodeScalingFactor": { + "type": "integer", + "format": "int32", + "description": "Number of CBT requests a Nutanix node can concurrently handle." + }, + "shouldNutanixAllocateDiskSpace": { + "type": "boolean", + "description": "Whether or not Nutanix should allocate disk space using the disk space semaphore." + }, + "shouldThrottleNutanixCbtRequests": { + "type": "boolean", + "description": "Whether or not we should throttle CBT requests on a Nutanix cluster, which also affects if we store a host count." + }, + "shouldUseNutanixV3NodeMetadata": { + "type": "boolean", + "description": "Whether we should use Nutanix v3 API, over v2, for node metadata." + }, + "nutanixAhvHypervisorTypeString": { + "type": "string", + "description": "The string that represents an AHV hypervisor in Nutanix API." + }, + "enableNutanixClusterAhvTypeCheck": { + "type": "boolean", + "description": "Whether we should verify that the Nutanix Cluster consists of all AHV nodes on add, update, status, and refresh." + }, + "nutanixApiVersionRegexCheck": { + "type": "string", + "description": "Regex to check if this version of Nutanix API is supported." 
+ }, + "nutanixBlockOsVersionRegexCheck": { + "type": "string", + "description": "Regex used to block specific NOS versions from being used on the Rubrik cluster." + }, + "verifyAllNutanixSnapshottableDisks": { + "type": "boolean", + "description": "Whether to verify that all the Nutanix VM's snapshottable disks are present in the new snapshot." + }, + "enableNutanixApiResponseLogTruncation": { + "type": "boolean", + "description": "Whether we allow truncation of known long Nutanix API responses such as the changed regions response." + }, + "maxNutanixApiResponseCharsAfterTruncation": { + "type": "integer", + "format": "int32", + "description": "Max number of characters of the raw Nutanix API response we will log if truncation is enabled for the response." + }, + "iscsiUtilCommandTimeoutSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout for iscsiadm commands issued by our IscsiUtil which is primarily used to log in and out of iSCSI targets on the Nutanix cluster." + }, + "verboseNutanixStreamLogging": { + "type": "boolean", + "description": "Whether we enable verbose logging for each region processed by the Nutanix input and output streams used in snapshot ingest." + }, + "nutanixChangedRegionsQuerySizeInMiB": { + "type": "integer", + "format": "int32", + "description": "The size of regions to process for each Nutanix changed regions API request. If non-positive, we will not specify an end offset in the request and the Nutanix API service will automatically decide it (this has been the default behavior since our 4.0 release). This toggle can be used to limit query size if certain situations require it." + }, + "refreshDurationForNutanixUnreachableNotifInMinutes": { + "type": "integer", + "format": "int32", + "description": "Duration between posting repetitive notification for Nutanix Cluster unreachable." + }, + "enableNutanixStreamForIncrementals": { + "type": "boolean", + "description": "Whether to use our Nutanix IO streams to ingest incremental snapshots. If enabled, seperate reader and writer threads will invoke the NutanixInputStream and NutanixOutputStream. If disabled, we fall back to our single-threaded read-then-write incremental patch building process that has been in production since 4.0." + }, + "iscsiUtilLoginSleepMs": { + "type": "integer", + "format": "int32", + "description": "Time to sleep after issuing the iscsiadm login command, which may not wait for login to complete because the interface it uses with the kernel is asynchronous. Devices not showing up after iscsiadm login can be exacerbated by having a large number of targets/LUNs and because iscsid is fairly single-threaded." + }, + "udevadmTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout for the udevadm commands. e.g. `udevadm settle` should be called with a timeout because it waits until all udev events have been processed." + }, + "triggerUdevBeforeDiskIdLookup": { + "type": "boolean", + "description": "Whether we should trigger processing udev events before looking for device entries in /dev/disk/by-id. This is to make sure newly connected devices have entries populated in the directory." + }, + "shouldFailBackupIfCleanupFails": { + "type": "boolean", + "description": "Whether we should fail the Nutanix snapshot job if cleanup of the ingest volume group from the Nutanix cluster fails." 
+ }, + "shouldUseOnlySmartctlForDiskDiscovery": { + "type": "boolean", + "description": "Whether we should use smartctl instead of /dev/disk/by-id when identifying the Nutanix volume group's attached iSCSI disks." + }, + "iscsiadmSessionSleepInSeconds": { + "type": "integer", + "format": "int32", + "description": "The number of seconds to sleep before fetching iscsiadm session information after iscsiadm login. The attached scsi disks may not be available immediately after login so we use this timeout to wait for this information to be populated. This is relevant only when discovering Nutanix disks using smartctl." + }, + "shouldFailIfNumHostsIsZero": { + "type": "boolean", + "description": "A Boolean value that specifies whether to fail the Nutanix refresh job when there are no hosts found for a Nutanix cluster. When 'false,' the job continues but generates a warning log. When 'true,' the job fails by throwing an exception and generates a warning log." + }, + "exportReadSizeInMiB": { + "type": "integer", + "format": "int32", + "description": "Nutanix export job's read size when reading the snapshot disks' MJF files in SDFS." + }, + "exportBufferSizeMultiplier": { + "type": "integer", + "format": "int32", + "description": "Multiplier of the exportReadSizeInMiB for the threaded copier's buffer size." + }, + "exportWriteSizeInMiB": { + "type": "integer", + "format": "int32", + "description": "Nutanix export job's write size for the iSCSI-connected Nutanix Volume Group disks which contain the data for the new export VM." + }, + "cleanUpNutanixExportMount": { + "type": "boolean", + "description": "Whether the Nutanix export job should remove the mount dir containing the materialized snapshot disk MJF(s). It can be useful to disable clean up if we want to examine the snapshot disks used by the export job." + }, + "cleanUpNutanixExportVolumeGroup": { + "type": "boolean", + "description": "Whether the Nutanix export job should remove the Nutanix volume group we write the snapshot disk data to. It can be useful to disable clean up if we want to examine the volume group used by the export job." + }, + "shouldVerifyNutanixExportChecksums": { + "type": "boolean", + "description": "Whether we should verify the snapshot disks' sha1 hashes and exported volume disks' sha1 hashes are equal. While expensive, this can be helpful for debugging the export job." + }, + "shouldFailBackupIfBaseRemovalFails": { + "type": "boolean", + "description": "Whether we should fail the backup job if we fail to remove the base snapshot from the Nutanix cluster. In most cases we will want to fail to avoid leaking snapshots on the Nutanix cluster. However, it can be handy to continue the backup job in situations where the snapshot removal API request successfully goes through but there is a Rubrik-side bug that fails the rest of the call. In such a situation where the base snapshot is still actually removed, we may want to swallow the Rubrik-caused error." + }, + "nutanixVmRestoreCachingEnabled": { + "type": "boolean", + "description": "Whether enable cache for MJF in Nutanix export jobs or not." + }, + "nutanixVmRestoreEnableReadAhead": { + "type": "boolean", + "description": "Whether to use Sequential Read-Ahead in the Nutanix export jobs." + }, + "clientTaskTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout for a Nutanix client task." 
+ }, + "clientTaskStatusSleepInMs": { + "type": "integer", + "format": "int32", + "description": "Time to sleep before rechecking a Nutanix client task's status." + }, + "clientTaskStatusMaxIterations": { + "type": "integer", + "format": "int32", + "description": "The max number of attempts made to recheck a Nutanix client's task status before timeout. Total timeout for the rechecking a Nutanix client task status will be equal to clientTaskStatusSleepInMs * clientTaskStatusMaxIterations." + }, + "clientCreateSnapshotTaskStatusSleepInMs": { + "type": "integer", + "format": "int32", + "description": "Time to sleep before rechecking the status of the create VM snapshot operation in our Nutanix client. This is different from clientTaskStatusSleepInMs because snapshot creation is quite time-sensitive. For example, RBA-based VSS snapshots restrict a time window of around 10s to complete the snapshot." + }, + "shouldRefreshRegistrationOnAgentDisconnect": { + "type": "boolean", + "description": "Whether the Nutanix refresh job should attempt to refresh agent registration when it discovers the agent connection status is disconnected. This can add extra overhead to the Nutanix refresh job so we disable it by default. It can be enabled for specific VMs." + } + } + }, + "GlobalOracleConfig": { + "type": "object", + "properties": { + "oracleDbSnapshotJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent oracle db snapshot jobs per node." + }, + "oracleLogSnapshotJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent oracle log snapshot jobs per node." + }, + "oracleDefaultHostMountPath": { + "type": "string", + "description": "Default directory where oracle snappable mounts the directories on host." + }, + "oracleSdfsMountPath": { + "type": "string", + "description": "Directory where oracle snappble are mounted in Sdfs." + }, + "oracleIngestUseReedSolomon": { + "type": "boolean", + "description": "Whether to use Reed-Solomon or Mirrored (2-way) \\ for Oracle Ingest." + }, + "oracleSnapshotJobRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for the oracle snapshot job." + }, + "oracleExportJobRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for the oracle export job." + }, + "fileRestoreRetries": { + "type": "integer", + "format": "int32", + "description": "Number of times to retry restoring an Oracle database file." + }, + "fileRestoreSleepInMs": { + "type": "integer", + "format": "int32", + "description": "Sleep duration (in milliseconds) between attempts to restore an Oracle database file." + }, + "oracleLogBackupJobRetries": { + "type": "integer", + "format": "int32", + "description": "How many times to retry after the first scheduled log backup job fails." + }, + "oracleScriptGenerationTimeoutInSec": { + "type": "integer", + "format": "int32", + "description": "Timeout for oracle script generation calls." + }, + "oracleDatabaseBackupScriptName": { + "type": "string", + "description": "Name of the database backup script." + }, + "oracleDatabaseBackupScriptTemplate": { + "type": "string", + "description": "Jinja template path for the database backup script." + }, + "rmanRecoveryTagForDatabaseBackup": { + "type": "string", + "description": "Recovery tag for incremental Oracle database backups." 
+ }, + "rmanNlsLang": { + "type": "string", + "description": "NLS_LANG environment variable for RMAN scripts." + }, + "rmanBackupFileFormat": { + "type": "string", + "description": "File format used when creating RMAN backups of database files." + }, + "rmanLogBackupFileFormat": { + "type": "string", + "description": "File format used when creating RMAN backupsets of database files in log backup." + }, + "rmanBackupFileNamePrefix": { + "type": "string", + "description": "Data file prefix used for the data files during RMAN backup. Note that this is same as the config rmanBackupFileFormat except that rmanBackupFileFormat specifies the format as recognized by Oracle while rmanBackupFileNamePrefix specifies the value for the format." + }, + "rmanControlFileNamePrefix": { + "type": "string", + "description": "Control file name prefix used for backing up control files." + }, + "rmanControlFileFormat": { + "type": "string", + "description": "File format used when backing up control files." + }, + "rmanRollforwardLogFileFormat": { + "type": "string", + "description": "File format used when renaming roll-forward archived logs." + }, + "oracleLogBackupScriptName": { + "type": "string", + "description": "Name of the log backup script." + }, + "oracleLogSwitchScriptName": { + "type": "string", + "description": "Name of the log switch script." + }, + "oracleLogSwitchScriptTemplate": { + "type": "string", + "description": "Jinja template path for the log switch script." + }, + "oracleLogBackupScriptTemplate": { + "type": "string", + "description": "Jinja template path for the log backup script." + }, + "oracleLogDeleteScriptName": { + "type": "string", + "description": "Name of the log delete script." + }, + "oracleLogDeleteScriptTemplate": { + "type": "string", + "description": "Jinja template path for the log delete script." + }, + "oracleExportScriptName": { + "type": "string", + "description": "Name of the export script." + }, + "oracleRestoreControlfileScriptName": { + "type": "string", + "description": "Name of the script to restore the controlfile before recovery." + }, + "oracleDeleteOrphanedLogsScriptName": { + "type": "string", + "description": "Name of the script to delete orphaned archivelogs after same host recovery." + }, + "oracleRenameRollforwardLogsScriptName": { + "type": "string", + "description": "Name of the script to rename rollforward archivelogs after same host rollforward recovery." + }, + "oracleRenameRollforwardLogsScriptTemplate": { + "type": "string", + "description": "Jinja template path for the script to rename rollforward archivelogs after same host rollforward recovery." + }, + "oracleExportScriptTemplate": { + "type": "string", + "description": "Jinja template path for the export script." + }, + "oracleRestoreControlfileScriptTemplate": { + "type": "string", + "description": "Jinja template path for the script to restore controlfile." + }, + "oracleDeleteOrphanedLogsScriptTemplate": { + "type": "string", + "description": "Jinja template path for the script to delete orphaned archivelogs after same host recovery." + }, + "oracleRacStartupScriptName": { + "type": "string", + "description": "Name of the RAC startup script." + }, + "oracleRacStartupScriptTemplate": { + "type": "string", + "description": "Jinja template path for the RAC startup script." + }, + "oracleVerifyOnlineRedoLogsScriptName": { + "type": "string", + "description": "Name of the script that verifies the online redo logs exist." 
+ }, + "oracleVerifyOnlineRedoLogsScriptTemplate": { + "type": "string", + "description": "Jinja template path to the script that verifies the online redo logs exist." + }, + "oracleRegisterWithSrvctlScriptName": { + "type": "string", + "description": "Name of the script to register database with srvctl." + }, + "oracleRegisterWithSrvctlScriptTemplate": { + "type": "string", + "description": "Jinja template for script to register database with srvctl." + }, + "oracleCreateSpfileScriptName": { + "type": "string", + "description": "Name of the script to create SPFILE from PFILE." + }, + "oracleCreateSpfileScriptTemplate": { + "type": "string", + "description": "Jinja template path for the script to create SPFILE from PFILE." + }, + "oracleCreatePfileScriptName": { + "type": "string", + "description": "Name of the create pfile script." + }, + "oracleCreatePfileScriptTemplate": { + "type": "string", + "description": "Jinja template path for the create RAC primary pfile script." + }, + "oracleCreateRacAsmPfileScriptName": { + "type": "string", + "description": "Name of the create RAC-ASM pfile script." + }, + "oracleCreateRacAsmPfileScriptTemplate": { + "type": "string", + "description": "Jinja template path for the create RAC-all pfile script." + }, + "oracleShutdownInstanceScriptName": { + "type": "string", + "description": "Name of the instance shutdown script." + }, + "oracleShutdownInstanceScriptTemplate": { + "type": "string", + "description": "Jinja template path for the instance shutdown script." + }, + "oracleTablespaceExportScriptName": { + "type": "string", + "description": "Name of the the tablespace export dump script." + }, + "oracleTablespaceExportScriptTemplate": { + "type": "string", + "description": "Jinja template path for the tablespace export dump script." + }, + "oracleValidateScriptName": { + "type": "string", + "description": "Name of the the backup validation script." + }, + "oracleValidateScriptTemplate": { + "type": "string", + "description": "Jinja template path for the validate script." + }, + "oracleCrosscheckDeleteScriptName": { + "type": "string", + "description": "Name of the script to crosscheck and delete expired RMAN catalog entries." + }, + "oracleCrosscheckDeleteScriptTemplate": { + "type": "string", + "description": "Jinja template path for the script to crosscheck and delete expired RMAN catalog entries." + }, + "oracleRestorePreviewScriptName": { + "type": "string", + "description": "Name of the script to get the restore preview summary." + }, + "oracleRestorePreviewScriptTemplate": { + "type": "string", + "description": "Jinja template path for the script to get the restore preview summary." + }, + "additionalMountOptionForDbBackupOnNonSusePlatform": { + "type": "string", + "description": "Additional options for the mount command on the host for database backup. Applicable across different operating systems and setups (standalone/RAC)." + }, + "additionalMountOptionsForLogBackupOnNonSusePlatform": { + "type": "string", + "description": "Additional options for the mount command on the host for log backup. Applicable across different operating systems and setups (standalone/RAC)." + }, + "oracleStandaloneHostMountOptions": { + "type": "string", + "description": "Options for the mount command on the oracle host." + }, + "oracleAixHostMountOptions": { + "type": "string", + "description": "Options for the mount command on the oracle host." 
+ }, + "oracleRacDefaultMountOptions": { + "type": "string", + "description": "Options for the mount command on the oracle RAC node." + }, + "oracleAixRacMountOptions": { + "type": "string", + "description": "Options for the mount command on the oracle RAC node." + }, + "oracleLiveMountStandaloneHostMountOptions": { + "type": "string", + "description": "Options for the mount command on the oracle host." + }, + "oracleLiveMountRacMountOptions": { + "type": "string", + "description": "Options for the mount command on the oracle RAC." + }, + "oracleAixLiveMountStandaloneHostMountOptions": { + "type": "string", + "description": "Options for the mount command on the oracle host on AIX." + }, + "oracleAixLiveMountRacMountNewOptions": { + "type": "string", + "description": "Options for the mount command on the oracle RAC on AIX." + }, + "oracleHostMountTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout for the mount command on the oracle host." + }, + "oracleHostUnmountOptions": { + "type": "string", + "description": "Options for the unmount command on the Oracle host." + }, + "oracleAttemptLazyUnmountWhenBusy": { + "type": "boolean", + "description": "Option to attempt lazy unmount on Oracle hosts when the regular mount command fails due to busy paths." + }, + "oracleHostLazyUnmountOptions": { + "type": "string", + "description": "Options for the lazy unmount command on the Oracle host." + }, + "oracleHostUnmountTimeoutInSec": { + "type": "integer", + "format": "int32", + "description": "Timeout for the unmount command on the oracle host." + }, + "oracleHostUnmountNumRetries": { + "type": "integer", + "format": "int32", + "description": "Number of times to retry unmount on the Oracle host in case of failure." + }, + "oracleHostUnmountSleepIntervalInMillis": { + "type": "integer", + "format": "int32", + "description": "Sleep interval in millis between retries of unmount on the Oracle host." + }, + "oracleDbBackupScriptName": { + "type": "string", + "description": "Name of oracle DB backup script." + }, + "oracleMinLogBackupFrequencyInMins": { + "type": "integer", + "format": "int32", + "description": "Minimum time in mins between log backups." + }, + "oracleDefaultLogRetentionHours": { + "type": "integer", + "format": "int32", + "description": "Default retention hours for log backups." + }, + "oracleMaxDefaultNumChannels": { + "type": "integer", + "format": "int32", + "description": "Maximum default number for the backup channels." + }, + "oracleMaxNumBackupChannels": { + "type": "integer", + "format": "int32", + "description": "Maximum number for the backup channels." + }, + "oracleNfsExportOptions": { + "type": "string", + "description": "Export options for Oracle script and channel directories." + }, + "oracleSysdbaUser": { + "type": "string", + "description": "User with sysdba privileges that can run rman backups and restores and if needed oracle queries needed for discovery and other jobs." + }, + "rmanCommandIdMaxLength": { + "type": "integer", + "format": "int32", + "description": "Maximum length for command ID supported by RMAN." + }, + "sleepTimeForCheckingRmanScriptStatusInMs": { + "type": "integer", + "format": "int32", + "description": "Sleep time between checking for the rman script execution status." + }, + "sleepTimeForCheckingRmanBackupStatusInMs": { + "type": "integer", + "format": "int32", + "description": "Sleep time between checking for the rman backup operation status." 
+ }, + "sleepTimeForCheckingRmanLogBackupStatusInMs": { + "type": "integer", + "format": "int32", + "description": "Sleep time between checking for the rman log backup operation status." + }, + "shouldCheckRmanLogBackupStatus": { + "type": "boolean", + "description": "Flag to decide whether to check log backup RMAN script status." + }, + "sleepTimeForCheckingJobStatusInMs": { + "type": "integer", + "format": "int32", + "description": "Sleep time between checking for a job execution status." + }, + "sleepTimeForUpdatingFileMtimeInMins": { + "type": "integer", + "format": "int32", + "description": "Sleep time for updating file mtime." + }, + "snapshotFileTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout waiting for file snapshot." + }, + "sdfsStatTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout waiting for stat in SDFS." + }, + "sdfsPatchConversionTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout waiting for patch conversion." + }, + "oracleMountScriptName": { + "type": "string", + "description": "Name of oracle DB mount script." + }, + "oracleMountScriptTemplate": { + "type": "string", + "description": "Jinja template path for the live mount script." + }, + "oracleShutdownDatabaseScriptName": { + "type": "string", + "description": "Name of oracle DB shutdown script." + }, + "oracleShutdownDatabaseScriptTemplate": { + "type": "string", + "description": "Jinja template path for the shutdown database script." + }, + "oracleShutdownRacDatabaseScriptName": { + "type": "string", + "description": "Name of oracle DB shutdown script." + }, + "oracleShutdownRacDatabaseScriptTemplate": { + "type": "string", + "description": "Jinja template path for the RAC shutdown database script." + }, + "oraclePrefixPathInSdScratch": { + "type": "string", + "description": "Oracle root scratch directory." + }, + "oracleDbSidMaxLength": { + "type": "integer", + "format": "int32", + "description": "Maximum length of an Oracle database SID." + }, + "oracleDbNameMaxLength": { + "type": "integer", + "format": "int32", + "description": "Maximum length of an Oracle database Name." + }, + "oracleUnmountRetries": { + "type": "integer", + "format": "int32", + "description": "During the unmount job, the number of times to retry the unmounting process." + }, + "oracleUnmountRetryDelaySecs": { + "type": "integer", + "format": "int32", + "description": "During the unmount job, sleep duration between attempts to unmount." + }, + "oracleMountRetries": { + "type": "integer", + "format": "int32", + "description": "Number of times to retry NFS mount during backup and recovery jobs." + }, + "oracleMountRetryDelayInMs": { + "type": "integer", + "format": "int32", + "description": "Initial sleep duration between NFS mount retries in milliseconds." + }, + "oracleEnableDiffTreeNumRetries": { + "type": "integer", + "format": "int32", + "description": "During Oracle DB sapshot job, the number of times to retry enabling diff tree for blob store." + }, + "oracleEnableDiffTreeRetryDelayMilliSecs": { + "type": "integer", + "format": "int32", + "description": "During Oracle DB snapshot job, sleep duration in milli-seconds between attempts to enable diff tree for blob store." + }, + "expireLogsJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Expire log snapshot jobs interval in minutes." + }, + "oracleUpdateOratabScriptName": { + "type": "string", + "description": "Name of the oracle update oratab script." 
+ }, + "oracleUpdateOratabScriptTemplate": { + "type": "string", + "description": "Jinja template path for the update oratab script." + }, + "oracleRacModifyPfileScriptName": { + "type": "string", + "description": "Name of the oracle RAC modify pfile script." + }, + "oracleRacModifyPfileScriptTemplate": { + "type": "string", + "description": "Jinja template path for the RAC modify pfile script." + }, + "oracleCloneScriptName": { + "type": "string", + "description": "Name of the oracle clone script." + }, + "oracleCloneScriptTemplate": { + "type": "string", + "description": "Jinja template path for the clone script." + }, + "oracleCreateBaseDirsScriptName": { + "type": "string", + "description": "Name of script to create the base Oracle directories." + }, + "oracleCreateBaseDirsScriptTemplate": { + "type": "string", + "description": "Jinja template path for the script to create base directories." + }, + "oracleRacAsmCleanupScriptName": { + "type": "string", + "description": "Name of Oracle RAC ASM cleanup script." + }, + "oracleRacAsmCleanupScriptTemplate": { + "type": "string", + "description": "Jinja template path for the Oracle RAC ASM cleanup script." + }, + "oracleStartInstanceFromSpfileScriptName": { + "type": "string", + "description": "Name of the oracle startup instance with spfile script." + }, + "oracleStartInstanceScriptTemplate": { + "type": "string", + "description": "Jinja template path for the startup instance script." + }, + "oracleCreateInstanceScriptName": { + "type": "string", + "description": "Name of the oracle create instance script." + }, + "hostDatabaseBackupThrottleMaxRefCount": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent database backups per host or RAC." + }, + "hostLogBackupThrottleMaxRefCount": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent log backups per host or RAC." + }, + "systemLevelTablespaces": { + "type": "string", + "description": "Comma separated list of system-level Oracle tablespaces." + }, + "undoTablespacePrefix": { + "type": "string", + "description": "Prefix of Oracle UNDO tablespaces." + }, + "oracleBackupCachingEnabled": { + "type": "boolean", + "description": "Flag to toggle caching for Oracle database backup." + }, + "oracleRestoreCachingEnabled": { + "type": "boolean", + "description": "Flag to toggle caching for Oracle export." + }, + "oracleLiveMountCachingEnabled": { + "type": "boolean", + "description": "Flag to toggle caching for Oracle live-mount." + }, + "oracleValidationCachingEnabled": { + "type": "boolean", + "description": "Flag for toggling Oracle Validation caching." + }, + "oracleRestoreReadAheadEnabled": { + "type": "boolean", + "description": "Flag to toggle read ahead prefetch for Oracle export and live-mount." + }, + "oracleValidationReadAheadEnabled": { + "type": "boolean", + "description": "Flag to toggle read ahead prefetch for Oracle Validation." + }, + "oracleUseLongSequentialReadAhead": { + "type": "boolean", + "description": "Flag to toggle between long and semi-sequential read ahead prefetch for Oracle export and live-mount." + }, + "oracleExportMJFPrefetchEnabled": { + "type": "boolean", + "description": "Flag to toggle MJF prefetch for Oracle export. Prefetching can only be used if caching is enabled." + }, + "oracleLiveMountMJFPrefetchEnabled": { + "type": "boolean", + "description": "Flag to toggle MJF prefetch for Oracle live mount Prefetching can only be used if caching is enabled." 
+ }, + "oracleValidationMJFPrefetchEnabled": { + "type": "boolean", + "description": "Flag to toggle MJF prefetch for Oracle Validation Prefetching can be used only if caching is enabled." + }, + "databaseBackupParallelPatchFileConversionPerNode": { + "type": "integer", + "format": "int32", + "description": "Number of parallel patch file conversions for database backups per channel/node." + }, + "logBackupParallelPatchFileConversionPerNode": { + "type": "integer", + "format": "int32", + "description": "Number of parallel patch file conversions for log backups per channel/node." + }, + "maxSubGroupsPerSuperGroup": { + "type": "integer", + "format": "int32", + "description": "Maximum number of blobstore subgroups per supergroup." + }, + "maxNumOfDataFilesSupported": { + "type": "integer", + "format": "int32", + "description": "Maximum number of subgroups in an Oracle database." + }, + "maxContentsPerSubGroup": { + "type": "integer", + "format": "int32", + "description": "Maximum number of blobstore contents per subgroup." + }, + "maxNumOfLogFilesToBeBackedUp": { + "type": "integer", + "format": "int32", + "description": "Maximum number of archived log files to back up in a log snapshot job." + }, + "auxiliaryDestinationDiskSpaceBufferPct": { + "type": "number", + "format": "double", + "description": "Percentage over the required auxiliary destination free disk space. Should be a fraction between 0 and 1." + }, + "switchLogFileTimeoutInSec": { + "type": "integer", + "format": "int32", + "description": "Timeout for Oracle switch log files command." + }, + "numOfBytesAlertLogsToCopyOnJobFailure": { + "type": "integer", + "format": "int32", + "description": "Bytes of alert logs that will get copied and pulled to cluster on job failure." + }, + "tablespaceDatafileDiskSpaceBufferPct": { + "type": "number", + "format": "double", + "description": "Percentage over the required free disk space for the tablespace datafile(s). Should be a fraction between 0 and 1." + }, + "persistOracleDbRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries when persisting changes to Oracle DB." + }, + "persistOracleRacRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries when persisting changes to Oracle RAC." + }, + "persistOracleHostRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries when persisting changes to Oracle Host." + }, + "odmFilepathForOracle11g": { + "type": "string", + "description": "File location in Oracle home which is used to determine if DNFS is enabled in Oracle release 11g." + }, + "odmDirectoryForOracle12c": { + "type": "string", + "description": "Directory in Oracle home which is used to determine if DNFS is enabled in Oracle release 12c." + }, + "changeTimeThresholdForAgentLogsInMs": { + "type": "integer", + "format": "int32", + "description": "Time in milliseconds from when updated Linux agent logs are pulled." + }, + "enableLightweightPfcForFullDbBackup": { + "type": "boolean", + "description": "Whether to enable lightweight PFC for full database backups to speed up exposing Oracle snapshots to the user. Lightweight PFC generates a virtual patch file backed by journals - JournalVPF, and is much faster to create. JournalVPFs will be converted to patch files in the background. JournalVPFs can potentially have a degraded read performance compared to patch files till they are converted to patch files. This config is a hint and does not guarantee a JournalVPF will be generated. 
SDFS may still decide to create a Patch file based on the write pattern of the ingested data." + }, + "enableLightweightPfcForIncrementalDbBackup": { + "type": "boolean", + "description": "Whether to enable lightweight PFC for incremental database backups to speed up exposing Oracle snapshots to the user. Lightweight PFC generates a virtual patch file backed by journals, and is much faster to create. JournalVPFs will be converted to patch files in the background. JournalVPFs can potentially have a degraded read performance compared to patch files till they are converted to patch files. This config is a hint and does not guarantee a JournalVPF will be generated. SDFS may still decide to create a Patch file based on the write pattern of the ingested data." + }, + "restoredSpfilePrefix": { + "type": "string", + "description": "Prefix for the spfile created during recovery." + }, + "renamedSpfilePrefix": { + "type": "string", + "description": "Prefix to use for the rename of default restored spfile." + }, + "customPfileCopyPrefix": { + "type": "string", + "description": "Prefix to use for the copy of the custom PFILE created during recovery." + }, + "restoredPfileName": { + "type": "string", + "description": "Name of the pfile created from the original spfile during recovery." + }, + "restoredControlfilePrefix": { + "type": "string", + "description": "Prefix for the controlfile created during recovery." + }, + "invalidateOracleRootUuidRetries": { + "type": "integer", + "format": "int32", + "description": "The number of times to retry updating the OracleRoot cache UUIDs." + }, + "invalidateOracleRootUuidSleepInMs": { + "type": "integer", + "format": "int32", + "description": "The amount of time to sleep before trying to update the OracleRoot cache UUIDs." + }, + "defaultFraSizeForRecovery": { + "type": "integer", + "format": "int32", + "description": "Default FRA size in bytes to set during recovery when the FRA size has not been discovered." + }, + "oracleRestoreSpfileScriptName": { + "type": "string", + "description": "Name of the oracle restore spfile script." + }, + "oracleRestoreSpfileScriptTemplate": { + "type": "string", + "description": "Jinja template path for the restore spfile script." + }, + "oracleCreatePfileFromSpfileScriptName": { + "type": "string", + "description": "Name of the oracle create pfile from spfile script." + }, + "oracleCreatePfileFromSpfileScriptTemplate": { + "type": "string", + "description": "Jinja template path for the create pfile from spfile script." + }, + "scheduleLogBackupAfterDatabaseBackup": { + "type": "boolean", + "description": "Decides if an on-demand log backup should be scheduled after a database backup." + }, + "oracleExportAcquireMetadataThrottleTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "The amount of time in seconds to wait when trying to acquire the throttle for updating the metadata during host refresh." + }, + "desiredNumLogFilesPerChannel": { + "type": "integer", + "format": "int32", + "description": "The number of log files to back up on each channel. The number of channels for a log backup is determined by the number of available log files and the value of this setting, up to the user-specified limit." + }, + "oracleAgentServerSocketTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout while connecting to Oracle agent server service." 
+ }, + "recoveryThrottleMaxRefCount": { + "type": "integer", + "format": "int32", + "description": "Number of recovery jobs that can run simultaneously on an Oracle host or RAC." + }, + "downloadAgentLogsTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout for pulling agent logs from the host to the cluster." + }, + "postponeBackupDelayInMinsOnUnsupportedConfiguration": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in minutes. Backup jobs for Oracle databases or logs that fail due to the database being in an unsupported configuration are retried after the specified interval elapses." + }, + "postponeBackupDelayInSecondsForPendingPfcs": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in seconds. Backup jobs for Oracle databases that are lagging behind in transcode (background patch file conversions) are postponed and retried after the specified interval elapses." + }, + "sgaTargetForInitialRecoveryInstance": { + "type": "string", + "description": "SGA Target parameter to be set in the pfile for the creation of the initial instance during any same host recovery. The value should be of the format [K | M | G]." + }, + "sgaMaxSizeForInitialRecoveryInstance": { + "type": "string", + "description": "SGA Max Size parameter to be set in the pfile for the creation of the initial instance during any same host recovery. The value should be of the format [K | M | G]." + }, + "oracleAlterSpfileScript": { + "type": "string", + "description": "Name of the script used to alter the spfile during restore." + }, + "oracleAlterSpfileScriptTemplate": { + "type": "string", + "description": "Jinja template path for the script to alter the parameters in the spfile during recovery." + }, + "oracleAcoExampleFileSourceDir": { + "type": "string", + "description": "Location of the example file for advanced recovery options." + }, + "oracleAcoExampleFileName": { + "type": "string", + "description": "Name of the example file for advanced recovery options." + }, + "claimMountsForFullDatabaseBackup": { + "type": "boolean", + "description": "Decides whether to claim mounts in SDFS for full database backups." + }, + "claimMountsForLogBackup": { + "type": "boolean", + "description": "Decides whether to claim mounts in SDFS for log backups." + }, + "numMaterializeFilesThreads": { + "type": "integer", + "format": "int32", + "description": "Number of threads to materialize snapshots in export/mount jobs." + }, + "numParallelBlobstoreOperations": { + "type": "integer", + "format": "int32", + "description": "Number of threads to do blobstore operations." + }, + "filterDatafilesUsingRmanView": { + "type": "boolean", + "description": "Decides whether to filter datafiles in the backup directory based on the list of files backed up from the database perspective." + }, + "oracleValidateJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent Oracle validation jobs per node." + }, + "oracleValidateJobEnabled": { + "type": "boolean", + "description": "Specifies whether the Rubrik cluster has enabled Oracle backup validation jobs." + }, + "numParallelFileCopyThreads": { + "type": "integer", + "format": "int32", + "description": "Number of threads to do file only export." 
+ }, + "defaultSgaPercentageForValidation": { + "type": "number", + "format": "double", + "description": "Percentage of the host memeory to use as the SgaMaxSize to start the instance during the alternative host validation." + }, + "updateJobProgressInRmanTaskForDbBackup": { + "type": "boolean", + "description": "Decides whether to run Oracle queries to update job progress during the RMAN script execution in database backups." + }, + "updateJobProgressInCopyTaskForBackups": { + "type": "boolean", + "description": "Decides whether to update job progress during the COPY task in database and log backups." + }, + "exportNfsToAllInterfaces": { + "type": "boolean", + "description": "Specifies whether the backup directories on Rubrik should be exported to all Oracle host interfaces. If False, the directories are exported only to the hostname and one other interface obtained by running \"ip route\"." + }, + "avoidCatalogFilesDuringIncrementalBackup": { + "type": "boolean", + "description": "Decides whether to avoid cataloging datafiles already in the RMAN catalog during incremental database backups." + }, + "avoidCatalogFilesDuringRecovery": { + "type": "boolean", + "description": "Decides whether to avoid cataloging datafiles already in the RMAN catalog during recovery." + }, + "useUniqueBackupPathsForOracleBackup": { + "type": "boolean", + "description": "Decides whether to run Oracle backups with unique mount paths on the cluster. When enabled, the timestamp of the start of the backup job will be appended to the exported path." + }, + "remoteArchivelogRestoreDuringLiveMount": { + "type": "boolean", + "description": "Specifies whether archival logs restored during a Live Mount operation are restored to the Rubrik cluster or the local storage of the host." + }, + "shouldSetOraSdtzEnvVar": { + "type": "boolean", + "description": "Decides if the rba should try to set ORA_SDTZ before querying rman backup details and archived log details." + }, + "maxNumRecoveryChannels": { + "type": "integer", + "format": "int32", + "description": "Maximum number of channels allowed for recovery." + }, + "enableArchivedLogDeletionRetries": { + "type": "boolean", + "description": "Specifies whether archived log deletion retries are enabled." + }, + "hostLogDeletionBufferHours": { + "type": "integer", + "format": "int32", + "description": "Specifies the maximum number of hours determining how much older of archive logs with respect to the log retention that Rubrik support deletion on the host." + }, + "hostLogDeletionRetryHours": { + "type": "integer", + "format": "int32", + "description": "Specifies the number of hours for which the log deletion on the host is retried once the host log retention period is over." + }, + "logDeletionMetadataMaxNumValuesPerRow": { + "type": "integer", + "format": "int32", + "description": "Specifies the maximum number of log snapshot values that can be stored in a single row of oracle_log_deletion_metadata table." + }, + "sdfsServiceCopyFileTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Oracle-specific timeout for SDFS copyFile." + }, + "bufferTimeforScnToTimestampConversion": { + "type": "integer", + "format": "int32", + "description": "Due to the fuzziness in SCN to timestamp conversion, the endTimeFromScn might not be recoverable. Add this buffer time to endTimeFromScn to make sure the points in recoverable range are always recoverable." 
+ }, + "maximumChannelsPerNode": { + "type": "integer", + "format": "int32", + "description": "The maximum number of channels we will assign to a single node during node selection for a single job." + }, + "oracleRollingUpgradeNodeBuffer": { + "type": "integer", + "format": "int32", + "description": "The number of CDM nodes from the upgrade queue that we disregard in node selection during rolling upgrade." + }, + "numRowsToFetchForWarningEvent": { + "type": "integer", + "format": "int32", + "description": "Specifies the maximum number of rows to fetch in an Oracle query for a warning event." + }, + "dataGuardDiscoveryWriteLockWaitMs": { + "type": "integer", + "format": "int32", + "description": "Maximum time waiting for a dataguard discovery read write lock in write mode." + }, + "dataGuardDiscoveryWriteLockSleepMs": { + "type": "integer", + "format": "int32", + "description": "Sleep time between checks for the dataguard discovery read write lock in write mode." + }, + "disableTSRecoveryForDgMember": { + "type": "boolean", + "description": "Disables the tablespace request from a member database of a Data Guard group." + }, + "enableLocalLockInRacLiveMount": { + "type": "boolean", + "description": "Specifies if local lock needs to be enabled in the mount options for RAC live mount job." + } + } + }, + "GlobalPolarisConfig": { + "type": "object", + "properties": { + "polarisDefaultNumberOfSnapshotInfosPerFile": { + "type": "integer", + "format": "int32", + "description": "Number of snapshot information summaries to encode into a single file." + }, + "polarisPrefixPathInSdScratch": { + "type": "string", + "description": "Polaris root scratch directory." + }, + "useWrappedAllExport": { + "type": "boolean", + "description": "If false, we will use CQLSH COPY instead of wrappedAll export in the Polaris export task." + }, + "numProcessesToUseInCopyDuringExport": { + "type": "integer", + "format": "int32", + "description": "Number of worker processes by the CQLSH `COPY` command." + }, + "useEncryptionForCloudStorageUpload": { + "type": "boolean", + "description": "use encryption for cloud storage upload." + }, + "maxUploadAttempts": { + "type": "integer", + "format": "int32", + "description": "Number of file upload attempts." + }, + "maxMonitoringRowsPerQuery": { + "type": "integer", + "format": "int32", + "description": "Maximum limit to be used by /polaris/job_monitoring api." + } + } + }, + "GlobalQstarConfig": { + "type": "object", + "properties": { + "useQstarUploadInBlocks": { + "type": "boolean", + "description": "This is a feature toggle for Qstar block uploading. When false, Qstar will upload files in a single part. When true, it will use the same codepath as Azure for block uploading." + }, + "qstarUploadStreamSizeInMB": { + "type": "integer", + "format": "int32", + "description": "For Qstar archival, we upload files in chunks. This specifies the size of that chunk. For prod it is set to 5 Gigs." + }, + "qstarSupportIncrementalSnapshots": { + "type": "boolean", + "description": "For Qstar archival, we upload only full snapshots. When true, incremental snapshots will be uploaded." + }, + "useQstarWebServicesForStatusChecks": { + "type": "boolean", + "description": "Use QStar webservices APIs to check space availability, file accessibility, and other conditions." + }, + "qstarSmbProtocolVersion": { + "type": "number", + "format": "double", + "description": "The SMB protocol version used when mounting Qstar. The supported versions include 3.0, 2.1, 2.0 and 1.0." 
+ }, + "qstarEnableResumableMultiPartUpload": { + "type": "boolean", + "description": "Flag whether to use resumable multipart upload for Qstar." + }, + "qstarDownloadFingerprintSamplingRatio": { + "type": "number", + "format": "double", + "description": "Sampling ratio for fingerprint verification when downloading snapshot from Qstar." + }, + "qstarShouldStaggerUploads": { + "type": "boolean", + "description": "Flag whether to stagger uploads for file blocks to QStar archival locations." + }, + "qstarStaggerUploadIntervalInMillis": { + "type": "integer", + "format": "int32", + "description": "Time interval in milliseconds for a thread to check if it is its turn to upload the block." + }, + "qstarMaxWaitTimeForStaggerUploadInMillis": { + "type": "integer", + "format": "int32", + "description": "The maximum time for which each thread will wait to stagger uploads. A thread will continue its upload work once this time is passed to avoid infinite wait loop." + }, + "qstarMaxArchiveJobsPerDataLocation": { + "type": "integer", + "format": "int32", + "description": "Max number of upload jobs that will be run for each QStar archival location." + }, + "qstarMaxArchiveJobsThatCanRunOnCluster": { + "type": "integer", + "format": "int32", + "description": "Max number of upload jobs that will be run for all QStar archival locations." + }, + "qstarDisableArchivalLocationConnectivityCheck": { + "type": "boolean", + "description": "Flag for user to explicitly disable QStar archival location connectivity check." + }, + "qstarEnableSpecificConnectivityCheck": { + "type": "boolean", + "description": "Flag for user to enable QStar specific checks, like QStar Daemon status check and integral volume mount status check." + }, + "qstarMaxDownloadJobsPerDataLocation": { + "type": "integer", + "format": "int32", + "description": "Max number of download jobs that will be run for a QStar location across all nodes on the cluster." + }, + "qstarSpecificCheckEventFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Time interval between two QStar specific check events. This is to ensure that the user does not get spammed." + }, + "qstarArchivalLockFileMaxPinFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Time interval between rewriting archival lock and encryption validation files to qstar. This is to ensure that these files are not evicted from the qstar cache. This runs as a part of the archival location connectivity job." + }, + "qstarRecentReplicatedCachePagesFactor": { + "type": "number", + "format": "double", + "description": "Number used to decide the proportion of QStar replicated cache pages that can be treated as recent and left in the cache." + }, + "sleepAfterPrefetchRequestInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time to sleep after issuing a prefetch request to qstar." + }, + "qstarSubObjectsToDownloadInParallel": { + "type": "integer", + "format": "int32", + "description": "Count of subobjects that can be downloaded in parallel from QStar archival." + }, + "qstarSkipFilesPinnedInCacheCheck": { + "type": "boolean", + "description": "Flag to skip the check if the lock files are already pinned in the Qstar cache. If set, the connectivity check job performs time based periodic re-pinning of the lock files. This flag is for internal purposes only." 
+ } + } + }, + "GlobalQuicksilverConfig": { + "type": "object", + "properties": { + "enableBlackoutWindowForScheduling": { + "type": "boolean", + "description": "Whether to check blackout window for scheduling jobs." + }, + "jobFetcherSleepTimeSeconds": { + "type": "integer", + "format": "int32", + "description": "Sleep time between each iteration of JFL. The actual time will be longer if the job scan is slow." + }, + "jflStartTimeEnqueueOffsetSeconds": { + "type": "integer", + "format": "int32", + "description": "Queue jobs if their start times are at most this far in the future. This helps to reduce latency between JFL iterations. The default is to match jobFetcherSleepTimeSeconds, but setting this higher may help on clusters with slow job scans." + }, + "maxJobFetcherStalenessMillis": { + "type": "integer", + "format": "int32", + "description": "Status server will report unhealthy status if no useful work has been done for this span of time." + }, + "jflSleepBetweenDequeuesInMs": { + "type": "integer", + "format": "int32", + "description": "Sleep time between dequeueing JobInstances." + }, + "jobFetcherStealThresholdMillis": { + "type": "integer", + "format": "int32", + "description": "Time to wait for JobFetcherLoop before trying to steal a job. This is currently only used for jobs with affinity. When a node is in a bad state we use assumeJobFetcherServiceInterrupted." + }, + "jobCategoriesRefreshIntervalSeconds": { + "type": "integer", + "format": "int32", + "description": "Time to wait between queries for the current jobCategories. Nodes cache the map because it's read very often, and deserializing it is somewhat expensive." + }, + "preMaintenanceSleepTimeMs": { + "type": "integer", + "format": "int32", + "description": "Time to sleep for JobFetcherLoop when the current node is in pre-maintenance state. It will check back after this time to see if the pre-maintenance is done." + }, + "upgradeSleepTimeMs": { + "type": "integer", + "format": "int32", + "description": "Time to sleep for JobFetcherLoop when a cluster upgrade is in progress. It will check back after this time to see if upgrade is done." + }, + "quiesceSleepTimeMs": { + "type": "integer", + "format": "int32", + "description": "Time to sleep for JobFetcherLoop when the current node is quiescing. It will check back after this time to see if quiescing has completed." + }, + "assumeJobFetcherServiceInterruptedWindowMillis": { + "type": "integer", + "format": "int32", + "description": "Number of ms to assume it takes job-fetcher service to be interrupted after a node goes to BAD or UPGRADE status. This is used for stealing jobs from BAD nodes and in rolling upgrades." + }, + "jflMaxRetriesPerShard": { + "type": "integer", + "format": "int32", + "description": "Number of times to retry fetching a shard before giving up." + }, + "jflNumberOfShards": { + "type": "integer", + "format": "int32", + "description": "Number of nodes which should scan each shard of the job_instance table within JFL." + }, + "jflShardGranularity": { + "type": "integer", + "format": "int32", + "description": "Determines how granular the sharding of job_instance is. The smallest-weight node takes jflShardGranularity * jflReplicationFactor shards, and all other nodes take a proportional number of shards based on their own weights (determined by the local config jflShardWeight)." + }, + "jflShardRetryDelayInMs": { + "type": "integer", + "format": "int32", + "description": "Time to wait between shard fetch retries." 
+ }, + "emptySemShareWarningLogProbability": { + "type": "number", + "format": "double", + "description": "Probability of logging a warning when a job can't run due to its semShares config being set to 0. These warnings get very spammy, so we only do it a fraction of the time in prod mode." + }, + "jflTimeBetweenMoveReadyToStartJobsInMs": { + "type": "integer", + "format": "int32", + "description": "Time to wait between each iteration of moving all the ready to start jobs to the job queue in JobInstanceExecutor." + }, + "ignoreJobTags": { + "type": "boolean", + "description": "Whether job tags should be taken into consideration in setting affinity nodes. Tags are used if set to false, and ignored if set to true." + }, + "defaultAffinityTimeoutMillis": { + "type": "integer", + "format": "int32", + "description": "Default timeout after which a job with affinity may be stolen by a node it doesn't have affinity for." + }, + "claimDelayForRedundantNodesMs": { + "type": "integer", + "format": "int32", + "description": "After we enqueue a job on extra nodes for redundancy, the extra nodes will not claim the job until this many ms after the job's start time. For child jobs, this value is configured seperately, in parallelizableTaskClaimDelayForRedundantNodesMs." + }, + "parallelizableTaskClaimDelayForRedundantNodesMs": { + "type": "integer", + "format": "int32", + "description": "After we enqueue a child job on extra nodes for redundancy, the extra nodes will not claim the job until this many ms after the job's start time." + }, + "parallelizableTaskReplicationFactor": { + "type": "integer", + "format": "int32", + "description": "Number of nodes each child job instance should be queued onto by the child monitoring loop." + }, + "extraJobRetryOnUpgradeLimit": { + "type": "integer", + "format": "int32", + "description": "If a job has at least this many retries, JFL will refuse to give the job bonus retries for upgrade-related failures. Without this limit, failures in certain callbacks (such as onTermination) could cause JFL to give a job infinite retries during an upgrade." + }, + "gcJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for snapshot metadata GC jobs in minutes." + }, + "gcJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent snapshpt metadata GC jobs per node." + }, + "hostLogCleanupJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for HostLogCleanup jobs in minutes." + }, + "pruneJobFrequencyInMins": { + "type": "integer", + "format": "int32", + "description": "Interval for Prune Job in minutes." + }, + "pruneJobPruningAgeForManagedJobsInHours": { + "type": "integer", + "format": "int32", + "description": "Min number of hours to keep a managed job instance around. It gets archived after this many hours." + }, + "pruneJobPruningAgeForOneOffJobsInHours": { + "type": "integer", + "format": "int32", + "description": "Min number of hours to keep a one-off job instance around. It gets archived after this many hours." + }, + "pruneJobPruningCount": { + "type": "integer", + "format": "int32", + "description": "The max number of instances per managed job allowed in the job_instances table at any given time. Older instances will be archived. Fewer will be kept if they're older than pruneJobPruningAgeForManaged/OneOffJobsInHours." 
+ }, + "numArchivedJobInstancesToKeep": { + "type": "integer", + "format": "int32", + "description": "Number of archived instances of each job to keep in the archived_job_instance table." + }, + "pruneChildJobInfoFrequencyInMins": { + "type": "integer", + "format": "int32", + "description": "Interval for Prune Child Job Info Job in minutes." + }, + "pruneChildJobInfoInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent Prune Child Job Info jobs per node." + }, + "auditJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval to Audit jobs in minutes." + }, + "auditJobMaxTimeInMinsForJobToRun": { + "type": "integer", + "format": "int32", + "description": "Max time allowed for a job to run." + }, + "auditJobContinuousFailedCountThreshold": { + "type": "integer", + "format": "int32", + "description": "Number of continuous failed jobs that will trigger an alert." + }, + "auditJobConsiderationPeriodInHours": { + "type": "integer", + "format": "int32", + "description": "Audit job takes a time interval from the start time of the job for which it will audit the jobs." + }, + "auditJobRunningStagnationThresholdInHours": { + "type": "integer", + "format": "int32", + "description": "Number of hours for a job to be in Running state without making progress before we consider it stagnant." + }, + "auditJobStagnationThresholdInHours": { + "type": "integer", + "format": "int32", + "description": "Number of hours for a job to be considered \"stagnant\"." + }, + "auditYieldedJobStagnationThresholdInHours": { + "type": "integer", + "format": "int32", + "description": "Number of hours for an yielded job to be considered \"stagnant\"." + }, + "auditJobCancelationThresholdInHours": { + "type": "integer", + "format": "int32", + "description": "Number of hours for a job's cancelation to be considered failed." + }, + "auditJobCrashLoopThreshold": { + "type": "integer", + "format": "int32", + "description": "Number of claim times for a job instance to be considered stuck in a crash loop." + }, + "jobMaintainerFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Job maintainer interval in minutes." + }, + "perNodeJobMaintainerFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Per-node job maintainer interval in minutes." + }, + "jobMaintainerProcessJobMethodsNumThreads": { + "type": "integer", + "format": "int32", + "description": "Number of threads to be used by job maintainer to process objects / snappables." + }, + "jobMaintainerProcessJobMethodsTimeoutMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes after which per_node_job_maintainer times out and is considered failed." + }, + "parallelizableTaskParentMonitorChildrenIntervalSec": { + "type": "integer", + "format": "int32", + "description": "Number of seconds between each time a parent job will check on the status of its children." + }, + "parallelizableTaskEnableDefaultMaxChildrenPerCluster": { + "type": "boolean", + "description": "True iff the default throttle in parallelizableTaskDefaultMaxChildrenPerCluster is allowed to take effect. If false, then no default cluster-wide throttle is applied to concurrently running child jobs from the same parent." + }, + "parallelizableTaskDefaultMaxChildrenPerCluster": { + "type": "integer", + "format": "int32", + "description": "Default maximum number of concurrently running child jobs for a parent job. 
If this value is negative, the number of healthy nodes in the cluster is used. This default limit is only enacted if parallelizableTaskEnableDefaultMaxChildrenPerCluster is true." + }, + "parallelizableTaskChildRetryLimit": { + "type": "integer", + "format": "int32", + "description": "Number of times a parallelizable task's child job(s) can each be retried." + }, + "enableUndoSuccedingForDummyParallelizableJob": { + "type": "boolean", + "description": "Toggle for enabling UndoSucceeding for an internal test Parallelizable job. If set to false, the onParentFailure action will be set to Continue." + }, + "spaceFillerSizeForDummyParallelizableJobKB": { + "type": "integer", + "format": "int32", + "description": "Size of \"spaceFiller\" config field (if used) for an internal test Parallelizable job. This size is reached using a rough calculation method that may not be precise." + }, + "childJobRequeueDelaySec": { + "type": "integer", + "format": "int32", + "description": "Time by which to delay a parallel child job if it fails to acquire throttles." + }, + "throttleRetryMaxAttempts": { + "type": "integer", + "format": "int32", + "description": "Max times we will retry attempting to grab a throttle due to certain failures known to be Cassandra flakiness." + }, + "throttleRetryInitialMaxSleepTimeMs": { + "type": "integer", + "format": "int32", + "description": "Between retries attempting to grab a throttle due to certain failures, we will at most sleep this amount of time for the first retry." + }, + "enableLiveJobFetcher": { + "type": "boolean", + "description": "Determine if we should enqueue jobs directly onto the job instance executor or wait for JFL to pick them up." + }, + "ljfDefaultReplicationFactor": { + "type": "integer", + "format": "int32", + "description": "Number of nodes each job instance should be queued onto." + }, + "ljfYieldedJobQueueDelayInSeconds": { + "type": "integer", + "format": "int32", + "description": "Interval in seconds between checks on yielded job to determine whether or not it is ready to progress." + }, + "ljfSleepBetweenDequeuesInMs": { + "type": "integer", + "format": "int32", + "description": "Sleep time between dequeueing JobInstances." + }, + "ljfScanJobInstancesDelayInMs": { + "type": "integer", + "format": "int32", + "description": "Delay between JFL iterations when LJF is enabled." + }, + "ljfNumberOfShards": { + "type": "integer", + "format": "int32", + "description": "Number of nodes which should scan each shard of the job_instance table within JFL when LJF is enabled." + }, + "jobTypeToProfilingExpiration": { + "type": "string", + "description": "Map from job type to when profiling for that job type should stop. Allows a way to internally bulk profile a particular job type. This is empty by default but can be modified with QuicksilverTool." + }, + "opentracingSamplingStrategy": { + "type": "string", + "description": "Jaeger Opentracing strategy. The default strategy samples at 0.000001 probability." + }, + "opentracingSamplingDurationMsec": { + "type": "integer", + "format": "int32", + "description": "Frequency by which new Sampling strategies are polled by Jaeger." + }, + "queryStatsProfilerEmitPerTableMetrics": { + "type": "boolean", + "description": "If enabled, emit per table query stats metrics for every job." + }, + "jflThreadMonitorPollingIntervalInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time to wait between runs of JflThreadMonitor to detect stuck threads." 
+ }, + "jflHungThreadCheckLogThresholdInMinutes": { + "type": "integer", + "format": "int32", + "description": "Log stack trace for tasks which have been stuck on the same stack trace for more than this duration." + }, + "jflHungThreadCheckLogIntervalInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval at which to log stack traces for stuck threads." + }, + "defaultMaxRuntimeHistoryLength": { + "type": "integer", + "format": "int32", + "description": "Default number of job runtimes to keep in history." + }, + "cleanupLeakedTaskStatusThresholdInHours": { + "type": "integer", + "format": "int32", + "description": "Threshold after which a component will run cleanup on its in-memory data structure to store the running task information in its job service." + }, + "shortTaskPollingDelayInMs": { + "type": "integer", + "format": "int32", + "description": "Interval in milliseconds at which to poll the status of a running task, corresponding to a short polling duration." + }, + "mediumTaskPollingDelayInMs": { + "type": "integer", + "format": "int32", + "description": "Interval in milliseconds at which to poll the status of a running task, corresponding to a medium polling duration." + }, + "longTaskPollingDelayInMs": { + "type": "integer", + "format": "int32", + "description": "Interval in milliseconds at which to poll the status of a running task, corresponding to a long polling duration." + }, + "maxBatchedPendingUpdatesForJobProgress": { + "type": "integer", + "format": "int32", + "description": "When using the job progress manager in a batched mode to track subtask level progress per task in a job, the updates are batched together in memory for the specified minimum amount of time. This config parameter helps override the time duration if more updates are pending than this limit, and persist the updates." + }, + "isRollingUpgradeInProgress": { + "type": "boolean", + "description": "Flag to indicate whether rolling upgrades is in progress." + }, + "shouldUseCodeVersionAPI": { + "type": "boolean", + "description": "Flag to indicate whether to use CodeVersion API." + }, + "RUTargetVersion": { + "type": "string", + "description": "Field to indicate target rolling upgrade version." + }, + "RUFromVersion": { + "type": "string", + "description": "Field to indicate cluster rolling upgrade version." + }, + "checkForBreakpoints": { + "type": "boolean", + "description": "Field to indicate if breakpoints should be applied in components." + } + } + }, + "GlobalReplicationConfig": { + "type": "object", + "properties": { + "maxPullContentRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for pull content task." + }, + "delayPullContentRetryInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time in seconds to sleep before retrying pull content task." + }, + "updateReplNodesMaxRetries": { + "type": "integer", + "format": "int32", + "description": "Max number of retries to update replication node info." + }, + "snapshotReplicationJobNoWorkDelayInMillis": { + "type": "integer", + "format": "int32", + "description": "Number of milliseconds to delay the job if there is no work to do." + }, + "logReplicationJobNoWorkDelayInMillis": { + "type": "integer", + "format": "int32", + "description": "Number of milliseconds to delay the job if there is no work to do." 
+ }, + "logReplicationJobDelayRangeInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time by which the log replication job should be delayed if it can't aquire resources other than the network-specific throttle." + }, + "snapshotReplicationThrottlingMessageExtraDelayInSeconds": { + "type": "integer", + "format": "int32", + "description": "Number of seconds to delay a message about throttling snapshot replication since the last replicated snapshot, in addition to the snapshot backup frequency based on SLA." + }, + "logReplicationThrottlingMessageExtraDelayInSeconds": { + "type": "integer", + "format": "int32", + "description": "Number of seconds to delay a message about throttling log replication since the last replicated log, in addition to the log backup frequency." + }, + "remoteBlobStoreHandleGracePeriodInHours": { + "type": "integer", + "format": "int32", + "description": "Number of hours to retain remote handles on blob store contents before cleaning it up as leaked handles." + }, + "maxParallelSubObjPullsPerNodeInMemSemShares": { + "type": "integer", + "format": "int32", + "description": "Max number of parallel sub-object pulls per node." + }, + "maxParallelSubObjPullsPerJob": { + "type": "integer", + "format": "int32", + "description": "Max number of parallel sub-object pulls." + }, + "maxParallelSubObjPullsPerNodePerJob": { + "type": "integer", + "format": "int32", + "description": "Max number of parallel sub-object pulls per node per job." + }, + "releaseReplicateResourceMaxRetries": { + "type": "integer", + "format": "int32", + "description": "Max number of retries in releasing replication resource." + }, + "enableNetworkThrottle": { + "type": "boolean", + "description": "Enable network throttling function." + }, + "networkThrottleFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for running the network throttler job in minutes." + }, + "networkBandwidthOverheadFraction": { + "type": "number", + "format": "double", + "description": "Fraction of total bandwidth to reserve for overhead." + }, + "maxNetworkThrottleUpdateRetries": { + "type": "integer", + "format": "int32", + "description": "Max number of retries while updating network throttle limits and tokens." + }, + "manageNetworkThrottleDelayInMs": { + "type": "integer", + "format": "int32", + "description": "Initial delay for mange network throttle thrift call retries in milliseconds." + }, + "pullReplicateNetworkThrottleNumOfTokens": { + "type": "integer", + "format": "int32", + "description": "Number of tokens to request for one replication task in flat file or content pulling." + }, + "minBandwidthForReplicationInMbps": { + "type": "number", + "format": "double", + "description": "Specifies the minimum replication bandwidth required in megabits per second." + }, + "minBandwidthPerReplicationJobInMbps": { + "type": "number", + "format": "double", + "description": "Specifies the minimum replication bandwidth required for each replication job in megabits per second." + }, + "remoteClusterConnectivityCheckFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Time interval between each connectivity check run." + }, + "remoteClusterConnectivityCheckPingRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries when pinging a node for remote replication cluster connectivity check." 
+ }, + "remoteClusterConnectivityFailureNotificationFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Time interval between subsequent notifications for remote cluster connectivity failures." + }, + "remoteClusterRetries": { + "type": "integer", + "format": "int32", + "description": "Default number of retries when trying to contact remote cluster for metadata service." + }, + "remoteClusterRetryInitialIntervalInMs": { + "type": "integer", + "format": "int32", + "description": "Default initial interval between remote cluster metadata service retries." + }, + "shardSize": { + "type": "integer", + "format": "int32", + "description": "Size of a shard used for fetching snappableIds from source cluster." + }, + "maxGetThriftProtocolVersionRetries": { + "type": "integer", + "format": "int32", + "description": "Max number of retries getting replication thrift protocol version." + }, + "getThriftProtocolVersionDelayInMs": { + "type": "integer", + "format": "int32", + "description": "Initial delay for get protocol version thrift call retries in milliseconds." + }, + "maxEnableReplicationSourceRetries": { + "type": "integer", + "format": "int32", + "description": "Max number of retries enabling replication locations." + }, + "getEnableReplicationSourceDelayInMs": { + "type": "integer", + "format": "int32", + "description": "Initial delay for enabling replication location retries in milliseconds." + }, + "polarisReplicationSourceRefreshJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Max number of Polaris replication source refresh jobs per node." + }, + "polarisReplicationSourceSnappableConfigBatchSize": { + "type": "integer", + "format": "int32", + "description": "Number of snappables whose configs are updated together when replicating from a Polaris source." + }, + "polarisSourceObsoleteLimitInDays": { + "type": "integer", + "format": "int32", + "description": "Number of days to start to obsolete Polaris metadata." + }, + "polarisMaxIndividualCleanUpRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for each individual clean up calls performed within Polaris replication PullContent tasks. This is introduced to free up resources usd in Polaris replication PullContent task with best effort without failing the replication task itself. If clean up calls cannot reach to sync in the number of retries, we let cleanUp task of the Polaris replication job handle it later." + }, + "polarisIndividualCleanUpRetryDurationInSeconds": { + "type": "integer", + "format": "int32", + "description": "Duration in seconds delayed between individual clean up retries within Polaris replication PullContent task." + }, + "polarisExtraSpaceConstantMultiplierForStorageBufferOnBolt": { + "type": "number", + "format": "double", + "description": "Used to allocate storage buffer in Bolt for creating fingerprint files." + }, + "cloudReplicationMinWaitTimeForFpCreationInMs": { + "type": "integer", + "format": "int32", + "description": "Minimum wait time before polling for fingerprint creation." + }, + "polarisReplicateAllowedCoordinatorCommitDelayInSecs": { + "type": "integer", + "format": "int32", + "description": "Allowed polaris replicate coordinator commit window in seconds before failing jobs." + }, + "polarisMaxParallelNumObjectsPulledPerJob": { + "type": "integer", + "format": "int32", + "description": "Maximum number of threads we use in pull fingerprint files and shards for Polaris replication." 
+ }, + "polarisMaxNumPullRequestsPerBolt": { + "type": "integer", + "format": "int32", + "description": "Maximum number of requests a Bolt can handle during pull replicate of data from Polaris." + }, + "polarisSleepDurationForWaitQueuingShardsInMillis": { + "type": "integer", + "format": "int32", + "description": "Sleep duration to wait for other worker threads to queue shards to pull during PolarisPullReplicatePullContent task." + }, + "polarisFetchConverterLogsInCaseOfSuccess": { + "type": "boolean", + "description": "Flag used to indicate if PolarisPullReplicate job should fetch converter logs from converter in case of conversion success." + }, + "selectTargetNodeWithPingTestAttempts": { + "type": "integer", + "format": "int32", + "description": "Number of attempts to select target node with ping test." + }, + "maxRefreshChildJobPerNode": { + "type": "integer", + "format": "int32", + "description": "Maximum number of replication source cluster refresh child job to run per node." + }, + "enableMultipleTargetsPerSla": { + "type": "boolean", + "description": "Allow adding more than one replication targets to one Sla Domain." + }, + "maxReplicationTargetsPerSla": { + "type": "integer", + "format": "int32", + "description": "Maximum allowed replication targets per Sla Domain." + }, + "enableReplicationTargetGlobalPause": { + "type": "boolean", + "description": "Allow replication target global pause to be enabled." + }, + "maxSourceClusterRefreshChildJobs": { + "type": "integer", + "format": "int32", + "description": "Maximum number of replication source cluster refresh child jobs to run." + }, + "maxReplicatingMetadataUpdateRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries while updating snappable replicating metadata." + }, + "sendEventsRemoteJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent send events remote jobs per node." + }, + "enableRemoteClusterCoreDumps": { + "type": "boolean", + "description": "The flag to enable remote cluster core dump." + }, + "replicationCleanUpFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Time interval between each replication clean up run in minutes. Default to be 1 day." + }, + "replicationCleanUpExclusionPeriodInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes to exclude remote mounting points from replication clean up consideration. Default to be 1 day." + }, + "replicationCleanUpExpirationPeriodInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes to retain replication remote mounting points from being clean up. Default to be 60 days." + }, + "replicationSendEventsRemoteFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval of time, in minutes, between each replication send events remote job run. The default is 10 minutes." + }, + "sourceClusterObjectRefreshChildRetryLimit": { + "type": "integer", + "format": "int32", + "description": "Number of times source cluster object refresh's child job(s) can each be retried." + }, + "minMssqlLogsToReplicatePerChild": { + "type": "integer", + "format": "int32", + "description": "Minimum number of logs to replicate for mssql log replicate child task." + }, + "maxMssqlChildTasksPerJob": { + "type": "integer", + "format": "int32", + "description": "Max number of child tasks to run per mssql log replicate job." 
+ }, + "pullReplicateBlobChildRetryLimit": { + "type": "integer", + "format": "int32", + "description": "Number of times pull replicate blob's child job(s) can each be retried." + }, + "runArchiveObjectsWithoutSourceOncePerNumberOfInstances": { + "type": "integer", + "format": "int32", + "description": "Run ArchiveObjectsWithoutSource task once every this number of source cluster refresh job instances. Default to be 20. If the source cluster refresh is scheduled to run every 15 minutes, ArchiveObjectsWithoutSource task will be run once 20 * 15 minutes by default." + }, + "sourceClusterRefreshJobRetries": { + "type": "integer", + "format": "int32", + "description": "Number of job retries for SOURCE_CLUSTER_REFRESH jobs. This value is unrelated to task retries and the default number of retries set by JFL is 2." + }, + "sourceClusterExclusionPatternRefreshPageSize": { + "type": "integer", + "format": "int32", + "description": "Maximum number of exclusion patterns to fetch from the source cluster each iteration." + }, + "jobLevelMetrics": { + "type": "boolean", + "description": "Allow replication metrics to be collected per pull replicate job." + }, + "compressionMetrics": { + "type": "boolean", + "description": "Allow compression metrics to be collected." + }, + "shouldSkipProgressPosting": { + "type": "boolean", + "description": "If true, skip posting local job status and skip delegating job status to source cluster." + }, + "shouldPullOnMultipleNodes": { + "type": "boolean", + "description": "Should make handlePullContent, handlePullExtentIndex and handlePullFlatFile requests on other local nodes besides the node the PULL_REPLICATE job is running on." + }, + "replicationSocketTimeoutInMsForSnaphshotProxy": { + "type": "integer", + "format": "int32", + "description": "Replication service socket time out in milliseconds. This timeout is used exclusively for snapshot service proxy requests. Snapshot service proxy requests can have a long duration so timeout is adjusted accordingly." + }, + "initialProxyPullContentRetryBackOffInSecs": { + "type": "integer", + "format": "int32", + "description": "If replication proxy requests are enabled, Pull Content will have an expontential back off on retry to avoid overwhelming Snapshot service with too many requests. This value is the intial back off on the first retry. It will be expontentially increased on subsequent retries." + }, + "maxProxyPullContentRetryBackOffInSecs": { + "type": "integer", + "format": "int32", + "description": "If replication proxy requests are enabled, Pull Content will have an expontential back off on retry to avoid overwhelming Snapshot service with too many requests. This value is the maximum backoff between retries." + }, + "maxParallelOpenSnappableThreads": { + "type": "integer", + "format": "int32", + "description": "Maximum number of threads used to make parallel requests for openSnappable. This value should be adjusted with caution. This value affects all PULL_REPLICATE jobs and is magnified to source cluster as parallel requests. Increasing this value can expotentially increase the number of requests handled by source at any point in time." + } + } + }, + "GlobalSapHanaConfig": { + "type": "object", + "properties": { + "enableSapHanaProtection": { + "type": "boolean", + "description": "Whether SAP HANA protection is enabled on the cluster or not." + }, + "sapHanaPersistRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries when persisting changes to SAP HANA tables." 
+ }, + "sapHanaAgentServerSocketTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout while connecting to SapHana agent server service." + }, + "sapHanaSqlQueryTimeoutInSecs": { + "type": "integer", + "format": "int32", + "description": "Timeout while running sql query in sap hana system." + }, + "sapHanaSystemRefreshJobIntervalInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for SAP HANA system refresh jobs in minutes." + }, + "sapHanaSystemRefreshJobNumRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for the SAP HANA system refresh job." + }, + "sapHanaDataSnapshotJobNumRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for the SAP HANA data snapshot job." + }, + "sapHanaDriverPath": { + "type": "string", + "description": "Driver path used to establish ODBC connection." + }, + "sapHanaMinLogSnapshotJobIntervalInMins": { + "type": "integer", + "format": "int32", + "description": "Minimum allowed interval for SAP HANA log snapshot job in minutes." + }, + "sapHanaLogSnapshotJobNumRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for the SAP HANA log snapshot job." + }, + "sapHanaLogSnapshotDelayInMinsOnBadStatus": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in minutes. Log snapshot jobs for SAP HANA logs that fail due to the database being in a bad status (ERROR or UNKNOWN) are retried after the specified interval elapses." + }, + "sapHanaDeleteSystemJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent delete SAP HANA system jobs per node." + }, + "postponeBackupDelayInMinsOnBadStatusAndConfiguration": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in minutes that the Rubrik cluster waits before retrying a SAP HANA database backup job that was throttled due to unsupported configuration or bad system status." + }, + "postponeBackupDelayInMinsOnBackintNotConfigured": { + "type": "integer", + "format": "int32", + "description": "Interval, in minutes, the Rubrik cluster waits before retrying a SAP HANA database backup job that was throttled because the SAP HANA database was not configured for backint." + }, + "postponeBackupDelayInMinsOnNoFullBackupAfterProtection": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in minutes that the Rubrik cluster waits before retrying a SAP HANA database delta or log backup job that was throttled due to no full data snapshot since the protection date of the database." + }, + "skipSyncDataBackupCatalogTask": { + "type": "boolean", + "description": "If set to true, sync backup catalog task of data snapshot job is skipped." + }, + "sapHanaBackupStatusPollingFrequencyMs": { + "type": "integer", + "format": "int32", + "description": "Specifies a frequency for polling the status of a SAP HANA database backup." + }, + "sapHanaMaxBackupWaitDurationInMins": { + "type": "integer", + "format": "int32", + "description": "Maximum polling period to wait for the backup to complete." + }, + "sapHanaMaxAgentCallRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum retry for any RBA agent thrift call." 
+ }, + "sapHanaMaxBackupEntriesToSyncPerLogSnapshotJob": { + "type": "integer", + "format": "int32", + "description": "Maximum number of backup entries that a sapHanaLogSnapshot job should sync from the catalog in a single run." + }, + "maxNumberOfFailedLogBackupIdsToShowInEvent": { + "type": "integer", + "format": "int32", + "description": "Maximum number of backup IDs of failed log backups that can be shown in FailedSapHanaLogBackupsDetected event." + }, + "sapHanaTimeGapThresholdInMins": { + "type": "integer", + "format": "int32", + "description": "Maximum clock difference threshold between Rubrik cluster and SAP HANA system in minutes." + }, + "incrementalBackupMinuteFrequencyEnabled": { + "type": "boolean", + "description": "If set to true, defining incremental backup frequency in minutes will be allowed." + }, + "differentialBackupMinuteFrequencyEnabled": { + "type": "boolean", + "description": "If set to true, defining differential backup frequency in minutes will be allowed." + }, + "sapHanaFindBaseFullOfRrInLogSnapshotJobTimeLimitInMins": { + "type": "integer", + "format": "int32", + "description": "Maximum time duration (since the first log snapshot in the latest recoverable range) until which an attempt will be made to find the base full of that recoverable range in SAP HANA Log Snapshot job." + }, + "sapHanaEnableLowerBoundInFindBaseFullOfRr": { + "type": "boolean", + "description": "Whether to enforce a lower bound on the backup ID of a full snapshot which can support a recoverable range." + }, + "expireLogsJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for SAP HANA log expiry jobs in minutes." + }, + "overlappingBackupJobsWindowInMins": { + "type": "integer", + "format": "int32", + "description": "The time window during which an incremental backup job overlaps with the next scheduled full backup job." + }, + "sapHanaDbIntegrityJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for SAP HANA DB Integrity Job in minutes." + }, + "sapHanaMinSleepPerRetryMs": { + "type": "integer", + "format": "int32", + "description": "Minimum sleep duration (in milliseconds) between retries while interacting with metadata store or other services." + }, + "sapHanaMaxSleepPerRetryMs": { + "type": "integer", + "format": "int32", + "description": "Maximum sleep duration (in milliseconds) between retries while interacting with metadata store or other services." + }, + "enableSapHanaDefaultGcpDataPath": { + "type": "boolean", + "description": "Specifies whether systems being added default to the GCP data path." + } + } + }, + "GlobalShieldConfig": { + "type": "object", + "properties": { + "useHierarchyCacheService": { + "type": "boolean", + "description": "boolean flag to use hierarchy cache service or not." + }, + "roleDeletionMaxBatchSize": { + "type": "integer", + "format": "int32", + "description": "Maximum batch size for deleting role/organization." + }, + "authzContextCacheEvictionIntervalSecs": { + "type": "integer", + "format": "int32", + "description": "Duration to cache authz context." + }, + "sessionCacheEvictionIntervalSecs": { + "type": "integer", + "format": "int32", + "description": "Session cache interval implements Session expiration duration. Microsoft uses a default value of 36,000 seconds for Kerberos service tickets; our expiration interval is conservative. Ideally, Session Cache interval should track authzContextCacheEvictionIntervalSecs. 
However, it is possible for someone to misconfigure authzCache and so we create a shadow cache." + }, + "authzContextCacheMaxNumElements": { + "type": "integer", + "format": "int32", + "description": "Number of elements in authz context cache." + }, + "noCliLdapGroupMembershipCacheRefreshSecs": { + "type": "integer", + "format": "int32", + "description": "Interval between requests for LDAP Servers to provide refreshed group membership details. Manually restart spray service to apply a change to this value." + }, + "noCliLdapGroupMembershipCacheEvictionSecs": { + "type": "integer", + "format": "int32", + "description": "Period after which the group associations for any users that are dropped from group membership are purged from the internal cache. The Rubrik cluster does not recognize group membership changes until this purge action occurs. Manually restart spray service to apply a change to this value." + }, + "noCliLdapGroupMembershipCacheMaxNumElements": { + "type": "integer", + "format": "int32", + "description": "Maximum number of group membership entries in the internal cache. Cached associations speed authentication, However, significantly increasing this value will impact the available memory on the Rubrik cluster and affect overall performance. Manually restart spray service to apply a change to this value." + }, + "ldapsTlsProtocol": { + "type": "string", + "description": "Name of the algorithm to use for secure LDAP communication (TLS and StartTLS). Refer to https://docs.oracle.com/javase/7/docs/technotes/guides/security/StandardNames.html#SSLContext for the names of supported algorithms. Caution, choosing a weaker algorithm can violate your company security requirements." + }, + "ldapFollowReferrals": { + "type": "boolean", + "description": "Boolean value that determines whether to follow LDAP query referrals. The default value is true which permits the Rubrik cluster to follow a referral from one LDAP server to another LDAP server. When required by the LDAP administrator and with due caution, set the value to false to force the Rubrik cluster to ignore LDAP referrals. Use caution because ignoring LDAP referrals breaks the normal relationship between LDAP servers and can break normal authentication." + }, + "ldapBindTimeoutMillis": { + "type": "integer", + "format": "int32", + "description": "Maximum time to wait for a reply to an LDAP user authentication request. When requests are routed through a two factor authentication proxy, the delay needs to be high enough to permit the completion of the second factor of the authentication. Note that the Rubrik REST API server enforces a default authentication timeout of five minutes which is not controlled by this setting." + }, + "ldapConnectionTimeoutMillis": { + "type": "integer", + "format": "int32", + "description": "LDAP connection timeout. Used while establishing the connection pool and only becomes a factor when none of the LDAP servers respond within this interval." + }, + "ldapActiveDirectoryEmptyPaginatedSearchResultRetryNum": { + "type": "integer", + "format": "int32", + "description": "Number of Active Directory query retries after a query receives a paginated response that is empty with an indication that there is more to come. Experience indicates that the value should be three or less since empty responses normally come after two minutes." 
+ }, + "ldapActiveDirectoryDisableMatchingRuleInChain": { + "type": "boolean", + "description": "Microsoft Active Directory supports a matching rule called LDAP_MATCHING_RULE_IN_CHAIN that is \"designed to provide a method to look up the ancestry of an object\". It achieves this efficiency by removing the need for multiple round trips over the network. Sometimes, this optimization leads to slow search performance. In such cases, disable this matching rule. When disabled, the Rubrik cluster sends multiple LDAP queries to locate the groups of a user." + }, + "ldapSDKLogLevel": { + "type": "string", + "description": "Log level of unboundid ldap sdk. Valid values are OFF, SEVERE, WARNING, INFO, CONFIG, FINE, FINER, FINEST, ALL." + }, + "keyVerificationRetryNum": { + "type": "integer", + "format": "int32", + "description": "Number of retries for key verification inside a single job." + }, + "keyVerificationRetryDurationMillis": { + "type": "integer", + "format": "int32", + "description": "Duration to sleep before the first retry of key verification. The sleep duration increases exponentially based on this value." + }, + "isHardwareEncryptedClusterMigratedToUseKEK": { + "type": "boolean", + "description": "Boolean value that determines whether the development work required to enable a hardware encrypted cluster to use KEK is complete. Before the development work is complete, this configuration makes sure all workflows (bootstrap, key rotation, node replacement, sdreset etc) continue to behave consistently without needing cluster-wide KEK." + }, + "isHardwareEncryptedClusterUsingKEK": { + "type": "boolean", + "description": "This variable applies only if isHardwareEncryptedClusterMigratedToUseKEK is true. Boolean value that determines whether the hardware encrypted cluster uses cluster-wide KEK. If this variable is false, a key rotation is required to migrate the cluster to cluster-wide KEK model. Applies to upgrade scenario. Bootstrap of hardware encrypted cluster will set this variable to true." + }, + "principalSearchApiCacheTtlSecs": { + "type": "integer", + "format": "int32", + "description": "Cache TTL for /principal_search endpoint (in seconds)." + }, + "sendStackTraceInServerErrorRestResponse": { + "type": "boolean", + "description": "Flag determining whether stack traces should be sent along with the '500 server error' response." + }, + "defaultSslCipherSuites": { + "type": "string", + "description": "colon(:) separated SSL cipher suites by order of preference." + }, + "weakerSslCipherSuites": { + "type": "string", + "description": "Some scenarios require weakening ciphers. E.g., Vcenter 5.5 negotiates TLSv1:AES256-SHA, Vcenter 6.0 negotiates AES256-GCM-SHA384 with AES256-SHA." + }, + "tlsProtocolForVmwareVCenter": { + "type": "string", + "description": "TLS protocol to be used when connecting to vCenter." + }, + "isSoftwareEncryptionAtRestEnabled": { + "type": "boolean", + "description": "Flag determining whether software encryption at rest is enabled." + }, + "isMetadataPartitionsEncryptionEnabled": { + "type": "boolean", + "description": "Flag determining whether metadata partitions are encrypted." + }, + "sprayMinimumTlsVersion": { + "type": "string", + "description": "Minimum TLS Version for Spray server. Allowed values are TLSv1, TLSv1.1, TLSv1.2." + }, + "smtpMinimumTlsVersion": { + "type": "string", + "description": "Minimum TLS Version for SMTP with a certificate. Allowed values are TLSv1, TLSv1.1, TLSv1.2." 
+ }, + "registrationPublicRSAKey": { + "type": "string", + "description": "Path to public RSA key for cluster registration service." + }, + "supportTunnelDefaultInactivityTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Inactivity timeout after which support tunnel is automatically closed. Activity is defined as executing at least one command when logged in via SSH as the rksupport user." + }, + "supportTunnelTimeoutIsAbsolute": { + "type": "boolean", + "description": "Indicates whether the support tunnel uses absolute timeouts instead of inactivity timeout." + }, + "sshTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Inactivity timeout for interactive SSH sessions subject to certain safety constraints. To elaborate, interactive SSH sessions are continually monitored for new activity, by tailing the go-auditd log file (which leverages the auditd process-logging kernel extension). When no new commands are issued by a user for this time period, the sessions (whose processes are tracked by a persistent session ID from initial PAM authentication) are candidates to be terminated. For this configuration value to result in such action, the sshTimeoutEnabled property must be set to true." + }, + "sshTimeoutEnabled": { + "type": "boolean", + "description": "Enable interactive session inactivity timeout for SSH sessions, as discussed in the description for the sshTimeoutInSeconds configuration parameter. In order for session reaping to be in effect, this value must be true and the corresponding timeout value must be strictly positive. These values can be changed dynamicly, but may take a minute or two to take effect." + }, + "verifyEncryptionKeysFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency in minutes to run job to verify KMIP connectivity (if enabled)." + }, + "csrRsaKeyBits": { + "type": "integer", + "format": "int32", + "description": "Number of bits used for RSA key in certificate signing request." + }, + "csrMessageDigestAlgorithm": { + "type": "string", + "description": "Message digest algorithm used in certificate signing request." + }, + "networkIfacesForInternodeTraffic": { + "type": "string", + "description": "Network interfaces to allow internode traffic on. Allowed values are 'all', 'data', and 'management'." + }, + "iptablesLogAveragePacketsPerSecondLimit": { + "type": "integer", + "format": "int32", + "description": "Iptables will log dropped packet information. This option limits the average rate of logs per logging rule using a token bucket scheduler." + }, + "sshIsEnabledOnPort22": { + "type": "boolean", + "description": "Whether SSH is enabled for the admin/rksupport users." + }, + "upgradeTpmFirmwareJobSdfsPrecheckEnabled": { + "type": "boolean", + "description": "Whether to enable SDFS level prechecks in this job." + }, + "upgradeTpmFirmwareJobSdfsPrecheckSleepDurationSeconds": { + "type": "integer", + "format": "int32", + "description": "Sleep between retries for the SDFS precheck in this job." + }, + "upgradeTpmFirmwareJobSdfsPrecheckNumRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for the SDFS precheck in this job." + }, + "enableTpmFirmwareNotification": { + "type": "boolean", + "description": "Whether to enable notification for TPM Firmware version check." 
+ }, + "enableVcenterCertsMissingNotification": { + "type": "boolean", + "description": "Whether to enable notification if certificates are not configured so Rubrik is not validating them." + }, + "minTlsVersionForArchiving": { + "type": "string", + "description": "Minimum TLS version for non-private object store archival. Allowed values are TLSv1, TLSv1.1, TLSv1.2." + }, + "minTlsVersionForPrivateObjectStores": { + "type": "string", + "description": "Minimum TLS protocol to be used when connecting to private object stores or other object stores that use a custom endpoint. Allowed values are TLSv1, TLSv1.1, TLSv1.2." + }, + "minTlsVersionForAwsCloudNativeProtection": { + "type": "string", + "description": "Minimum TLS protocol to be used when connecting to aws server with a custom endpoint. Allowed values are TLSv1, TLSv1.1, TLSv1.2." + }, + "apiTokensPerUser": { + "type": "integer", + "format": "int32", + "description": "The number of API tokens a user is allowed to have at once." + }, + "enableCompressionInSprayServer": { + "type": "boolean", + "description": "Whether to enable compression in spray server." + }, + "enableLocalUserLockout": { + "type": "boolean", + "description": "Is account lockout setup on this cluster." + }, + "maxFailedLoginsForLocalUser": { + "type": "integer", + "format": "int32", + "description": "Number of consecutive failed logins until we lock the account on this cluster." + }, + "accountLockoutDurationInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes to lock account after max number of failed login attempts is exceeded." + }, + "enableOrgExclusiveResources": { + "type": "boolean", + "description": "Whether to enable organization exclusive resources." + }, + "samlSsoForceAuthn": { + "type": "boolean", + "description": "Whether to set ForceAuthn in SAML AuthnRequest." + }, + "defaultSamlShewnessToleranceInSeconds": { + "type": "integer", + "format": "int32", + "description": "The default clock skewness tolerance in seconds between Rubrik cluster and IdP. Okta is using 10 minutes as life time of a SAML response, thus +/- 5 minutes of skewness is at the same scale." + }, + "samlRelayStateLifetimeInMinutes": { + "type": "integer", + "format": "int32", + "description": "Lifetime of a unconsumed SAML relay state in minutes." + }, + "cleanupSamlSsoRecordsFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency in minutes to run job to clean up expired SAML records including relay states and assertions." + }, + "cleanupSamlSsoRecordsMaxInstancesPerNode": { + "type": "integer", + "format": "int32", + "description": "Max instances per node for cleanupSamlSsoRecordsJob." + }, + "cleanUpAssertionExpiryBufferInMinutes": { + "type": "integer", + "format": "int32", + "description": "Buffer time used before cleaning up expired SAML assertions." + }, + "cleanupGpsAuthzFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency in minutes to run job to clean up authz info of gps users." + }, + "cleanupGpsAuthzMaxInstancesPerNode": { + "type": "integer", + "format": "int32", + "description": "Max instances per node for cleanupGpsAuthzJob, can be used to disable the job temporarily." + }, + "gpsAuthzTTLSinceLastUsageInMinutes": { + "type": "integer", + "format": "int32", + "description": "The TTL of gps Authz info since the last usage time." 
+ }, + "gpsAuthzPermissionPageSize": { + "type": "integer", + "format": "int32", + "description": "The number of permissions stored in db as one page." + }, + "authzPageSize": { + "type": "integer", + "format": "int32", + "description": "Default size for pagination in user, rbac and org tables." + }, + "notifyExpiringCertificatesFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency in minutes to run job to check certificate expirations and notify admin." + }, + "certificateExpirationNotificationDays": { + "type": "string", + "description": "CSV corresponding to the number of days until expiration when we want to send notifications." + }, + "secureSmbNginxTimeout": { + "type": "string", + "description": "nginx proxy_timeout value. The live mount becomes invalid after this duration. In that case, the volume group needs to be live mounted again." + }, + "hiddenSsoIdpMode": { + "type": "boolean", + "description": "When the value is true, we will not disclose the IdP name at the SSO login page; otherwise, we will not consider the IdP name as a private value." + }, + "useTeleport": { + "type": "boolean", + "description": "Use teleport infrastructure to open support tunnel." + }, + "minimumPasswordLength": { + "type": "integer", + "format": "int32", + "description": "Minimum accepted length of a user password." + }, + "maximumPasswordLength": { + "type": "integer", + "format": "int32", + "description": "Maximum accepted length of a user password. This should not be changed in most, if not all, cases; if ZXCVBN is on and this number is too large, the UI will freeze when creating a password." + }, + "minimumUpperCaseCharacters": { + "type": "integer", + "format": "int32", + "description": "Minimum number of upper case characters a user password requires." + }, + "minimumLowerCaseCharacters": { + "type": "integer", + "format": "int32", + "description": "Minimum number of lower case characters a user password requires." + }, + "minimumNumericCharacters": { + "type": "integer", + "format": "int32", + "description": "Minimum number of numeric characters a user password requires." + }, + "minimumSpecialCharacters": { + "type": "integer", + "format": "int32", + "description": "Minimum number of special characters a user password requires." + }, + "blockPreviousPasswords": { + "type": "boolean", + "description": "Track and block users from using any password a user has used in the past." + }, + "invalidateWebSessionsOnPasswordUpdate": { + "type": "boolean", + "description": "When a user's password is changed, invalidate all web sessions for that user, other than the one used to update the password." + }, + "invalidateApiTokensOnPasswordUpdate": { + "type": "boolean", + "description": "When a user's password is changed, invalidate all API Tokens for that user, other than the one used to update the password." + }, + "enforceSmbSecurity": { + "type": "boolean", + "description": "A Boolean that specifies whether or not the cluster enforces SMB security. When this value is 'true,' SMB security is enforced. When this value is 'false,' SMB security is not enforced. The default value is 'false.'" + }, + "webSessionWhitelistForPerUserLimit": { + "type": "string", + "description": "A String value that specifies a list of local username. Web sessions of users on this whitelist are not limited by the cap of the number of session tokens per user. We use comma ',' to as splitter between usernames in this whitelist value." 
+ }, + "sprayServerSettingRefreshInSeconds": { + "type": "integer", + "format": "int32", + "description": "An integer that specifies the refresh period in seconds for spray server settings." + }, + "enableRestClientHostnameVerification": { + "type": "boolean", + "description": "A boolean flag to enable hostname verification in our rest client." + }, + "enableFipsForInFlight": { + "type": "boolean", + "description": "Boolean that specifies whether FIPS mode is enabled for in-flight encryption. When this value is true, FIPS mode is enabled for in-flight data encryption." + }, + "granularHierarchyCacheMetricsWhitelist": { + "type": "string", + "description": "A comma separated list that specifies whether hierarchy cache metrics should be emitted for each root instead of being aggregated." + }, + "maxThriftClientsForHierarchyCacheService": { + "type": "integer", + "format": "int32", + "description": "Maximum number of Thrift clients to maintain in the pooledThrfitClient that is used by the HierarchyCacheService." + }, + "ldapSearchAcrossIntegrations": { + "type": "boolean", + "description": "A Boolean value that specifies whether to extend the LDAP search across all the LDAP integrations instead of limiting the search to the integration where the user is logged in. If the value is set to true then ldapActiveDirectoryDisableMatchingRuleInChain is ignored and assumed to be true as well." + }, + "totpStepsDelayLimit": { + "type": "integer", + "format": "int32", + "description": "An integer value indicates the max number of steps to look back for validating a time-based one-time password (TOTP) in case of time delays between client and server side. This value has no dependency on the client side configuraiton." + }, + "totpTimeStep": { + "type": "integer", + "format": "int32", + "description": "An integer value indicates the size of time step in seconds used in the TOTP protocol. The value has to be configured according to the client-side configuration. Google Authenticator uses 30 seconds." + }, + "totpPasswordLength": { + "type": "integer", + "format": "int32", + "description": "An integer value indicates the length of the time-based one-time password used in the TOTP protocol. The value has to be configured according to the client-side configuraiton. Google Authenticator uses 6." + }, + "totpAlgorithm": { + "type": "string", + "description": "A string value indicate the algorithem used for the time-based one-time password. The value has to be configured according to the client-side configuration. Google Authenticator uses HmacSHA1." + }, + "totpFailureDelayInMillis": { + "type": "integer", + "format": "int32", + "description": "An integer value indicates the number of milliseconds to delay when TOTP authentication has failed. It is used for preventing brute-force attack over the TOTP one-time password." + }, + "hardwareEncryptionMigrationFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency in minutes to run job to migrate hardware encryption. The job will only be enabled when migration is needed. After migration is done, this job will not be scheduled again." + }, + "shieldBackupPrefixPath": { + "type": "string", + "description": "Root directory for shield backups in SDFS." + }, + "shieldBackupJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency in minutes to run job to backup shield secrets. It is supposed to be a low frequent job." 
+ }, + "removeGhostPermissionsFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency in minutes to run job to remove ghost permissions. It is supposed to be a low frequent job." + }, + "removeGhostPermissionsMaxInstancesPerNode": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurret ghost permission cleanup jobs per node." + } + } + }, + "GlobalSnapshotConfig": { + "type": "object", + "properties": { + "similarity_threshold": { + "type": "number", + "format": "double", + "description": "Similarity threshold to pick a sim disk." + }, + "fpCachingInProgressThrottleDelayInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes to delay a snappableSnapshotExpire or upload job if we decide to throttle it." + }, + "enableSnapshotLevelRetention": { + "type": "boolean", + "description": "Flag to determine if snapshots will have a static retention. If enabled, the expiration date of snapshots will be computed at the time of backup (instead of expiration job) and customer will be able to change the expiration date of the snapshots." + }, + "snappableRetentionOperationsPendingThrottleDelayInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes to delay a snappableSnapshotExpire or upload job if snappable retention operations are pending for a snappable." + } + } + }, + "GlobalStorageArrayConfig": { + "type": "object", + "properties": { + "fileTransferParallelism": { + "type": "integer", + "format": "int32", + "description": "Number of concurrent requests for transferring a file from a remote host to the Rubrik cluster." + }, + "fileTransferBlockSize": { + "type": "integer", + "format": "int32", + "description": "Data block size in MB used in transferring files from remote hosts to the Rubrik cluster." + }, + "noBufferFileReadAlignmentBlockSize": { + "type": "integer", + "format": "int32", + "description": "Alignment block size to be used in no buffer file read when physical sector size is not available. The value should be a multiple of the disk physical sector size." + }, + "checkPatchFileFingerprints": { + "type": "boolean", + "description": "Whether to compare the fingerprints output by the patch file builder with an independent fingerprint copier." + }, + "enableStorageArrayChangedRegions": { + "type": "boolean", + "description": "Whether we should enable the use of changed regions in Storage Array snapshot jobs. Useful for disabling CBT code path if there happens to be a CBT-related bug." + }, + "shouldUseChangedRegionsForStorageArrayFulls": { + "type": "boolean", + "description": "Whether we should query changed regions when creating Storage Array full snapshots. Using the changed regions we can identify and drop ZEROED regions. Can be overridden if enableStorageArrayChangedRegions is set to false." + }, + "changedRegionsBatchSizeInGiB": { + "type": "integer", + "format": "int32", + "description": "Batch size in GiB to be used while querying Storage Array for Changed Regions. This value should be a multiple of the changed regions block size." + }, + "changedRegionsBlockSizeInOneBatch": { + "type": "integer", + "format": "int32", + "description": "Block size to be used while querying Storage Array for Changed Regions in one batch. This value should be a multiple of the fingerprint file size (64KB)." 
+ }, + "disableBackupFetchDataCleanup": { + "type": "boolean", + "description": "Toggle for whether to disable the backup job's fetch data task undo cleanup in the case of job failure. We may want to disable the undo cleanup when we want to examine the contents of the snapshot dir." + }, + "shouldAllocateDiskSpace": { + "type": "boolean", + "description": "Whether or not we should allocate disk space using the disk space semaphore." + }, + "storageArrayMaxParallelIngest": { + "type": "integer", + "format": "int32", + "description": "Maximum number of volumes to ingest in parallel." + }, + "disableDisconnectVolumesFromHost": { + "type": "boolean", + "description": "Whether or not we should disconnect and delete storage array volumes on the host during snapshot job. This flag shouldn't be enabled unless it's absolutely necessary. Doing so could result in many volumes being connected to the host eventually bringing it down." + }, + "failVolumeScanOnUnableToFindMountPoints": { + "type": "boolean", + "description": "Toggle for whether to throw an error and fail if unable to find mount points on host for a storage array volume." + }, + "cleanupFilesOnHostCommandFailure": { + "type": "boolean", + "description": "Toggle for whether to cleanup files containing output of execution of commands on host in case it fails. Setting to false will result in leakage of files in case of failure." + }, + "shouldVerifySnapshotVolumesAgainstFingerprints": { + "type": "boolean", + "description": "Toggle for whether to verify integrity of snapshot volumes' patch files against samplings of their fingeprints." + }, + "sessionRequestTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout for a storage array session network API call. Increase this if calls are taking longer than expected." + }, + "sessionAskTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout for entire processing of storage array session call. This includes making the network request." + }, + "sessionUnmarshalResponseTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout for the storage array session to unmarshal an HTTP response entity into String format. We have seen some cases where a response may take over a minute to unmarshal." + }, + "checkProcessStatusSleepInMs": { + "type": "integer", + "format": "int32", + "description": "How often to check if a process is complete." + }, + "listDevicesTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout for discovering block devices." + }, + "listSerialsTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout for discovering volume serials." + }, + "listWwnPortsTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout for discovering WWN ports." + }, + "disconnectBlockDeviceTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout for disconnecting block device." + }, + "listMountPointsScriptTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout for discovering mount points." + }, + "verifyCommandTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout for verifying if a commmand exists." + }, + "lvmCommandTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout for general LVM-related commands." 
+ }, + "lvmVolumeGroupRecreateTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout for recreating a volume group from physical volumes backed by storage array volumes. This can take quite some time for large volume groups. We have seen this take 6 minutes for a 1TB volume group with 2 million files." + }, + "tlsProtocolForPureClient": { + "type": "string", + "description": "TLS protocol used by our client to the Pure Storage API." + }, + "shouldRefreshArrayInfoOnAix": { + "type": "boolean", + "description": "Toggle the value to enable or disable refresh storage configuration on AIX." + }, + "shouldRefreshArrayInfoOnLinux": { + "type": "boolean", + "description": "Toggle the value to enable or disable refresh storage configuration on Linux hosts." + }, + "shouldDisableRemoteScriptTermination": { + "type": "boolean", + "description": "Whether we should disable termination of remote storage array related scripts run on backup agents. We have seen problems such as CDM-104553 where sending a SIGTERM to a remote script process caused subsequent agent logging to fail." + }, + "storageArrayRestoreEnableReadAhead": { + "type": "boolean", + "description": "Whether to use Sequential Read-Ahead in the Storage Array export jobs." + }, + "recreatedVolumeGroupMountOptions": { + "type": "string", + "description": "Options used to mount the recreated volume groups' logical volumes during array-enabled fileset backups. noguard allows handling concurrent mounts." + }, + "recreatedVolumeGroupDisableConcurrent": { + "type": "boolean", + "description": "Whether we attempt to disable concurrent mode for recreated volume groups during array-enabled fileset backups. Concurrent mode can result in slower performance in some customer environments." + }, + "getVolumesWithSerialsRetries": { + "type": "integer", + "format": "int32", + "description": "Number of times we will retry fetching volumes on a host with a specified set of volume serials." + }, + "getVolumesWithSerialsSleepInMs": { + "type": "integer", + "format": "int32", + "description": "Sleep in milliseconds between each try to fetch volumes on a host with a specified set of volume serials." + }, + "verifyIncrementalsPercentage": { + "type": "integer", + "format": "int32", + "description": "Percentage of the data in a storage array volume snapshot to verify. The Rubrik cluster verifies the specified percentage of the data before accepting the snapshot as valid. Set to 100 to verify the entire snapshot, or set to a lower number for a faster validation process. For less that 100 percent, the Rubrik cluster performs verification on randomly selected data from the snapshot. When set to 0, the Rubrik cluster does not validate the snapshot data." + }, + "verifyIncrementalsMaxBytes": { + "type": "integer", + "format": "int32", + "description": "Maximum number of bytes to verify when validating a storage array volume incremental snapshot. The Rubrik cluster performs verification on randomly selected data from the snapshot up to the specified number of bytes." + } + } + }, + "GlobalStormConfig": { + "type": "object", + "properties": { + "maxNumConvertDiskFormatJobsOnBolt": { + "type": "integer", + "format": "int32", + "description": "Maximum number of ConvertDiskFormat jobs that can be spawned on Bolt." + }, + "maxNumUploadFileJobsOnBolt": { + "type": "integer", + "format": "int32", + "description": "Maximum number of UploadFile jobs that can be spawned on Bolt." 
+ }, + "reversePatchFilesDownloadThreadCountOnBolt": { + "type": "integer", + "format": "int32", + "description": "Number of threads to use when downloading files in ReversePatchFiles." + }, + "stormRequestOptimalShardSize": { + "type": "integer", + "format": "int32", + "description": "Beyond this number, a shard can be considered to big a enough partition, cassandra does not work super well with big paritions. This will indicate deleting any deleteable entries from the shard." + }, + "stormInstanceOptimalShardSize": { + "type": "integer", + "format": "int32", + "description": "Beyond this number, a shard can be considered to big a enough partition, cassandra does not work super well with big paritions. This will indicate deleting any deleteable entries from the shard." + }, + "stormRequestMetadataGcThresholdTimeInMinutes": { + "type": "integer", + "format": "int32", + "description": "Time after which the terminated metadata can be GCd." + }, + "stormInstanceMetadataGcThresholdTimeInMinutes": { + "type": "integer", + "format": "int32", + "description": "Time after which the terminated metadata can be GCd." + }, + "waitTimeToAvoidCrossNodeRaceInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time to wait in seconds after releasing the throttle of bad node and before acquiring the throttle, this helps in avoiding race between the two nodes." + }, + "collectLogsFromBolt": { + "type": "boolean", + "description": "We might want to disable collection of logs from Bolt instance to minimize network overhead or curb the flakiness of log download thrift end point." + }, + "collectLogsFromConverter": { + "type": "boolean", + "description": "We might want to disable collection of logs from Converter instance to minimize network overhead or curb the flakiness of log download thrift end point." + }, + "boltCloudInitFilePath": { + "type": "string", + "description": "This is a cloud init script. May change in case we want to use a hotfix for instantiation." + }, + "rivetCloudInitFilePath": { + "type": "string", + "description": "This is a cloud init script. May change in case we want to use a hotfix for instantiation." + }, + "boltLogMinFreeSpaceFraction": { + "type": "number", + "format": "double", + "description": "Bolt logs won't be copied locally if the free space on the local partition will fall below this fraction." + }, + "boltLogMaxSizeInMB": { + "type": "integer", + "format": "int32", + "description": "Bolt logs won't be copied locally if the log bundle size is more than this value." + }, + "boltLogChunkSizeInMB": { + "type": "integer", + "format": "int32", + "description": "Log Chunk Size to read in each thirft call, in MB." + }, + "boltLogDirMaxSizeFraction": { + "type": "number", + "format": "double", + "description": "Bolt logs won't be copied locally if boltLogDir size will become more than this fraction of total space on local partition." + }, + "requestPollIntervalInSeconds": { + "type": "integer", + "format": "int32", + "description": "Sleep time between successive poll to storm manager after placing a storm launch request." + }, + "maxWaitTimeForRequestInSeconds": { + "type": "integer", + "format": "int32", + "description": "Maximum time to wait for storm manager request to succeed." + }, + "sleepTimeToPollIsStormHealthyInMillis": { + "type": "integer", + "format": "int32", + "description": "Sleep time to poll whether the storm is healthy." 
+ }, + "healthInstanceCheckInParallel": { + "type": "integer", + "format": "int32", + "description": "Number of health checks for storm instances that can happen in parallel." + }, + "stormInstanceRunInParallel": { + "type": "integer", + "format": "int32", + "description": "Number of storm instances that can be launched or terminated in parallel." + }, + "maxNumRetriesForPingingConverter": { + "type": "integer", + "format": "int32", + "description": "Number of times to retry when establishing contact with converter instance." + }, + "maxNumRetriesForPingingBolt": { + "type": "integer", + "format": "int32", + "description": "Number of times to retry when establishing contact with bolt instance." + }, + "maxNumRetriesForPingingRivet": { + "type": "integer", + "format": "int32", + "description": "Number of times to retry when establishing contact with Rivet instance." + }, + "sleepTimeForConverterInstanceInMillis": { + "type": "integer", + "format": "int32", + "description": "Sleep time while waiting for services to come up on converter." + }, + "sleepTimeForRubrikBoltInstanceInMillis": { + "type": "integer", + "format": "int32", + "description": "Sleep time while waiting for services to come up on bolt." + }, + "sleepTimeForRubrikRivetInstanceInMillis": { + "type": "integer", + "format": "int32", + "description": "Sleep time while waiting for services to come up on Rivet." + }, + "stormTypesToAddressForLaunchingInParallel": { + "type": "integer", + "format": "int32", + "description": "Number of storm types that can be operated on for addressing storm requests in parallel. This is an upper bound on number of threads used for providing execution context in storm request management job." + }, + "stormShardedScanThreadCount": { + "type": "integer", + "format": "int32", + "description": "Number of threads to allow scanning sharded tables in parallel in StormManager. To scan the sharded tables, we do point queries over all the shards, we can optimize the fetch time with more threads." + }, + "linuxConverterCloudInitScriptFilePath": { + "type": "string", + "description": "This is a cloud init script for converter. May change in case we want to use a hotfix for instantiation." + }, + "awsWindowsConverterCloudInitScriptFilePath": { + "type": "string", + "description": "A path to the cloud initialization script for the converter. This value can change to use a different script in the event that expected resources, such as ports, are not available." + }, + "azureWindowsConverterCloudInitScriptFilePath": { + "type": "string", + "description": "A path to the cloud initialization script for the converter. This value can change to use a different script in the event that expected resources, such as ports, are not available." + }, + "azureStormImagesContainerName": { + "type": "string", + "description": "Name of the container in the general purpose storage account that will be used to store rubrik images." + }, + "stormThriftServiceSocketTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout in ms for client timeout for Cloud storage service." + }, + "stormInstancesCertificatesDirectory": { + "type": "string", + "description": "Directory under which all storm instances certificates will be stored." + }, + "stormMaintenanceJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Gives the frequency of StormMaintenanceJob in minutes." 
+ }, + "maxJobsPerConverter": { + "type": "integer", + "format": "int32", + "description": "Maximum number of jobs running on a converter." + }, + "maxJobsPerBolt": { + "type": "integer", + "format": "int32", + "description": "Maximum number of jobs running on a bolt." + }, + "maxJobsPerRivet": { + "type": "integer", + "format": "int32", + "description": "Maximum number of jobs running on a Rivet." + }, + "maxAmiToVmdkConversionPerConverter": { + "type": "integer", + "format": "int32", + "description": "Maximum number of Ami to Vmdk conversion running on a converter." + }, + "maxPullReplicateJobPerBolt": { + "type": "integer", + "format": "int32", + "description": "Maximum pull replicate job during Ami to Vmdk conversion that can run on a bolt." + }, + "maxCloudInstantiateJobsPerConverter": { + "type": "integer", + "format": "int32", + "description": "Maximum number of cloud instantiation jobs running on a converter." + }, + "maxCloudInstantiateJobsPerBolt": { + "type": "integer", + "format": "int32", + "description": "Maximum number of cloud instantiation jobs running on a bolt." + }, + "maxCloudInstantiateJobsPerRivet": { + "type": "integer", + "format": "int32", + "description": "Maximum number of cloud instantiation jobs running on a Rivet." + }, + "maxCloudConsolidationJobsPerBolt": { + "type": "integer", + "format": "int32", + "description": "Maximum number of cloud consolidation jobs running on a bolt." + }, + "maxArchivalReverseJobsPerBolt": { + "type": "integer", + "format": "int32", + "description": "Maximum number of archival reverse jobs running on a bolt." + }, + "maxIndexingJobsPerBolt": { + "type": "integer", + "format": "int32", + "description": "Maximum number of indexing jobs running on a bolt." + }, + "maxDownloadFileJobsPerBolt": { + "type": "integer", + "format": "int32", + "description": "Maximum number of download file jobs running on a bolt." + }, + "maxNumberOfDiskAttachmentsPerBolt": { + "type": "integer", + "format": "int32", + "description": "Maximum number of disk attachments possible on the bolt instance at a time." + }, + "maxNumberOfDiskAttachmentsPerRivet": { + "type": "integer", + "format": "int32", + "description": "Maximum number of disk attachments possible on the Rivet instance at a time." + }, + "maxNumberOfDiskAttachmentsPerConverter": { + "type": "integer", + "format": "int32", + "description": "Maximum number of disk attachments possible on the converter instance at a time." + }, + "pollFrequencyToCheckStatusOfVhdCopyInMs": { + "type": "integer", + "format": "int32", + "description": "Time to sleep between consecutive call to poll the status of vhd copy." + }, + "maxWaitTimeToPollVhdCopyStatusInSecs": { + "type": "integer", + "format": "int32", + "description": "Maximum time to wait in seconds while polling vhd copy status." + }, + "pollFrequencyToCheckStatusOfAmiShareInMs": { + "type": "integer", + "format": "int32", + "description": "Time to sleep between consecutive call to poll the status of ami share." + }, + "maxWaitTimeToPollAmiShareStatusInMs": { + "type": "integer", + "format": "int32", + "description": "Maximum time to wait in ms while polling ami share status." + }, + "smlSleepTimeInSeconds": { + "type": "integer", + "format": "int32", + "description": "Sleep time of the Storm Manager Loop thread between two consecutive single loop run." + }, + "shouldUseBootstrappedImage": { + "type": "boolean", + "description": "When this flag is set to true, we will first bootstrap the base image." 
+ }, + "stormInstanceSshKeyPairDirectory": { + "type": "string", + "description": "Directory under which all the storm instances private ssh key pair will be saved. This is relative to stormPrefixPathInSdScratch." + }, + "cloudComputePortsYmlPath": { + "type": "string", + "description": "Path of the yml file that specifies the ports to be checked for storm instances." + }, + "maxStormLaunchBatchSizePerJob": { + "type": "integer", + "format": "int32", + "description": "Maximum number of instances to launch in a single run of the StormInstanceRun job." + }, + "maxStormTerminateBatchSizePerJob": { + "type": "integer", + "format": "int32", + "description": "Maximum number of instances to terminated in a single run of the StormInstanceRun job." + }, + "requestJobTimeFractionForInstanceAvailability": { + "type": "number", + "format": "double", + "description": "Storm instance ttl should have at least this fraction of storm request job time for its availability. This is used for long running storm requests." + }, + "maxWaitTimeToPollDisksSetupStatusInSecs": { + "type": "integer", + "format": "int32", + "description": "Maximum time to wait in seconds while polling disk setup status. We issue a setup disks call after launching a storm for long running requests." + }, + "pollFrequencyToCheckDisksSetupStatusInMs": { + "type": "integer", + "format": "int32", + "description": "Time to sleep between consecutive call to poll the status of disks setup." + }, + "additionalDataDiskSizeInGB": { + "type": "integer", + "format": "int32", + "description": "It represents the size of new data disks." + }, + "dataDiskTypeInAws": { + "type": "string", + "description": "It represents the disk type of new data disks in AWS." + }, + "dataDiskTypeInAzure": { + "type": "string", + "description": "It represents the disk type of new data disks in Azure." + }, + "defaultBoltDataDiskUsableSizeInAwsInGB": { + "type": "integer", + "format": "int32", + "description": "Size of default data disk in Bolt in AWS. Total disk size is 500 GB but cassandra snapshot and local staging takes significant amount of space. Hence, total usable space in 432 GB." + }, + "defaultRivetDataDiskUsableSizeInAwsInGB": { + "type": "integer", + "format": "int32", + "description": "Size of default data disk in Bolt in AWS. Total disk size is 500 GB but cassandra snapshot and local staging takes significant amount of space. Hence, total usable space in 432 GB." + }, + "defaultBoltDataDiskUsableSizeInAzureInGB": { + "type": "integer", + "format": "int32", + "description": "Size of default data disk in Bolt in Azure. Total data disk size is 256 GB but cassandra snapshot and local staging takes up 75 GB of space. Hence, total usable space is 181 GB." + }, + "fractionSpaceTakenUpToStoreMetadata": { + "type": "number", + "format": "double", + "description": "This value represents the fraction of storage in disk taken up by file system, cerebro and cassandra related operation." + }, + "chunkSizeForExtraDataDiskStorageInGB": { + "type": "integer", + "format": "int32", + "description": "Extra storage which should be added will be multiple of this chunk size. Note that the size of new data disks is given by additionalDataDiskSizeInGB." + }, + "maxStorageAllowedForLongRunningBoltInstancesInGB": { + "type": "integer", + "format": "int32", + "description": "Maximum storage allowed for long running bolt instance in GB." 
+ }, + "maxStorageAllowedForLongRunningRivetInstancesInGB": { + "type": "integer", + "format": "int32", + "description": "Maximum storage allowed for long running Rivet instance in GB." + }, + "minRuntimeOfLongJobInMins": { + "type": "integer", + "format": "int32", + "description": "This is the minimum amount of time a job should run on storm for the request to be put in LONG running storm instance." + }, + "ec2UserSshPublicKeyPath": { + "type": "string", + "description": "Path where ssh key for ec2 user is copied." + }, + "boltUserSshPublicKeyPath": { + "type": "string", + "description": "Path where ssh key for rksupport user is copied." + }, + "lincUserSshPublicKeyPath": { + "type": "string", + "description": "Path where ssh key for ubuntu user is copied." + }, + "wincUserSshPublicKeyPath": { + "type": "string", + "description": "Path where ssh key for Administrator user is copied." + }, + "registerOnlyGenericCloudComputeProcessorTestOnly": { + "type": "boolean", + "description": "If this flag is enabled, the cloud compute server registers handlers only for the generic (non CDM specific) cloud compute service. This is intended to be used for testing only, to ensure that CloudOn jobs only make calls to the generic cloud compute handler." + }, + "rivetUserSshPublicKeyPath": { + "type": "string", + "description": "Path where ssh key for rksupport user is copied." + } + } + }, + "GlobalThorConfig": { + "type": "object", + "properties": { + "archivalLockManagerCacheTimeoutInMilis": { + "type": "integer", + "format": "int32", + "description": "The archival lock manager caches the contents of the cluster lock file and the cluster encryption file. This specifies the expiration time on that cache." + }, + "downloadChunkSizeForFileReconstructionInMB": { + "type": "integer", + "format": "int32", + "description": "While downloading files which are split on the archival location we need to download multiple chunk ranges and write to a single file. This variable defines the default chunk size." + }, + "thresholdForOutOfSpaceNotificationInMB": { + "type": "integer", + "format": "int64", + "description": "Specifies the amount of space in megabytes. The system sends an out of space notification when the available space on an archival location drops below this value." + }, + "archivalSpaceUsageLowThresholdPercent": { + "type": "integer", + "format": "int32", + "description": "Specifies a percentage of the available archival space to use as the low notification threshold. The system sends a notification when the space usage exceeds the specified percentage." + }, + "archivalSpaceUsageMediumThresholdPercent": { + "type": "integer", + "format": "int32", + "description": "Specifies a percentage of the available archival space to use as the medium notification threshold. The system sends a notification when the space usage exceeds the specified percentage." + }, + "archivalSpaceUsageHighThresholdPercent": { + "type": "integer", + "format": "int32", + "description": "Specifies a percentage of the available archival space to use as the high notification threshold. The system sends a notification when the space usage exceeds the specified percentage." + }, + "archivalUploadMaxRetriesForFatalException": { + "type": "integer", + "format": "int32", + "description": "For Archival Upload, we retry uploading the content files which have not been uploaded yet due to fatal exception. This specifies the number of times to try before failing." 
+ }, + "archivalUploadMaxRetriesForRetryableException": { + "type": "integer", + "format": "int32", + "description": "For Archival Upload, we retry uploading the content files which have not been uploaded yet due to retryable exception. This specifies the number of times to try before failing." + }, + "archivalUploadRetrySleepInSeconds": { + "type": "integer", + "format": "int32", + "description": "Threshold in seconds for the archival upload job thread to sleep before retrying again." + }, + "minTimeForUploadingFullInMinutes": { + "type": "integer", + "format": "int32", + "description": "Minimum time for which we should wait before uploading full even if other conditions are met (15 * 24 * 60)." + }, + "minFullUploadRefsCount": { + "type": "integer", + "format": "int32", + "description": "Min count of jobs that can upload full snapshots." + }, + "maxArchiveJobsThatCanRunOnCluster": { + "type": "integer", + "format": "int32", + "description": "Max number of archive jobs that will run on the cluster irrespective of whether a full or increment is being archived. This parameter is provided primarily as a limit value in order to cap the maximum number of archive jobs if needed." + }, + "maxArchiveJobsThatCanRunPerDataLocation": { + "type": "integer", + "format": "int32", + "description": "Max number of archive jobs that will run for each data location irrespective of whether a full or increment is being archived. This parameter is provided primarily as a limit value in order to cap the maximum number of archive jobs per data location if needed." + }, + "pctOfFullUploadJobsPerNode": { + "type": "number", + "format": "double", + "description": "Percentage of upload jobs per node that can upload full. If we have n jobs running per node then we can have max pctOfFullUploadJobsPerNode * n * numNodes jobs that can upload full." + }, + "forceFullUploadToArchivalStore": { + "type": "boolean", + "description": "Force a full upload to archival store." + }, + "chunkDownloadRetryCount": { + "type": "integer", + "format": "int32", + "description": "Retry count for chunk download from object store in case of a network flap." + }, + "chunkDownloadBackoffInitialRangeInMs": { + "type": "integer", + "format": "int32", + "description": "Initial back off time for chunk download in case of a failure." + }, + "downloadSnapshotFromCloudJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent download snapshot from cloud jobs per node." + }, + "partUploadRetryCount": { + "type": "integer", + "format": "int32", + "description": "Retry count for uploading chunks to archival storage. This is the number of times we retry irrespective of the type of failure." + }, + "partUploadInitialSleepInMs": { + "type": "integer", + "format": "int32", + "description": "Initial sleep time in ms when doing back off during part uploads." + }, + "ttlForAJenkinsTestBucketInHours": { + "type": "integer", + "format": "int32", + "description": "For buckets used in jenkins test we will delete all files in that particular bucket if any file in that bucket exceeds the TTL value." + }, + "maxDownloadSnapshotRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries of the download job which is an one off job." + }, + "trustObjectStoreTlsCertificateOnFirstUse": { + "type": "boolean", + "description": "Sets whether or not we should trust self signed certificates from private object stores." 
+ }, + "minFingerprintsToValidateForArchival": { + "type": "integer", + "format": "int32", + "description": "Minimum number of fingerprints to validate after upload or download of a subobject." + }, + "maxFingerprintsToValidateForArchival": { + "type": "integer", + "format": "int32", + "description": "Maximum number of fingerprints to validate after upload or download of a subobject." + }, + "numFingerprintsToValidateForArchivalPerGB": { + "type": "integer", + "format": "int32", + "description": "Number of fingerprints to validate after upload or download of a subobject per GB of logical size. The actual number of fingerprints to validate is bounded between minFingerprintsToValidateAfterUpload and maxFingerprintsToValidateAfterUpload." + }, + "cacheBlockSizeInKB": { + "type": "integer", + "format": "int32", + "description": "Block size used by Archival Service helper to download from archival source." + }, + "cacheBlockSizeInKBForBolt": { + "type": "integer", + "format": "int32", + "description": "Block size used by Archival Service helper to download from archival source. This config overwrites cacheBlockSizeInKB in bolt cluster." + }, + "archivalServiceHelperThriftTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Archival Service Helper Thrift Timeout In seconds." + }, + "nonCacheableKeyTtlInSeconds": { + "type": "integer", + "format": "int32", + "description": "TTL in seconds for non Cacheable keys that are used for fingerprint validation after snapshot upload." + }, + "archivedFileMetadataCacheTTLInSeconds": { + "type": "integer", + "format": "int32", + "description": "TTL in seconds for archived file metadata cache entries." + }, + "archivedFileMetadataCacheMaxEntries": { + "type": "integer", + "format": "int32", + "description": "Number of entries to keep in archived file metadata cache." + }, + "archivedFileMetadataCacheThreadCount": { + "type": "integer", + "format": "int32", + "description": "Thread count for archived file metadata cache." + }, + "retryForUploadingSubObjectInArchiveJob": { + "type": "integer", + "format": "int32", + "description": "Number of retries to archive a subobject." + }, + "retryForValidatingPatchFilesAfterArchive": { + "type": "integer", + "format": "int32", + "description": "Number of retries to validate an archived patch file." + }, + "exitArchivalRecoveryJobCleanly": { + "type": "boolean", + "description": "Flag to indicate clean exit of the ArchivalRecovery job. Recovery job runs in a rollforward task and tries to recover all snappables, in case of even one failure it will keep on retrying and holding on to all the throttles This flag is just a manual indication for the recovery job to exit." + }, + "failOnIncosistentSnapshotDuringRecovery": { + "type": "boolean", + "description": "Flag whether to fail recovery or refresh job if any inconsistent recovered snapshot is found during verification. In a reader writer model, the owner cluster could have expired or consolidated some archived snapshots causing the reader cluster's view of the recovered snapshots to be stale. Setting this to False allows us run the verification task and identify such snapshots without causing the recovery or refresh job to remain stuck." + }, + "failOnPopulateMetadataFailureDuringRecovery": { + "type": "boolean", + "description": "Flag whether to fail recovery or refresh job if a failure is encountered in populating one or more recovered snappables to metadata store. 
In a reader writer model, the owner cluster could have made some changes which cause this inconsistent behavior. Rather than failing the entire job, it makes more sense to continue with whatever we were able to recover." + }, + "recoverySkipPathVerification": { + "type": "boolean", + "description": "Flag for skipping the path verification. Path verification could be time consuming as it will operate on all the snapshots of the snappable and query the cloud to see if the path actually exists." + }, + "throttleWaitTimeInSeconds": { + "type": "integer", + "format": "int32", + "description": "Wait time in seconds to acquire throttle." + }, + "throttleSleepTimeInSeconds": { + "type": "integer", + "format": "int32", + "description": "Sleep time in seconds for acquiring throttle." + }, + "cloudInstantiationJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Time interval between each instantiation job run." + }, + "cloudImageConversionJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Time interval between successive cloud image conversions." + }, + "useArchivalLockManagerCache": { + "type": "boolean", + "description": "Feature toggle specifying whether to use a cache for the archival lock manager." + }, + "putArchivalReaderMetadataRetryCount": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries to add archival location metadata when connecting to an archival location as a reader cluster." + }, + "putArchivalReaderMetadataInitialSleepInMs": { + "type": "integer", + "format": "int32", + "description": "Initial sleep duration before retrying adding archival location metadata when connecting to an archival location as a reader cluster." + }, + "promoteOperationRetryCount": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries to perform retryable operations (such as deleting reader metadata, locking the archival location, and performing connectivity checks) when promoting a reader archival location to an owner location." + }, + "promoteOperationInitialSleepInMs": { + "type": "integer", + "format": "int32", + "description": "Initial sleep duration before retrying performing a retryable operation (such as deleting reader metadata, locking the archival location, and performing connectivity checks) when promoting a reader archival location to an owner location." + }, + "uploadPercentDiskSpaceNeeded": { + "type": "number", + "format": "double", + "description": "Percentage of logical disk space required to construct patch files for upload. Scratch dir does not have replication so we will be using only x /(3/2) disk space. We can further divide it by a factor of 2.75 provided by zstd compression.So the factor of disk usage comes out to be (2/3)/2.75 = 0.24." + }, + "spaceReservedOnArchiveInMB": { + "type": "integer", + "format": "int32", + "description": "We need some disk space reserved in the archive for doing metadata operations when running expiration or gc on a blobstore group." + }, + "acceptableArchivalThresholdViolationInHours": { + "type": "integer", + "format": "int32", + "description": "Grace period we give a snapshot to be archived, in hours." + }, + "archivalLocationConnectivityCheckFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Time interval between each connectivity check run." 
+ }, + "archivalMaintenanceFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Time interval between each archival maintenance job run." + }, + "staleArchivalFileDeletionMaxAttempts": { + "type": "integer", + "format": "int32", + "description": "Maximum for Archival Maintenance job to try to cleanup stale files." + }, + "sleepTimeForConnectivityCheckInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time to sleep before retrying and checking data location connectivity state." + }, + "importImageWaitTimeInMinutes": { + "type": "integer", + "format": "int32", + "description": "The time to wait for an import image job." + }, + "importImageSleepTimeInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time between each time we check the import image job progress." + }, + "cloudTransientImageGcTimeInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time to garbage clean a transient cloud image after it is created." + }, + "genericComputeWaitTimeInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time to wait for compute tasks without specific configs." + }, + "genericComputeSleepTimeInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time to sleep for compute tasks without specific configs." + }, + "uploadJobNoWorkDelayInMillis": { + "type": "integer", + "format": "int32", + "description": "Number of milliseconds to delay the job if there is no work to do." + }, + "logUploadJobNoWorkDelayInMillis": { + "type": "integer", + "format": "int32", + "description": "Number of milliseconds to delay the job if there is no work to do." + }, + "cloudInstantiateDelayInMillisIfNoArchivedSnapshot": { + "type": "integer", + "format": "int32", + "description": "Number of milliseconds to delay cloud instantiation job if no snapshot exists at archival location." + }, + "cloudInstantiateDefaultImageConversionFrequencyInSeconds": { + "type": "integer", + "format": "int32", + "description": "Interval at which the instantiation job runs to convert the latest snapshot to a cloud image." + }, + "onDemandInstantiateSnapshotJobRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for instantiating snapshots on cloud." + }, + "instantiateImageJobRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for instantiating images on cloud." + }, + "retryForUploadingInInstantiateJob": { + "type": "integer", + "format": "int32", + "description": "Number of retries for all uploads required in the cloud instantiate job." + }, + "shouldLaunchBoltForCloudConsolidation": { + "type": "boolean", + "description": "Decide whether consolidation for compute supported locations should be run on local cluster. If it is set to true, then bolt is launched to run consolidation on archival locations that support compute." + }, + "shouldLaunchBoltForArchivalReverse": { + "type": "boolean", + "description": "Boolean flag to indicate whether to launch a bolt instance to perform chain reversal on archival locations with compute enabled." + }, + "maxWaitTimeOnBoltForConsolidationInMins": { + "type": "integer", + "format": "int32", + "description": "This is the maximum time in minutes we wait after placing a request for bolt for consolidation." 
+ }, + "maxWaitTimeOnBoltForArchivalReverseOf1GBInMins": { + "type": "integer", + "format": "int32", + "description": "Number of minutes to wait per GB of data to reverse after placing a requiest for a bolt instance for archival reverse." + }, + "maxWaitTimeOnBoltForArchivalReverseInMins": { + "type": "integer", + "format": "int32", + "description": "Maximum time in minutes to wait after placing a request for a bolt instance for archival reverse." + }, + "estimatedJobTimeForBoltArchivalReverseOf1GbInMins": { + "type": "integer", + "format": "int32", + "description": "How long, in minutes, we expect a bolt archival reverse job to take per GB." + }, + "consolidationStormPollInternvalInSeconds": { + "type": "integer", + "format": "int32", + "description": "This is the polling interval for querying the storm request status." + }, + "archivalReverseStormPollIntervalInSeconds": { + "type": "integer", + "format": "int32", + "description": "Polling interval for querying storm request status for archival reverse." + }, + "shouldConsolidateSnapshotsOnArchive": { + "type": "boolean", + "description": "Enable consolidation of expired snapshots for space reclamation on archival location when set to true. When enabled, consolidation will be performed on supported archival location types based on various parameters." + }, + "consolidationBandwidthQueriesInParallel": { + "type": "integer", + "format": "int32", + "description": "Number of consolidation bandwidth queries that can be operated upon BandwidthStats in parallel." + }, + "minArchivalConsolidationDownloadBandwidthInBytesPerSecond": { + "type": "integer", + "format": "int32", + "description": "Display notification prompting the user to switch off archival consolidation when the download bandwidth consumed in archival consolidation is below this value." + }, + "minArchivalConsolidationUploadBandwidthInBytesPerSecond": { + "type": "integer", + "format": "int32", + "description": "Display notification prompting the user to switch off archival consolidation when the upload bandwidth consumed in archival consolidation is below this value." + }, + "minDurBetweenFailedConsolidationEventsInMins": { + "type": "integer", + "format": "int32", + "description": "This is the minimum time in minutes we wait before sending successive failure notifications for consolidation." + }, + "minExpiredChainLengthForPrivateCloudConsolidation": { + "type": "integer", + "format": "int32", + "description": "This is the minimum chain length of expired snapshots that is required for consolidation to run on private cloud locations." + }, + "maxExpiredChainLengthForPrivateCloudConsolidation": { + "type": "integer", + "format": "int32", + "description": "This is the maximum chain length of expired snapshots after which consolidation is definitely run on private cloud locations." + }, + "minPercentOfExpiredSizeForPrivateCloudConsolidation": { + "type": "integer", + "format": "int32", + "description": "This is the minimum percentage of physical size of expired blobs wrt to logical size of the group which would make it necessary to run consolidation on private cloud locations." + }, + "minPercentOfPatchSizeToSplitChain": { + "type": "integer", + "format": "int32", + "description": "This is the minimum percentage of maximum physical sized blob wrt logical size of the group which would make it eligible to be excluded for consolidation within the expired chain." 
+ }, + "multiplicationFactorToSplitConsolidationSpec": { + "type": "integer", + "format": "int32", + "description": "This is the factor with which the maximum physical sized blob is multiplied and compared to the physical size of the contiguous chain to split the chain into two chains." + }, + "diskSpaceFactorForCloudConsolidation": { + "type": "number", + "format": "double", + "description": "This is the factor which gets multiplied to existing disk space needed for computing space required on bolt for consolidation." + }, + "retryForConvertingDiskFormat": { + "type": "integer", + "format": "int32", + "description": "Number of retries for converting source disk to destination disk in instantiate job." + }, + "cloudStorageServiceAcceptQueueSizePerThread": { + "type": "integer", + "format": "int32", + "description": "Accept queue size for the storage server in Cloud Storage Service." + }, + "defaultChunkSizeToEncryptForArchivalInKiB": { + "type": "integer", + "format": "int32", + "description": "While uploading to an encrypted archival location, we break down the data into chunks and encrypt them individually. This is the default size of these chunks in KiB." + }, + "retryCountForRubrikBoltInstance": { + "type": "integer", + "format": "int32", + "description": "Number of times to retry when establishing contact with bolt instance." + }, + "deleteCloudImageAndInstanceJobRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for deleting cloud images." + }, + "useBoltForInstantiation": { + "type": "boolean", + "description": "This is a toggle for allowing bolt for instantiation." + }, + "completeAsyncDownloadSleepTimeInSeconds": { + "type": "integer", + "format": "int32", + "description": "Number of seconds to wait before retrying a failed attempt to complete an asynchronous file download." + }, + "asyncDownloadJobProgressPollingSubTaskWeight": { + "type": "number", + "format": "double", + "description": "Approximate fraction of download job progress that should correspond to polling on asynchronous retrievals." + }, + "genericRetryCount": { + "type": "integer", + "format": "int32", + "description": "Generic retry count to be used in thor code." + }, + "genericFewerRetryCount": { + "type": "integer", + "format": "int32", + "description": "Generic retry count with fewer retries to be used in thor code." + }, + "genericSleepTimeForPollingInMillis": { + "type": "integer", + "format": "int32", + "description": "Generic sleep count to be used in thor code." + }, + "genericSleepTimeBetweenRetriesInSeconds": { + "type": "integer", + "format": "int32", + "description": "Generic sleep time to use between retries for upload and download type operations." + }, + "timeToWaitForRemoteJobInstanceToTerminateInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time to wait for a thrift-initiated job instance to complete." + }, + "sleepTimeForCheckingThriftJobInstanceInMs": { + "type": "integer", + "format": "int32", + "description": "Time between consecutive checks of thrift-initiated job instance." + }, + "timeToWaitForThriftJobCancelInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time to wait for a thrift-initiated job instance to cancel." + }, + "sleepTimeForCheckingThriftJobCancelInMs": { + "type": "integer", + "format": "int32", + "description": "Time between consecutive checks of thrift-initiated job instance." 
+ }, + "allowedAgeAsyncDownloadJobIdInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes after which an asynchronous retrieval job id should not be used further and a new retrieval request should be initiated." + }, + "thriftSocketConnectionTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Thrift connection timeout when accessing the Archive (S3) random access services." + }, + "thriftSocketSendTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Thrift socket send timeout when accessing the Archive (S3) random access services." + }, + "thriftSocketReceiveTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Thrift socket receive timeout when accessing the Archive (S3) random access services." + }, + "hdsEnableMultiPartUpload": { + "type": "boolean", + "description": "Enable multipart upload for HDS archival locations." + }, + "encryptedServiceCacheSize": { + "type": "integer", + "format": "int32", + "description": "Number of entries in the encrypted service metadata cache." + }, + "encryptedServiceCacheExpiryInMinutes": { + "type": "integer", + "format": "int32", + "description": "Expire entries after the specified duration has passed since the entry was created." + }, + "shouldIncludeCaCertsInTofu": { + "type": "boolean", + "description": "Use all the certs in the certificate chain for TOFU. Note that we do not validate hostnames in the cert, thus enabling this option causes that anyone with a signed cert from the CA can impersonate the target." + }, + "bandwidthStatsNumBucketsToKeep": { + "type": "integer", + "format": "int32", + "description": "Number of buckets we keep in bandwidth stats. Note that each bucket corresponds to an hour, therefore this effectively specifies how much history we keep in terms of number of hours, the chosen value represents a week." + }, + "bandwidthStatsNumBucketsToExpire": { + "type": "integer", + "format": "int32", + "description": "Number of buckets we expire during each invocation of the bucket expiration procedure." + }, + "bandwidthStatsNumRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for operations related to the bandwidth stats framework." + }, + "bandwidthStatsInitRangeInMs": { + "type": "integer", + "format": "int32", + "description": "Initial backoff to use for bandwidth stats operations in the case of failures." + }, + "sleepAfterUploadUndoCleanupInSeconds": { + "type": "integer", + "format": "int32", + "description": "Sleep time after file deletion in undo for upload." + }, + "relicInstancesAndImagesRetentionPeriodInHours": { + "type": "integer", + "format": "int32", + "description": "Time for which relic public instances and public cloud images should be retained in the metadatastore." + }, + "relicAppImagesRetentionPeriodInHours": { + "type": "integer", + "format": "int32", + "description": "Time for which relic app cloud images should be retained in the metadatastore." + }, + "maxDownloadJobsThatCanRunOnCluster": { + "type": "integer", + "format": "int32", + "description": "Max number of snapshot download jobs that can run on the cluster irrespective of the target archival locations." + }, + "shouldDownloadBlobChainForRecovery": { + "type": "boolean", + "description": "If true, it will download the whole blob chain for the snapshot and DOWNLOAD_SNAPSHOT_BLOB_CHAIN_FROM_ARCHIVE job will be run. 
The blob chain download is not done for Glacier as download from glacier is asynchronous." + }, + "shouldSnapshotDownloadLeverageLocalSnapshotForNonDedup": { + "type": "boolean", + "description": "If true, for the first full snapshot download, it will leverage local snapshot and only download the range that is changed based on the fingerprint comparison. Note that this config is only applicable to non-deduped snapshot download." + }, + "shouldSnapshotDownloadLeverageLocalSnapshotForDedup": { + "type": "boolean", + "description": "If true, for the first full snapshot download, it will leverage local snapshot and only download the range that is changed based on the fingerprint comparison. Note that this config is only applicable to deduped snapshot download." + }, + "shouldDownloadIncrementalSnapshot": { + "type": "boolean", + "description": "If true, it allows incremental snapshot download if some other snapshot is already downloaded in the rehydrated location." + }, + "shouldIncrementalSnapshotDownloadLeverageDiffTreeBlobStore": { + "type": "boolean", + "description": "If true, it allows incremental snapshot download to increment on top of the closest downloaded snapshot, which will need to leverage diff-tree blobstore." + }, + "snapshotDownloadFingerprintSimilarityPercentageThresholdForNonDedup": { + "type": "number", + "format": "double", + "description": "The fingerprint similarity threshold. If the similarity of the two fingerprints are above this threshold, we can leverage local snapshot and only download the range that is different. Note that this config is only applicable to non-deduped snapshot download." + }, + "snapshotDownloadFingerprintSimilarityPercentageThresholdForDedup": { + "type": "number", + "format": "double", + "description": "The fingerprint similarity threshold. If the similarity of the two fingerprints are above this threshold, we can leverage local snapshot and only download the range that is different. Note that this config is only applicable to deduped snapshot download." + }, + "snapshotDownloadSameContentSizeThresholdInGB": { + "type": "integer", + "format": "int32", + "description": "The fingerprint same content size threshold. If the size of the same content of the two fingerprints is above this threshold, we can leverage local snapshot and only download the range that is different." + }, + "archivalFileDownloadRetrySleepInSeconds": { + "type": "integer", + "format": "int32", + "description": "Num seconds to sleep before retrying file download from archive." + }, + "downloadRangeReadChunkSizeInMB": { + "type": "integer", + "format": "int32", + "description": "Size of chunk when downloading a file using range reads." + }, + "downloadRangeReadChunkDownloadInitialSleepInMilliseconds": { + "type": "integer", + "format": "int32", + "description": "Num milliseconds to sleep before retrying chunk download when downloading a file using range reads." + }, + "downloadRangeReadChunkDownloadRetries": { + "type": "integer", + "format": "int32", + "description": "Retries for chunk download when downloading using range reads." + }, + "shouldUseOsDiskInfoForVmwareInstantiation": { + "type": "boolean", + "description": "Use the OS Disk information persisted into snapshot table for instantiation. If this is set to false, use the disk with lowest disk id as OS Disk." + }, + "numQueriesToGetForBandwidthInParallel": { + "type": "integer", + "format": "int32", + "description": "Number of queries that can be acted upon while fetching bandwidth stats." 
+ }, + "timeOutForBandwidthQueriesInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout for parallel bandwidth queries while fetching bandwidth stats (in seconds)." + }, + "bandwidthStatsSizeForFlush": { + "type": "integer", + "format": "int32", + "description": "Number of bandwidth stats to be present in the buffer before persisting to the table." + }, + "bandwidthStatsPeriodForFlushInSecs": { + "type": "integer", + "format": "int32", + "description": "Maximum time for which buffered bandwidth stats will not be flushed for (in seconds)." + }, + "bandwidthStatLogThresholdInBytes": { + "type": "integer", + "format": "int32", + "description": "Threshold for logging bandwidth stats in bandwidth logging input stream. The value given here denotes 500MB." + }, + "usePipeliningForUpload": { + "type": "boolean", + "description": "Flag indicating whether to use pipelining for patch file creation and upload during execute upload." + }, + "uploadPipelineTimeoutInSecs": { + "type": "integer", + "format": "int32", + "description": "Time out duration for the futures initialized in the upload pipeline. The specified actual value represents 100 days. We keep such a high value due to be safe against the long running nature of upload jobs." + }, + "refreshDurationForDisconnectedLocationNotificationInMinutes": { + "type": "integer", + "format": "int32", + "description": "Duration between posting repetitive notification for disconnected condition on an archival location." + }, + "minBandwidthForUploadInMbps": { + "type": "number", + "format": "double", + "description": "Specifies the minimum upload bandwidth required in megabits per second." + }, + "connectivityCheckDownloadRetries": { + "type": "integer", + "format": "int32", + "description": "Specifies the retry count for downloading file when doing archival connectivity check." + }, + "shouldUploadMetadataForCloudOnResourcesForUpgrade": { + "type": "boolean", + "description": "Flag whether to upload the metadata for cloud on resources. This is required as part of moving to 5.0x. When set to true this flag will disable the cloud instantiation jobs till the metadata for images and instances from previous instantiations are uploaded to the archival location. The uploadMetadataForCloudOnResources job will set the flag to false on successful exit." + }, + "shouldUseNonEnaTemporaryInstance": { + "type": "boolean", + "description": "Flag wether to use non ENA instance types as temporary instance." + }, + "nonENATemporaryInstanceType": { + "type": "string", + "description": "The instance type of temporary instance." + }, + "uploadMetadataForCloudOnResourcesFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Time interval between each uploadMetadataForCloudOnResources check." + }, + "multipartDownloadThreadCount": { + "type": "integer", + "format": "int32", + "description": "Number of threads to use for concurrency when downloading multipart files from archive." + }, + "multipartDownloadSleepTimeInSeconds": { + "type": "integer", + "format": "int32", + "description": "When downloading multiple parts in parallel, sleep for this amount of time after downloading a part if the preceding part has not completed as yet." + }, + "multipartMigrateThreadCount": { + "type": "integer", + "format": "int32", + "description": "Number of threads to use for concurrency when migrating multipart files between archival locations." 
+ }, + "remoteNodeUploadTimeoutSpeedInKbps": { + "type": "integer", + "format": "int32", + "description": "When invoking upload in remote nodes, we need to set the timeout value based on the file size that is going to be uploaded, and the minimum upload speed we are willing to wait. This value is the configurable minimum upload waiting speed used to derive the thrift timeout value. The speed unit is in kilo-bits (not bytes) per second." + }, + "shouldPopulateSlaDomainsForMetadataRecovery": { + "type": "boolean", + "description": "Flag to indicate whether to populate SLA domains during metadata recovery and refresh. Enabling this flag can potentially cause issues when reader-writer archival is combined with replication, since either metadata recovery or replication can cause SLA domain metadata to get overwritten." + }, + "encryptedStorageAccountManagerCopyBufferSizeInKB": { + "type": "integer", + "format": "int32", + "description": "Buffer size used when copying data between two files." + }, + "thriftServerSocketTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout in Ms for server timeout for Cloud storage service." + }, + "useProxyConfigForFetchingCertificates": { + "type": "boolean", + "description": "Flag to enable/disable the usage of proxy configuration for fetching s3compatible object stores certificate." + }, + "instantTieringMaxChainLength": { + "type": "integer", + "format": "int32", + "description": "Maximum chain length when instant tiering is enabled. A full upload will be triggered once the chain length reaches this limit on target tier." + }, + "archivalThrottlingPort": { + "type": "integer", + "format": "int32", + "description": "Port on which to throttle traffic if user has setup archival throttling." + }, + "tierSnapshotsJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Time interval between successive instances of the snapshot tiering job." + }, + "tieringRetentionCutoffMultiplier": { + "type": "number", + "format": "double", + "description": "Factor by which we multiply an SLA's daily or hourly snapshot retention to determine the minimum age a snapshot can have before it can be considered for tiering." + }, + "tieringCostFactor": { + "type": "number", + "format": "double", + "description": "Factor by which we multiply the estimated tiered storage and compute costs for performing chain reversal and tiering, when comparing with storage costs in hot storage. This can be used to tune the trade-off between paying for compute upfront versus saving on long-term storage costs, as well as for testing. This can be set to 0.0 to disable cost modeling entirely." + }, + "minSamplesToValidateAfterFileUpload": { + "type": "integer", + "format": "int32", + "description": "Minimum number of samples to validate after uploading a file." + }, + "maxSamplesToValidateAfterFileUpload": { + "type": "integer", + "format": "int32", + "description": "Maximum number of samples to validate after uploading a file." + }, + "numSamplesToValidatePerGBAfterFileUpload": { + "type": "integer", + "format": "int32", + "description": "Number of samples to validate after uploading a file per GB. The actual number of samples to validate is bounded between minSamplesToValidateAfterFileUpload and maxSamplesToValidateAfterFileUpload." + }, + "chunkSizeToReadForValidationForUploadedFileInBytes": { + "type": "integer", + "format": "int32", + "description": "Chunk size of a sample to read for validation after uploading a file." 
+ }, + "usePublicIpForBolt": { + "type": "boolean", + "description": "Use public IP for connecting to bolt instance. Note that we use private IP by default which expects the customer to have VPN set up. Ensure that Security Group used for storm instance allows traffic on public IP." + }, + "fileDownloadRetryCountForDisasterRecovery": { + "type": "integer", + "format": "int32", + "description": "Retry count for file download from archival location during archival disaster recovery." + }, + "minRequiredSpaceForUploadingBlobstoreMetadataInMB": { + "type": "integer", + "format": "int32", + "description": "Minimum required space on the archival location in order to upload blobstore metadata backup files." + }, + "numRetriesForUploadJobInstancePersist": { + "type": "integer", + "format": "int32", + "description": "Number of retries when persisting upload job config updates to metadata store. This is needed to handle transient failures when persisting updates due to backpressure or other issues." + }, + "maxSleepDurationForUploadJobInstancePersistInMs": { + "type": "integer", + "format": "int32", + "description": "Maximum time to sleep in milliseconds between retries when persisting upload job instance updates to metadata store." + }, + "enableContentBasedValidationAfterUploadForCloud": { + "type": "boolean", + "description": "Enable content based validation of uploaded file to public cloud targets (S3, Azure, GCP). When enabled, we read back a few chunks and compare it with local file for validation. See chunkSizeToReadForValidationForUploadedFileInBytes, numSamplesToValidatePerGBAfterFileUpload, maxSamplesToValidateAfterFileUpload, and minSamplesToValidateAfterFileUpload for number of chunks and their size to validate." + }, + "sleepTimeBeforeReadInMs": { + "type": "integer", + "format": "int32", + "description": "Time to sleep before reading back a just written file to archival target. This is applied for NFS, QStar, and private object store targets." + }, + "shouldArchivalJobSkipRetryAcrossUpgrades": { + "type": "boolean", + "description": "Flag to indicate if the running UPLOAD and DOWNLOAD jobs should skip retry after upgrade if executing reversible tasks at the time of upgrade. The job instance will instead fail post upgrade." + }, + "archivalNetworkThrottlePortTypeOverride": { + "type": "string", + "description": "Override flag to determine which archival location types will affect the network throttle token distribution. Possible values are None, HTTPS and NFS. We will automatically detect port 443 as HTTPS and 2049 as NFS if set to None. This config only needs to be changed if the default egress port for an archival location has been customized AND the customer has both object stores and NFS locations configured." + }, + "readerRecoveryMapReduceMaxChildInstancesGlobally": { + "type": "integer", + "format": "int32", + "description": "Maximum number of reader recovery job child instances that can be scheduled concurrently globally." + }, + "shouldReaderRecoveryUseMapReduce": { + "type": "boolean", + "description": "Flag to indicate if the metadata recovery/refresh job uses JFL map reduce framework to distribute workload across all nodes. The config readerRecoveryMapReduceMaxChildInstancesPerNode from thor_local_config defines the maximum number of child instances that can be scheduled concurrently per node." 
+ }, + "parallelizablePopulateArchivedMetadataChildRetryCount": { + "type": "integer", + "format": "int32", + "description": "Retry count for parallel reader recovery child job instances that populates the downloaded metadata into the cluster." + }, + "bufferTimeForConsolidationInMinutes": { + "type": "integer", + "format": "int32", + "description": "Buffer time used by the consolidation job to wait for a longer duration than the estimated time for a dependent job instance to terminate." + }, + "populateMetadataTaskThrottleRetryCount": { + "type": "integer", + "format": "int32", + "description": "Retry count for acquiring throttle in populate archived metadata task of metadata recovery/refresh job. High default retry count is set here to allow waiting up to an hour to grep the throttle since some of the metadata refresh job can run for up to an hour." + }, + "timeIntervalForLoggingSnowFlakeMetricsInMinutes": { + "type": "integer", + "format": "int32", + "description": "The connectivity check job logs data location metrics which are parsed by Apollo extractors. This config determines how frequently these metric logs are written." + }, + "sleepTimeBetweenDeregisterRetriesInMillis": { + "type": "integer", + "format": "int32", + "description": "Sleep time between retries for deregistering files on archive." + }, + "recoveredSnapshotVerifierArchivalLocationFileCacheSize": { + "type": "integer", + "format": "int32", + "description": "Size of fixed size cache on archival location files used while verifying recovered snapshots while connecting to the archival location as a reader. If this value is 0 or lower, the cache is not enabled." + }, + "skipOnPremConsolidationForOlderSnapshotChains": { + "type": "boolean", + "description": "Skip consolidation of expired snapshots that are part of the older snapshot chain(s) for on-premise archival locations. When set to true, only the snapshots that are expired and belong to the latest snapshot chain will be considered for consolidation." + }, + "skipCloudConsolidationForOlderSnapshotChains": { + "type": "boolean", + "description": "Skip consolidation of expired snapshots that are part of the older snapshot chain(s) for archival locations which use cloud compute. When set to true, only the snapshots that are expired and belong to the latest snapshot chain will be considered for consolidation." + }, + "opentracingSamplingStrategy": { + "type": "string", + "description": "Jaeger Opentracing strategy. The default strategy samples at 0.000001 probability." + }, + "opentracingSamplingDurationMsec": { + "type": "integer", + "format": "int32", + "description": "Frequency by which new Sampling strategies are polled by Jaeger." + }, + "validateCreatedPatchFileOnUpload": { + "type": "boolean", + "description": "Whether to patch file validation in the upload path by having snapshot server read in the files." + }, + "useSplitSubGroupUploadJobStateForNewJobs": { + "type": "boolean", + "description": "Whether to use SplitSubGroupUploadJobState for new Upload jobs." + }, + "archivalHistoricalStatsMaxRawValueCount": { + "type": "integer", + "format": "int32", + "description": "Max number of raw values stored for historical archival job stats." + }, + "archivalHistoricalStatsMaxSecondaryRawValueCount": { + "type": "integer", + "format": "int32", + "description": "Max number of secondary raw values stored for historical archival job stats. Secondary raw values include min and max lists per stat." 
+ }, + "shouldPersistArchivalJobsHistoricalStats": { + "type": "boolean", + "description": "Boolean flag to decide whether to persist archival job historical stats to the metadatastore." + }, + "timeToPruneArchivalJobHistoricalStatsInMinutes": { + "type": "integer", + "format": "int32", + "description": "Minimum amount of minutes between two successive attempts to clean up the archival historical job stat." + }, + "historicalJobStatsStalenessThresholdInMins": { + "type": "integer", + "format": "int32", + "description": "The time after which we will prune a historical job stats row. i.e if a row in the hisorical job stats table has not been updated in the amount of time specified in this config, we will delete it." + }, + "shouldValidateCreatedPatchFile": { + "type": "boolean", + "description": "Whether to perform patch file validation by having snapshot server read in the files." + }, + "disableReverseForSmartTiering": { + "type": "boolean", + "description": "When set to true, do not perform reverse operation on blobstore chains in the archive for smart tiering. When an entire chain becomes eligible, all blobs will be tiered down to cold storage tier." + }, + "thriftRequestTimeoutForCleanupStaleFilesInMs": { + "type": "integer", + "format": "int32", + "description": "The timeout for the thrift response when cleaning up stale upload files on a remote node." + }, + "pathTranslationReadBatchSize": { + "type": "integer", + "format": "int32", + "description": "Number to absolute path translations to be fetched in a single database query." + }, + "minWaitTimeForCloudConsolidationInDays": { + "type": "integer", + "format": "int32", + "description": "Minimum time to wait after the previous execution of consolidation when using cloud compute." + }, + "updateCssCacheBlockSizeOnBoltForArchiveOperations": { + "type": "boolean", + "description": "If this flag is enabled, update the config parameter value for the cache block size in Cloud Storage Service on a Bolt instance which is assigned to run archive consolidation or reverse jobs. This value is derived from the thor config parameter cacheBlockSizeInKB." + }, + "lpfiracMaxChunkSizeInKbCloudLocations": { + "type": "integer", + "format": "int32", + "description": "Max amount contiguous data (in KB) to be downloaded in one API call in the logical patch file image read ahead cache. This cache is used in the two step download mechanism for cloud location types including S3, Azure and GCP." + }, + "lpfiracMaxHoleSizeInKbCloudLocations": { + "type": "integer", + "format": "int32", + "description": "Max distance (in KB) between data offsets such that we consider them to be contiguous and coalesce them into larger reads. Used by the logical patch file image read ahead cache. This cache is used in the two step download mechanism for cloud location types including S3, Azure and GCP." + }, + "lpfiracMaxChunkSizeInKbNonCloudLocations": { + "type": "integer", + "format": "int32", + "description": "Max amount contiguous data (in KB) to be downloaded in one API call in the logical patch file image read ahead cache. This cache is used in the two step download mechanism for non cloud location types including NFS and S3Compatible object stores." + }, + "lpfiracMaxHoleSizeInKbNonCloudLocations": { + "type": "integer", + "format": "int32", + "description": "Max distance (in KB) between data offsets such that we consider them to be contiguous and coalesce them into larger reads. Used by the logical patch file image read ahead cache. 
This cache is used in the two step download mechanism for non cloud location types including NFS and S3Compatible object stores." + }, + "migrationBaseDirForDownload": { + "type": "string", + "description": "Base dir used for intermediate files to be downloaded to during migration. Subdirectories under this path will be created for different jobs." + }, + "shouldUseTwoStepDownload": { + "type": "boolean", + "description": "Determines if the two step download mechanism should be used to download a snapshot. This download mechanism first creates a Logical Patch File Image and then downloads data from the archival location using a concurrent prefetching cache. This mechanism is expected to have significantly improved performance for most use cases." + }, + "downloadDirPrefixPathInSdScratch": { + "type": "string", + "description": "Download snapshot job root scratch directory." + }, + "convertLogicalPatchFileImageRetryCount": { + "type": "integer", + "format": "int32", + "description": "Number of times we will retry the ConvertLogicalPatchFileImage RPC in the two step download mechanism." + }, + "minWaitTimeToPersistIntegrityCheckpointInMinutes": { + "type": "integer", + "format": "int32", + "description": "Minimum wait time for persisting the checkpoints for the archival integrity report. This is needed to control the frequency of jobconfig updates when processing many snapshots. Other parameters may still cause updates to be more frequent." + }, + "numSnapshotsToBatchToPersistIntegrityCheckpointInMinutes": { + "type": "integer", + "format": "int32", + "description": "Minimum number of snapshots to batch for persisting the checkpoints for the archival integrity report. This is needed to control the frequency of jobconfig updates when processing many snapshots. Other parameters may still cause updates to be more frequent." + }, + "maxArchivalSnappableMigrationJobsOnCluster": { + "type": "integer", + "format": "int32", + "description": "Maximum number of archival protected object migration jobs that can run concurrently on a cluster. This is the upper limit of jobs to control, if needed. The per node configuration parameter controls how many jobs can run per node, and scales linearly with the number of nodes in the cluster." + }, + "supportedSourceLocationTypesForMigration": { + "type": "string", + "description": "Specifies a comma separated list of supported source archival location types for archival migration." + }, + "supportedTargetLocationTypesForMigration": { + "type": "string", + "description": "Specifies a comma separated list of supported target archival location types for archival migration." + }, + "maxRetriesForExecuteMigrate": { + "type": "integer", + "format": "int32", + "description": "Specifies the maximum number of retries of the ExecuteMigrate task of archival migration job for resumability." + }, + "scheduleOneTimeIntegrityReportJobPost52": { + "type": "boolean", + "description": "Whether to schedule a one time archival integrity report." + }, + "oneTimeIntegrityReportJobReportPath": { + "type": "string", + "description": "The absolute path to be used as the report path in the one time Archival Integrity Report scheduled on upgrade to 5.2.1-p2+." + }, + "oneTimeIntegrityReportJobEarliestSnapshotTimeStamp": { + "type": "integer", + "format": "int32", + "description": "The earliest creation date to include a snapshot for validation in the One Time Integrity Report job kicked off on upgrade."
+ }, + "enableContentBasedValidationForMigration": { + "type": "boolean", + "description": "Enable content based validation of migrated files. When enabled, we read back a few chunks and compare it with the source file for validation. See chunkSizeToReadForValidationForMigratedFileInBytes, numSamplesToValidatePerGBAfterMigration, maxSamplesToValidateAfterMigration, and minSamplesToValidateAfterMigration for number of chunks and their size to validate." + }, + "minSamplesToValidateAfterMigration": { + "type": "integer", + "format": "int32", + "description": "Minimum number of samples to validate after migrating a file." + }, + "maxSamplesToValidateAfterMigration": { + "type": "integer", + "format": "int32", + "description": "Maximum number of samples to validate after migrating a file." + }, + "numSamplesToValidatePerGBAfterMigration": { + "type": "integer", + "format": "int32", + "description": "Number of samples to validate after migrating a file per GB. The actual number of samples to validate is bounded between minSamplesToValidateAfterFileUpload and maxSamplesToValidateAfterFileUpload." + }, + "chunkSizeToReadForValidationForMigratedFileInBytes": { + "type": "integer", + "format": "int32", + "description": "Chunk size of a sample to read for validation after migrating a file." + }, + "enableExpireMigratedSnapshotsOnSource": { + "type": "boolean", + "description": "When false, do not expire the snapshots on the source archival location after migration. The snapshots will exist on both the source and target archival locations post migration. User will need to expire the snapshots on the source after confirming that the source copy is no longer needed. Set this parameter to true to expire the snapshots automatically on the source location after migration. The recommendation is to keep it disabled and expire manually." + }, + "minBatchTimeForJobStatusUpdateInSecs": { + "type": "integer", + "format": "int32", + "description": "Minimum time to batch the progress updates to the job_status table for archiving jobs. This helps reduce the backpressure issues by reducing the frequency of updates to the job_status table." + }, + "sizingScriptPath": { + "type": "string", + "description": "Command for sizing script." + }, + "numFingerprintsToValidateForMigrate": { + "type": "integer", + "format": "int32", + "description": "Number of fingerprints to validate on target archival location as part of migrating snapshots. The validation occurs at subgroup level for every migrated snapshot content." + }, + "maxTierExistingSnapshotsJobOnCluster": { + "type": "integer", + "format": "int32", + "description": "Maximum number of TIER_EXISTING_SNAPSHOTS job that can run concurrently on a cluster." + }, + "parallelizableExecuteTierExistingSnapshotsChildRetryCount": { + "type": "integer", + "format": "int32", + "description": "Retry count for parallel execute tier existing snapshots child job instances that is responsible to tier existing snapshots for a single snappable." + }, + "immutabilityDeltaDurationForMinutelyFreqInDays": { + "type": "string", + "description": "Specifies a pair of values in days. Immutability locks must have a duration longer than the archival retention duration of the SLA Domain assigned to hourly snapshots. The duration of an immutability lock must exceed the archival retention duration of the SLA Domain by the first value but cannot exceed that duration by more than the second value." 
+ }, + "allowMinuteFrequencySlaWithImmutableArchives": { + "type": "boolean", + "description": "Specifies whether to allow an SLA with a minute frequency an and immutable archival target. By default this is false, as customers are likely to incur high costs, however we expose this config to allow functional tests to leverage faster SLAs." + }, + "immutabilityDeltaDurationForHourlyFreqInDays": { + "type": "string", + "description": "Specifies a pair of values in days. Immutability locks must have a duration longer than the archival retention duration of the SLA Domain assigned to hourly snapshots. The duration of an immutability lock must exceed the archival retention duration of the SLA Domain by the first value but cannot exceed that duration by more than the second value." + }, + "immutabilityDeltaDurationForDailyFreqInDays": { + "type": "string", + "description": "Specifies a pair of values in days. Immutability locks must have a duration longer than the archival retention duration of the SLA Domain assigned to daily snapshots. The duration of an immutability lock must exceed the archival retention duration of the SLA Domain by the first value but cannot exceed that duration by more than the second value." + }, + "immutabilityDeltaDurationForWeeklyFreqInDays": { + "type": "string", + "description": "Specifies a pair of values in days. Immutability locks must have a duration longer than the archival retention duration of the SLA Domain assigned to weekly snapshots. The duration of an immutability lock must exceed the archival retention duration of the SLA Domain by the first value but cannot exceed that duration by more than the second value." + }, + "immutabilityDeltaDurationForMonthlyFreqInDays": { + "type": "string", + "description": "Specifies a pair of values in days. Immutability locks must have a duration longer than the archival retention duration of the SLA Domain assigned to monthly snapshots. The duration of an immutability lock must exceed the archival retention duration of the SLA Domain by the first value but cannot exceed that duration by more than the second value." + }, + "immutabilityDeltaDurationForQuarterlyFreqInDays": { + "type": "string", + "description": "Specifies a pair of values in days. Immutability locks must have a duration longer than the archival retention duration of the SLA Domain assigned to quarterly snapshots. The duration of an immutability lock must exceed the archival retention duration of the SLA Domain by the first value but cannot exceed that duration by more than the second value." + }, + "immutabilityDeltaDurationForYearlyFreqInDays": { + "type": "string", + "description": "Specifies a pair of values in days. Immutability locks must have a duration longer than the archival retention duration of the SLA Domain assigned to yearly snapshots. The duration of an immutability lock must exceed the archival retention duration of the SLA Domain by the first value but cannot exceed that duration by more than the second value." + }, + "forceFileDownloadBasedMigration": { + "type": "boolean", + "description": "Specifies if file download based archival migration should be forced irrespective of any other parameters. When set to true, a file is downloaded to the cluster from the source archival location, and is then uploaded to the target archival location. When set to false, decision about stream based or file download based migration is taken based on internal parameters." 
+ }, + "maxSuccessiveFailedUploadsLimitForImmutableLocations": { + "type": "integer", + "format": "int32", + "description": "Specifies the maximum number of successive upload job failures allowed to an immutable archival location before we block running new instances of upload job for that location." + }, + "timeIntervalForPostingSkipUploadJobDueToFailureHistoryInMin": { + "type": "integer", + "format": "int32", + "description": "Specifies the minimum amount of time between posting events when an UPLOAD job does not upload a snapshot to an archival location because previous upload jobs to that location failed. This duration applies to a specific job ID that is uploading to a specific archival location." + }, + "isReaderVersionCheckEnabled": { + "type": "boolean", + "description": "Whether to require a cluster connecting as reader to an archival location be at least on the version of the primary cluster owner of the archival location. By default this is true, however it can be toggled to false to allow testing where clusters require access to a hardcoded location created on a newer release." + } + } + }, + "GlobalVcdConfig": { + "type": "object", + "properties": { + "deleteVcdJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent delete vcd jobs per node." + }, + "exportVappSnapshotJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent vCD Export jobs per node." + }, + "vappSnapshotInstantRecoveryJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent vCD Instant Recovery jobs per node." + }, + "vappSnapshotInstantRecoveryInternalJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent vCD VM Instant Recovery jobs per node." + }, + "vappSnapshotExportInternalJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent vCD VM Export jobs per node." + }, + "refreshVcdNumRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for vCD refresh jobs." + }, + "refreshVcdJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent vCD refresh jobs per node." + }, + "refreshVcdJobIntervalInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval of refresh vcd jobs in minutes." + }, + "vmwareVcdVappSnapshotJobRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for the vcd vapp snapshot job." + }, + "newVappImportThrottleWaitTimeInMillis": { + "type": "integer", + "format": "int32", + "description": "Timeout time in seconds to acquire throttle." + }, + "vcdExportJobSpawnTimeoutDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "The duration after which the vcd vapp export job will cancel child export jobs." + }, + "vcdInstantRecoveryInteralJobSpawnPollingDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "The periodicity at which the vcd vapp export job will poll for completion of child export jobs." + }, + "vcdInstantRecoveryInteralJobSpawnTimeoutDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "The duration after which the vcd vapp export job will cancel child export jobs." 
+ }, + "vcdRecoveryLiveMountPollingDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "The periodicity at which the vcd vapp recovery jobs will poll for completion of child live mount jobs." + }, + "vcdRecoveryLiveMountTimeoutDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "The duration after which the vcd vapp recovery jobs will cancel child live mount jobs." + }, + "vcdGenericTaskTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Number of seconds to wait for a vapp delete task before throwing an error." + }, + "vcdRecoveryUnmountPollingDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "The periodicity at which the vcd vapp recovery jobs will poll for completion of child unmount jobs." + }, + "vcdRecoveryUnmountTimeoutDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "The duration after which the vcd vapp recovery jobs will cancel child unmount jobs." + }, + "vappSnapshotJobSpawnPollingDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "The periodicity at which the vcd vapp snapshot job will poll for completion of child snapshot jobs." + }, + "vappSnapshotJobSpawnTimeoutDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "The duration after which the vcd vapp snapshot job will cancel child snapshot jobs." + }, + "vcdVcenterRefreshJobSpawnPollingDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "Specifies the duration of an interval in milliseconds. The vCD refresh job checks underlying vCenter refresh jobs for completion each time this interval elapses." + }, + "vcdVcenterRefreshJobSpawnTimeoutDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "Specifies a duration in milliseconds. The vCD refresh jobs stops checking for completion of underlying vCenter refresh jobs after this duration elapses." + }, + "shouldTriggerVcenterRefreshOnManualRefresh": { + "type": "boolean", + "description": "Boolean value indicating whether a manual vCD refresh from the API should trigger refresh for the underlying vCenters as well." + }, + "distributedBarrierTimeoutDurationInSeconds": { + "type": "integer", + "format": "int32", + "description": "The duration after which the distibuted barrier will stop waiting for all children register and await jobs." + }, + "simultaneousNumberOfLiveMountsPerEsxHostForVcdRecovery": { + "type": "integer", + "format": "int32", + "description": "The simultaneous number of live mounts to be used by vCD recovery jobs." + }, + "millisToSleepAfterUnregisteringVmDuringInstantRecovery": { + "type": "integer", + "format": "int32", + "description": "Duration for which the instant recovery task will sleep after unregistering a VM from vCenter." + }, + "numSessionRetriesInInstantRecoveryJob": { + "type": "integer", + "format": "int32", + "description": "Number of retries if a vCD session call fails during an instant recovery job." + }, + "millisToSleepBetweenRetriesInInstantRecoveryJob": { + "type": "integer", + "format": "int32", + "description": "Duration for which the instant recovery job will sleep if a vCD session call fails." + }, + "vcdClusterLockDefaultExpiryDurationInSeconds": { + "type": "integer", + "format": "int32", + "description": "Default expiry duration in seconds for vCD cluster operation lock." 
+ }, + "vcdCreateOperationLockExpiryDurationInSeconds": { + "type": "integer", + "format": "int32", + "description": "Expiry duration in seconds for vCD create operation lock." + }, + "millisToSleepBetweenRetriesInPersistMetadataInRefreshJob": { + "type": "integer", + "format": "int32", + "description": "Duration for which the refresh job will sleep if a persist atomically call returns false." + }, + "numPersistMetadataRetriesInRefreshJob": { + "type": "integer", + "format": "int32", + "description": "Number of retries if a persist atomically call returns false during vCD refresh job." + }, + "millisToSleepBetweenSessionRetriesInRefreshJob": { + "type": "integer", + "format": "int32", + "description": "Duration for which the refresh job will sleep if a vCD session call fails." + }, + "numSessionRetriesInRefreshJob": { + "type": "integer", + "format": "int32", + "description": "Number of retries if a vCD session call fails during an refresh job." + }, + "millisToTimeoutForVcdSessionCallValidation": { + "type": "integer", + "format": "int32", + "description": "Duration for which vCD request util should wait for vCD session call to finish before timing out." + }, + "secondsToSleepDuringImportTaskPolling": { + "type": "integer", + "format": "int32", + "description": "Seconds to sleep while polling for import task to terminate." + }, + "secondsToRetryBeforeGivingUpOnImportErrors": { + "type": "integer", + "format": "int32", + "description": "Duration for which we will tolerate errors in the vCD import status call before throwing an exception." + }, + "isBestEffortSynchronizationDisabled": { + "type": "boolean", + "description": "When set to true, best effort synchronization will be disabled for ALL vApps." + }, + "shouldVerifyHostnameForVcdCluster": { + "type": "boolean", + "description": "Whether to verify name in HTTPS session when connecting to vCD cluster." + }, + "shouldAllowNonValidatingVcdCluster": { + "type": "boolean", + "description": "Whether the vCD cluster allows non-validating connections. Users who want secure connections will need to generate valid certificates for their vCD cluster and import them to the Rubrik nodes." + }, + "tlsProtocolForVcdCluster": { + "type": "string", + "description": "TLS protocol to be used when connecting to vCD cluster." + }, + "numRetriesForAggressiveResourceContention": { + "type": "integer", + "format": "int32", + "description": "Number of retries for trying all the throttles to be acquired for taking snapshots for all the vm's in the vApp." + }, + "expectedPercentageOfVappsInAggressiveContentionState": { + "type": "integer", + "format": "int32", + "description": "The percentage probability that a vApp will be aggressively contending for resources before it gets a single synchronous snapshot." + }, + "numNonAggressiveContentionAfterSynchronizedSnapshot": { + "type": "integer", + "format": "int32", + "description": "Number of times we do not aggressively contend for resources even after taking a snapshot." + }, + "numContinuousAggressiveContentions": { + "type": "integer", + "format": "int32", + "description": "Number of times we can continuously contend for resources aggressively." + }, + "numNonSynchronizedSnapshotsBeforeNoContention": { + "type": "integer", + "format": "int32", + "description": "Number of unsynchronized snapshots that we take before which we decide not to contend for resources at parent level ever again for the vApp." 
+ }, + "vappUploadJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes between vApp upload jobs." + }, + "useThinProvisioningForStorageMotion": { + "type": "boolean", + "description": "Whether to use thin provisioning while doing a storage motion after instant recovery." + }, + "templateConfigFileName": { + "type": "string", + "description": "FileName of the config XML file which is uploaded as the vAppTemplate." + }, + "shouldRefreshUnderlyingCatalogs": { + "type": "boolean", + "description": "Boolean value indicating whether a vCD refresh should refresh underlying vCD catalog and vApp template objects." + }, + "millisToSleepBetweenRetriesWhileExportingTemplate": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. When a vCD session call fails, the Export vApp Template job sleeps for the specified interval before retrying." + }, + "numRetriesForResourceContentionInVmSectionRestore": { + "type": "integer", + "format": "int32", + "description": "Specifies the number of times that a vCD virtual machine reconfiguration job attempts to acquire the lock when vCD virtual machines are being sequentially reconfigured." + }, + "vcdVmSectionRestoreSleepTimeBetweenRetriesInMillis": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in milliseconds. The Export VApp Template job sleeps for the specified interval between retries while updating sections of the vCD virtual machine." + }, + "maximumTimeToRetryForPowerOnTaskInMillis": { + "type": "integer", + "format": "int32", + "description": "Maximum duration in millis for which we retry the power-on task." + }, + "numberOfRetriesForVerifyingVmReplacement": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries before the vCD identifies the replacement of the underlying VM during instant recovery." + }, + "maximumTimeToRetryForSectionUpdateTaskInMillis": { + "type": "integer", + "format": "int32", + "description": "Maximum duration in millis for which we retry the section update task for each vcdVm." + }, + "shouldAllowRestoreOfInvalidNonIsolatedNetworks": { + "type": "boolean", + "description": "Boolean value indicating whether we should allow export of vCD vApp networks which are non isolated and do not have a valid parent network in the target vDC." + }, + "shouldRestoreDisconnectedNics": { + "type": "boolean", + "description": "Boolean value indicating whether we should restore disconnected VM NICs while restoring a vApp or a vApp Template." + }, + "shouldLogSdkRequests": { + "type": "boolean", + "description": "Boolean value to tell whether we should log sdk responses." + }, + "rehydrationTimeInSecs": { + "type": "integer", + "format": "int32", + "description": "Number of seconds before which we attempt an auto rehydration of the session object." + }, + "vcdThriftServerTimeoutInMillis": { + "type": "integer", + "format": "int32", + "description": "Timeout of 30 minutes for each session call over thrift to PyVmwareThriftService." + }, + "vcdAdvancedConfigsToExcludeForVmMount": { + "type": "string", + "description": "List of VMware virtual machine advanced configs to be excluded when mount of a virual machine is called in vApp restore jobs." + }, + "vcdAdvancedConfigsToOverwriteForVmMount": { + "type": "string", + "description": "List of VMware virtual machine advanced configs to be overwritten when mount of a virual machine is called in vApp restore jobs. 
Sample - \"removeNice:false,removeNics:true\"." + }, + "ignoreVappNetworkAdditionDuringExport": { + "type": "boolean", + "description": "Silently swallow exceptions if vapp reconfigure networks fails during a vApp export job." + }, + "shouldRetainOriginalVmNameInIR": { + "type": "boolean", + "description": "Retain original VM in IR instead of Rubrik-Restored name." + }, + "shouldRemoveNicsFromOriginalVMInIR": { + "type": "boolean", + "description": "Remove NICs from original VM while performing an IR." + }, + "shouldIsolateTemplateNetworks": { + "type": "boolean", + "description": "Convert vApp template networks to isolated during snapshot." + }, + "shouldFilterDatastoresForIrBasedOnHost": { + "type": "boolean", + "description": "Filter datastores for vCD vApp Instant Recovery based on compatibility with host." + }, + "numRetriesDefaultForSessionCalls": { + "type": "integer", + "format": "int32", + "description": "Default number of retries for the vcd session calls." + }, + "sleepDurationInSecondsbetweenSessionCallRetries": { + "type": "integer", + "format": "int32", + "description": "Duration(in seconds) to sleep before attempting the next session call." + }, + "millisToSleepAfterPoweringOnBlankVmDuringIR": { + "type": "integer", + "format": "int32", + "description": "Duration for which the instant recovery task will sleep after powering on the Blank VM in the unregister step." + }, + "shouldPowerOnBlankVmDuringIR": { + "type": "boolean", + "description": "Boolean flag to determine if the instant recovery task will sleep after powering on the Blank VM in the unregister step." + } + } + }, + "GlobalVolumeGroupConfig": { + "type": "object", + "properties": { + "volumeGroupSnapshotJobRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for the Volume Group snapshot job." + }, + "volumeGroupIngestUseSingleReplica": { + "type": "boolean", + "description": "Whether to use single-replica (mirrored) or Reed-Solomon for Volume Group ingest." + }, + "volumeGroupIngestStatusPollFreqInMs": { + "type": "integer", + "format": "int32", + "description": "Number of milliseconds to wait between agent copy volume job status checks." + }, + "volumeGroupMaxParallelIngest": { + "type": "integer", + "format": "int32", + "description": "Maximum number of disks to ingest in parallel." + }, + "defaultBaseMountPath": { + "type": "string", + "description": "Default base mount path if user specifies a live mount on host, but gives no host mount paths. Rubrik will generate a mount path with this directory serving as the base." + }, + "enableVolumeGroupIncremental": { + "type": "boolean", + "description": "Whether to enable volume group incrementals or not." + }, + "forceFullForIncrementalFailure": { + "type": "boolean", + "description": "Whether force full volume group backup when encounter incrementals failure." + }, + "enableVfdForRefs": { + "type": "boolean", + "description": "Whether to use VFD for ReFS incrementals or not." + }, + "vfdBlockSize": { + "type": "integer", + "format": "int32", + "description": "VFD block size." + }, + "verifyVolumeSnapshotPercentage": { + "type": "integer", + "format": "int32", + "description": "Percentage of the data in a Volume to verify. The Rubrik cluster verifies the specific percentage of the data before accepting the snapshot as valid. Set to 100 to verify the entire snapshot, or set to a lower number for a faster validation process. 
For less than 100 percent, the Rubrik cluster performs verification on randomly selected data from the snapshot. When set to 0, the Rubrik cluster does not validate the snapshot data. Validation performance will be highly reduced when the percentage is high but less than 100 due to the random search process. Use low percentage for normal validation and 100 for large scale validation." + }, + "verifyIncrementalsMaxBytes": { + "type": "integer", + "format": "int32", + "description": "Maximum number of bytes to verify when validating volume incremental snapshot. The Rubrik cluster performs verification on randomly selected data from the snapshot up to the specified number of bytes." + }, + "sdfsServiceSocketTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout while connecting to SDFS service." + }, + "volumeGroupSmbNameLength": { + "type": "integer", + "format": "int32", + "description": "Length of Samba share name for volume group live mount." + }, + "useRouteBasedIpSelectionForVolumeGroup": { + "type": "boolean", + "description": "Use route-based IP selection for volume group." + }, + "volumeDataTransferParallelism": { + "type": "integer", + "format": "int32", + "description": "Number of concurrent requests for transferring volume snapshot data from a remote host to the Rubrik cluster." + }, + "volumeDataTransferBlockSize": { + "type": "integer", + "format": "int32", + "description": "Data block size in MB used in transferring volume snapshot data from remote hosts to the Rubrik cluster." + }, + "noBufferFileAlignmentBlockSize": { + "type": "integer", + "format": "int32", + "description": "Alignment block size to be used in no buffer file operations when physical sector size is not available. The value should be a multiple of the disk physical sector size." + }, + "volumeGroupEnableParallelFetch": { + "type": "boolean", + "description": "Enables parallel fetching of snapshot data for volume groups." + }, + "volumeGroupBackupToVhdxOnW2K8R2": { + "type": "boolean", + "description": "When using fast VHDX creator for volume backups on Windows 2008 R2, use VHDX for virtual disk instead of VHD." + }, + "enableVolumeGroupFastVirtualDiskBuild": { + "type": "boolean", + "description": "Feature flag for creating virtual disk image directly, for volume groups." + }, + "failSnapshotOnValidationFailure": { + "type": "boolean", + "description": "Feature flag for failing backup on snapshot validation failure. When the value is set to false only an error is logged and the backup job succeeds." + }, + "migrateFastVirtualDiskBuild": { + "type": "string", + "description": "A flag that controls the use of the fast VHDX builder during volume group migration. When the value of the flag is 'Error-Only,' the volume group uses the fast VHDX builder when a pre-5.1 volume group backup operation fails during the fetch phase. When the value of the flag is 'All,' the volume group uses the fast VHDX builder the next time the volume group is backed up. Any other value disables the fast VHDX builder. This flag is used in combination with the maxFullMigrationStoragePercentage value." + }, + "maxFullMigrationStoragePercentage": { + "type": "integer", + "format": "int32", + "description": "Specifies a percentage of the total available storage space. When performing a full volume group backup operation would bring the total used storage space above this threshold, the cluster takes incremental backups instead. This value is used in combination with the migrateFastVirtualDiskBuild flag." 
+ }, + "volumeGroupMaxConcurrentFetchChildJobs": { + "type": "integer", + "format": "int32", + "description": "Max number of child jobs running, to fetch the snapshot data, across all the nodes in the cluster." + }, + "volumeGroupMountPrefetchEnabled": { + "type": "boolean", + "description": "Whether enable prefetch for mergedSpec." + }, + "volumeGroupMountCachingEnabled": { + "type": "boolean", + "description": "Whether enable caching for mergedSpec." + }, + "volumeGroupRestoreReadAheadEnabled": { + "type": "boolean", + "description": "Whether to use Sequential Read-Ahead in the volume group export jobs." + }, + "kbsWhiteListFor2K8R2": { + "type": "string", + "description": "Whether to enable volume group incrementals or not." + }, + "volumeGroupParallelBuildMaxChildInstancesPerNode": { + "type": "integer", + "format": "int32", + "description": "The maximum number of child instances per node for the VOLUME_GROUP_PARALLEL_BUILD_VIRTUAL_DISK_IMAGE parallelizable task." + }, + "volumeGroupParallelBuildMaxChildInstancesPerParentJobPerNode": { + "type": "integer", + "format": "int32", + "description": "The maximum number of child instances per node per parent job for the VOLUME_GROUP_PARALLEL_BUILD_VIRTUAL_DISK_IMAGE parallelizable task." + }, + "volumeGroupMaxRetryForIngestTask": { + "type": "integer", + "format": "int32", + "description": "The maximum number of retry for volume group parallel backup job." + }, + "enableOsVersionCheckForVolumeGroupSnapshot": { + "type": "boolean", + "description": "Controls host operating system version check for volume group snapshot jobs." + }, + "volumeGroupEnablePipelinedFileRestore": { + "type": "boolean", + "description": "Whether to use the Fileset restore pipeline when performing file restore." + }, + "volumeGroupMaxRetryForRetryableTask": { + "type": "integer", + "format": "int32", + "description": "The maximum number of retry for volume group retryable tasks defined in volume group multi-node backup job." + } + } + }, + "GlobalVsphereConfig": { + "type": "object", + "properties": { + "vmwareRefreshGetAgentStatusTimeout": { + "type": "integer", + "format": "int32", + "description": "Timeout for getting agent status for individual vm in seconds." + }, + "vmwareRefreshGetAgentStatusThreadCount": { + "type": "integer", + "format": "int32", + "description": "Number of threads in pool for setting vm agent status on refresh." + }, + "vmwareRefreshPingVmTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout in seconds to ping all of a VM's reported IP addresses in the vCenter refresh job." + }, + "createSnapshotDiskMetadataParallelism": { + "type": "integer", + "format": "int32", + "description": "Parallelism to be used when creating the snapshot disk metadata in the backup job." + }, + "vmwareIoFilterName": { + "type": "string", + "description": "Name of Rubrik's Vmware Io Filters." + }, + "vmwareIoFilterVendorCode": { + "type": "string", + "description": "Vmware's Io Filter Vendor Code for Rubrik." + }, + "cdpStoragePolicyName": { + "type": "string", + "description": "Name of cdp-enabled Storage Policies." + }, + "cdpStoragePolicyDescription": { + "type": "string", + "description": "Description of cdp-enabled Storage Policies." + }, + "cdpStoragePolicySubprofileName": { + "type": "string", + "description": "Name of storage policy subprofile that defines the IO Filter info." + }, + "vmRestoreCachingEnabled": { + "type": "boolean", + "description": "Whether enable cache for MJF in VMware export jobs." 
+ }, + "vmRestoreMjfReadAheadEnabled": { + "type": "boolean", + "description": "Whether enable read ahead for MJF in VMware restore jobs." + }, + "skipPurgeIfFetchedZeroDatacenters": { + "type": "boolean", + "description": "Whether we should skip purging all datacenter objects during the vcenter refresh job if the vsphere session client fetched 0 datacenters. We have run into customer issues like." + }, + "vmwareThriftServerTimeoutInMillis": { + "type": "integer", + "format": "int32", + "description": "We are adding a 1 minute timeout on Pyvmomi and Vmware Automation SDK calls to prevent a slowdown of refresh if the call hangs for some reason." + }, + "overrideFilterVersion": { + "type": "string", + "description": "The filter version to which clusters will be installed/upgraded. We use \"{}\" as the default so we can use the backend logic to deserialize this as a map and determine which override version to use with which filter family version. If a certain environment requires a change of the default value, this config value should be set in the format '{ \"filter-family-version\":\"x.y.z-1OEM.\"'. The value of the key should exactly match the IO filter version returned by the VMware APIs. This can be used if we hit a filter regression and need to hotpatch a new filter version on a customer environment. The filter family versions can be checked in the ioFilterVersionFamilies config. The new filter bundle being hot patched must be placed in the same directory as the other filter bundles." + }, + "ioFilterTaskSleepInMs": { + "type": "integer", + "format": "int32", + "description": "Sleep 30 seconds between checking IO Filter task status." + }, + "ioFilterTaskRetries": { + "type": "integer", + "format": "int32", + "description": "Retry checking IO Filter task status 60 times. Since we sleep for 30 seconds, this is a 30 minute wait period." + }, + "policyTaskSleepInMs": { + "type": "integer", + "format": "int32", + "description": "Sleep 10 seconds between checking Storage Policy task status." + }, + "policyTaskRetries": { + "type": "integer", + "format": "int32", + "description": "Retry checking Storage Policy task status 30 times. Since we sleep for 10 seconds, this is a 5 minute wait period." + }, + "vmwareBackupMultiNodeFetchData": { + "type": "boolean", + "description": "Whether use multiple nodes for VMware backup data fetch or not." + }, + "maxThrottleTimeInSecs": { + "type": "integer", + "format": "int32", + "description": "Max amount of time to wait for throttle for storage policy ops versus vm snapshot ops." + }, + "serviceInstanceTimeoutCheckPeriodInSeconds": { + "type": "integer", + "format": "int32", + "description": "We wait 15 minutes before checking if a service instance in a vsphere session is inactive and needs to be refreshed. VMware's timeout period is technically 30 minutes, so the 15 minutes should be safe." + }, + "ignoreZeroedBlocksDuringExport": { + "type": "boolean", + "description": "Whether to ignore blocks with null values during vmdk export." + }, + "numberImportOutstandingIOs": { + "type": "integer", + "format": "int32", + "description": "Number of concurrent VixDiskLib async reads during import." + }, + "numberExportOutstandingIOs": { + "type": "integer", + "format": "int32", + "description": "Number of concurrent VixDiskLib async writes during export." 
+ }, + "shouldSkipArchivedDatastoresInHierarchyCache": { + "type": "boolean", + "description": "Whether we should skip loading archived datastores in the cached vCenter hierarchy, which can help reduce memory pressure." + }, + "queryAllocatedBlocksChunkSize": { + "type": "integer", + "format": "int32", + "description": "Chunk size (number of sectors) when querying for allocated blocks. We set this to 2048 by default because that's the default number of sectors ingested per read call." + }, + "tcpSocketRWTimeout": { + "type": "integer", + "format": "int32", + "description": "TCP socket read/write timeout in seconds." + }, + "vmwareMultiNodeBackupMaxInstanceNum": { + "type": "integer", + "format": "int32", + "description": "Multiplier of the number of nodes in the OK status on the cluster. The multiplication is the maximum number of concurrent child job instances of VMware multi-node backup." + }, + "maxInstanceNumPerVmwareMultiBackupJob": { + "type": "integer", + "format": "int32", + "description": "Maximum allowed number of concurrent child job instances on the cluster for a VMware multi-node backup job. This is used to limit the total number of connections to an ESXi host concurrently. It is necessary for the cluster which has large number of nodes and VMware virtual machine with large disk size." + }, + "exportBlockSize": { + "type": "integer", + "format": "int32", + "description": "Number of bytes per write to VixDiskLib during export." + }, + "importBlockSize": { + "type": "integer", + "format": "int32", + "description": "Number of bytes per read from VixDiskLib during import." + }, + "hostFilterStatusTimeoutSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout period for getting esx host io filter information. Vmware doesn't return for 10 minutes so we put our own timeout to break out early." + }, + "numThreadsGetHostFilterStatus": { + "type": "integer", + "format": "int32", + "description": "The number of threads we create at a time to get host filter status." + }, + "retainDvsNics": { + "type": "boolean", + "description": "Whether to randomly connect/remove NICs when recovering a vm. This setting only applies if the original network connection no longer exists, for example when recovering to a different vCenter. In this case, if set to true we leave the NICs installed but connected to a random network. If set to false we remove the NICs completely." + }, + "maxRetryForParallelIngestTask": { + "type": "integer", + "format": "int32", + "description": "The maximum retry for vmware ingest task in multi-node backup job. 0 means no retry." + }, + "maxRetryForRetryableJobTask": { + "type": "integer", + "format": "int32", + "description": "The maximum retry for vmware retryable job tasks in multi-node backup job. 0 means no retry." + }, + "maxRetryForExportTask": { + "type": "integer", + "format": "int32", + "description": "The maximum number of retries for any of the retry-able tasks in the export job. Allows us to handle errors that could occur during large export jobs. 0 means no retries." + }, + "verifyShardBlobStoreEnabled": { + "type": "boolean", + "description": "Whether additional verification of sharded blobstore against fingerprint file should be performed." + }, + "multiNodeMaxChildInstancesPerNode": { + "type": "integer", + "format": "int32", + "description": "The concurrent number of multi-node child job instance for ingest can be scheduled per node. 
The number of actual job instances will depends on the smaller value of this parameter and local quicksliver config of ChildrenThreadPoolSize." + }, + "vmwareExportJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "The concurrent number of VMware export job can be scheduled per node." + }, + "excludedVmcResourcePoolName": { + "type": "string", + "description": "The Management Resource Pool cannot be used for backup or recovery operations on VMC environments. Specify the name of the Management Resource Pool in this value in order to properly exclude the Management Resource Pool from backup or recovery operations." + }, + "allowVsphereQuiesceForVappConsistent": { + "type": "boolean", + "description": "Allow taking vsphere quiesce snapshot for vm with vapp consistent configuration before fall into crash consistent snapshot." + }, + "onlyAllowArrayIntegrationBackup": { + "type": "boolean", + "description": "A Boolean that specifies whether to fall back to a standard VMware backup job when the PureArray integration backup is enabled but not possible for the VMware virtual machine. When this value is false, a failed PureArray integration backup job falls back to a standard VMware backup job." + }, + "vmwareRefreshSetAgentStatusEnabled": { + "type": "boolean", + "description": "A flag indicating whether setAgentStatuses is enabled on refresh." + }, + "excludedVmcDatastoreName": { + "type": "string", + "description": "The vsanDatastore cannot be used for backup or recover operations on VMC environments. Specify the name of the Datastore in this value in order to properly exclude it." + }, + "vddkServerVersion": { + "type": "string", + "description": "Version of VDDK server that we are running on the cluster. This is compared against the version running on the hotadd proxy virtual machines." + }, + "forceNBDSSL": { + "type": "boolean", + "description": "Force backup to use nbd ssl even on VMC vCenters." + }, + "clientObjectPoolMaxTotalPerKey": { + "type": "integer", + "format": "int32", + "description": "max number of clients per key in the client object pool. It's a configuration for VapiClient." + }, + "clientObjectPoolTimeBetweenEvictionRunsMillis": { + "type": "integer", + "format": "int32", + "description": "Time between object pool eviction thread runs in millis. By default, the value is 30 minutes. If it's -1, then there will be no thread to evict object. It's a configuration for VapiClient." + }, + "clientObjectPoolMinEvictableIdleTimeMillis": { + "type": "integer", + "format": "int32", + "description": "Sets the minimum amount of time an object may sit idle in the pool before it is eligible for eviction. By default, it's one hour. It's a configuration for VapiClient." + }, + "numRetriesToGetIpAddressForDeployedVm": { + "type": "integer", + "format": "int32", + "description": "Specifies an integer. Attempts to fetch the IP address of newly deployed virtual machines are retried a number of times equal to the specified integer, with 15-second waits between retries. The default timeout is 120 seconds." + }, + "numDefaultAvailablePortsForProxyVms": { + "type": "integer", + "format": "int32", + "description": "The number of available iSCSI ports on a proxy virtual machine that can be used for HotAdd transport." + }, + "numOptimalAvailablePortsPerProxy": { + "type": "integer", + "format": "int32", + "description": "Number of ports per hotadd proxy for optimal throughput, should be less than or equal to numDefaultAvailablePortsForProxyVms." 
+ }, + "hotaddProxyBandwidthLimit": { + "type": "integer", + "format": "int32", + "description": "Bandwidth limit in mbps per hotadd proxy vm. 0 is unlimited bandwidth." + }, + "hotAddProxyVmTagName": { + "type": "string", + "description": "The name of the VMware tag used to detect Rubrik hotadd proxy virtual machines." + }, + "hotAddProxyVmTagCategoryName": { + "type": "string", + "description": "The name of the VMware tag category used to detect Rubrik hotadd proxy virtual machines." + }, + "overrideHotAddProxiesPerDatastore": { + "type": "integer", + "format": "int32", + "description": "Can be used to override our calculations for how many hotadd proxies are needed per datastore. If set to 0 we fall back to our formula for calculating this number." + }, + "datastoreFreespaceThresholdForSnapshot": { + "type": "number", + "format": "double", + "description": "The threshold for the ratio of datastore free storage/total storage capacity to be monitored during VMware virtual machine backup operations. If the current ratio of free storage/total storage capacity of a datastore is below the configured threshold, the associated backup operation will be cancelled. A reading of 0.01 means the amount of free storage is 1% of total storage capacity." + }, + "monitorDatastoreSpaceFreqInMs": { + "type": "integer", + "format": "int32", + "description": "Frequency, in milliseconds, of the vCenter monitoring job." + }, + "monitorDatastoreSpaceEnabled": { + "type": "boolean", + "description": "A Boolean value that indicates if the cluster monitors the free capacity of a datastore." + }, + "numOfRetriesToPopulateBlueprintFile": { + "type": "integer", + "format": "int32", + "description": "Number of retries to populate the blueprint file for Vmware backup jobs." + }, + "monitorVcenterJobFreqInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency of the vCenter monitoring job in minutes." + }, + "numRetriesToWaitForVddkServer": { + "type": "integer", + "format": "int32", + "description": "Number of times to retry while waiting for Vddk server to come up on VMC proxy vm. Ping waits for 10 seconds in between retries." + }, + "fetchHostCertTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout in milliseconds for fetching the host certificates during the vcenter refresh job." + }, + "intelligentVcenterHeartBeatEnabled": { + "type": "boolean", + "description": "Enable intelligent vCenter heartbeat feature." + }, + "enableVMwareStretchCluster": { + "type": "boolean", + "description": "Enable VMware Stretch cluster. When this flag is enabled, VMware refresh job will link VMs from different clusters for VMs have the same name and moid." + }, + "disableVmotionWhenTakingSnapshot": { + "type": "boolean", + "description": "Calls VixDiskLib's PrepareForAccess before Vmdk open." + }, + "mergeCbtAndQueryAllocatedBlocks": { + "type": "boolean", + "description": "Whether we will merge dirty blocks indicated by Cbt and query allocated blocks on incremental snapshots; this can reduce data to ingest for thin provisioned VMDKs." + }, + "exportAllocatedBlocksOnly": { + "type": "boolean", + "description": "Whether to write allocated blocks only during export. When set to true, export only writes blocks are marked as allocated according to VixDiskLib's query allocated blocks; otherwise, writes all blocks." + }, + "ingestAllocatedBlocksOnly": { + "type": "boolean", + "description": "Whether to read allocated blocks only during ingest. 
During ingest, for full snapshots, we read all blocks, for incremental snapshots, we read blocks marked as dirty by CBT. In both cases, if this flag is set to true, we will not read unallocated blocks." + }, + "maxNumberInBatchRecovery": { + "type": "integer", + "format": "int32", + "description": "Max number of VMs allowed in batch recovery." + }, + "shouldKeepBlueprintAfterInPlaceRecovery": { + "type": "boolean", + "description": "Whether to keep blueprint file after in-place recovery. The default value is false (meaning blueprint file will be deleted after in-place recovery).If we want to keep blueprint file after in-place recovery, set this config to true." + } + } + }, + "LocalAkkaConfig": { + "type": "object", + "properties": { + "sprayPort": { + "type": "integer", + "format": "int32", + "description": "Spray server port." + }, + "sprayHttpPort": { + "type": "integer", + "format": "int32", + "description": "Spray server HTTP port." + }, + "enableHttpPort": { + "type": "boolean", + "description": "Serve HTTP port for redirecting to HTTPS if true." + }, + "sprayWaitIntervalInMillis": { + "type": "integer", + "format": "int32", + "description": "The sleep interval when spray is waiting for cluster bootstrapped." + }, + "webDirectory": { + "type": "string", + "description": "Configurations for spray server related directories." + }, + "gzWebDirectory": { + "type": "string", + "description": "gz directory is within web directory for easy hot-patching." + }, + "brWebDirectory": { + "type": "string", + "description": "br directory is within web directory for easy hot-patching." + }, + "downloadDirectory": { + "type": "string", + "description": "Directory for download in spray." + }, + "eventArtifactsDirectory": { + "type": "string", + "description": "Directory for event artifacts in spray." + }, + "connectorDirectory": { + "type": "string", + "description": "Directory for connector in spray." + }, + "oracleExampleFileDirectory": { + "type": "string", + "description": "Directory for Oracle ACO example in spray." + }, + "supportBundleBaseDir": { + "type": "string", + "description": "Base directory for support bundle." + }, + "supportBundleDirName": { + "type": "string", + "description": "Name of directory for support bundle." + }, + "swaggerUiDirectory": { + "type": "string", + "description": "Directory for swagger UI." + }, + "swaggerSpecResourceDirectory": { + "type": "string", + "description": "Resource directory for swagger spec." + }, + "redocDirectory": { + "type": "string", + "description": "Directory for redoc." + }, + "xrayDirectory": { + "type": "string", + "description": "Directory for xray debugging tool." + }, + "swaggerExposeHiddenApis": { + "type": "boolean", + "description": "True if swagger will expose hidden APIs." + }, + "sprayServerHeapSizeLowLimit": { + "type": "string", + "description": "Low limit for heap size for spray server service (ideally to be scaled using local config)." + }, + "sprayServerHeapSizeHighLimit": { + "type": "string", + "description": "High limit for heap size for spray server service (ideally to be scaled using local config)." + }, + "sprayServerPerThreadStackSize": { + "type": "string", + "description": "Per thread stack size for spray server service." + }, + "sprayServerDebuggerPort": { + "type": "integer", + "format": "int32", + "description": "Spray server service debugger port." + }, + "sprayServerNumLogFiles": { + "type": "integer", + "format": "int32", + "description": "Number of log files for spray server." 
+ }, + "sprayServerUnixDomainSocketPath": { + "type": "string", + "description": "Directory for spray server unix domain socket." + }, + "fipsEnabledFlagPath": { + "type": "string", + "description": "Directory for fips enabled." + } + } + }, + "LocalAppFlowsConfig": { + "type": "object", + "properties": { + "snappableOperationsThreads": { + "type": "integer", + "format": "int32", + "description": "Number of snappable operations threads." + }, + "appflowsEnableDiffTreeNumRetries": { + "type": "integer", + "format": "int32", + "description": "During post failover API, the number of times to retry enabling diff tree for blob store." + }, + "appflowsEnableDiffTreeRetryDelayMilliSecs": { + "type": "integer", + "format": "int32", + "description": "During post failover API, sleep duration in milli-seconds between attempts to enable diff tree for blob store." + }, + "hydratedSnappableTimeToLiveInDays": { + "type": "integer", + "format": "int32", + "description": "Maximum time to live for the hydrated snappables in days." + }, + "perSnappableHydrationStatsEnabled": { + "type": "boolean", + "description": "Enable true to capture per snappable hydration metrics. Enabling per snappable hydration metrics will affect influx db, use only when needed to do fine grained debugging." + } + } + }, + "LocalAtlasConfig": { + "type": "object", + "properties": { + "uuid_to_header_cache_size": { + "type": "integer", + "format": "int32", + "description": "Maximum number of entries in uuid header cache." + }, + "uuid_to_parent_cache_size": { + "type": "integer", + "format": "int32", + "description": "Maximum number of entries in parent uuid cache." + }, + "file_to_file_type_cache_size": { + "type": "integer", + "format": "int32", + "description": "Maximum number of entries in file type cache." + }, + "path_util_parent_uuid_cache_size": { + "type": "integer", + "format": "int32", + "description": "Maximum number of uuids that can be cached by PathUtil." + }, + "path_util_header_cache_bytes": { + "type": "integer", + "format": "int64", + "description": "Maximum memory consumption of all headers cached by PathUtil." + }, + "two_level_kvstore_in_mem_index_block_cache_size_hi": { + "type": "integer", + "format": "int32", + "description": "Number of two-level kvstore index blocks to keep in memory in deserialized form per two-level kvstore instance when client provides an index cache, or a global in memory cache is available for use. This is higher for high end so a client who cares about caching gets better performance." + }, + "tlkv_in_mem_index_block_cache_size_dry_read": { + "type": "integer", + "format": "int32", + "description": "Number of two-level kvstore index blocks to keep in memory in deserialized form per two-level kvstore instance for dry reads." + }, + "tlkv_global_max_capacity": { + "type": "integer", + "format": "int32", + "description": "Maximum number of two-level kvstore index blocks which can be kept in memory caches overall across multiple kvstores. This will allow us to limit the overall memory which can be consumed by these kvstores globally." + }, + "tlkv_global_target_capacity": { + "type": "integer", + "format": "int32", + "description": "This is the target number to which the total number of cached in memory two-level kvstore index blocks will be brought down to when the maximum overall limit is hit." 
+ }, + "open_files_soft_limit": { + "type": "integer", + "format": "int32", + "description": "If total number of files in file cache exceed this, remove all opened files that are idle with zero open count." + }, + "external_open_files_hard_limit": { + "type": "integer", + "format": "int32", + "description": "Maximum number of external files allowed to be open. Once we reach this limit, we will fail subsequent opens on external files." + }, + "writable_open_files_hard_limit": { + "type": "integer", + "format": "int32", + "description": "Maximum number of total files allowed to be open in writable mode. Once we reach this limit, we will fail subsequent file opens in writable mode." + }, + "open_files_hard_limit": { + "type": "integer", + "format": "int32", + "description": "Maximum number of total files allowed to be open. Once we reach this limit, we will fail subsequent file opens." + }, + "open_files_memory_consumption_soft_limit": { + "type": "integer", + "format": "int64", + "description": "If memory consumption for open files exceed this, start removing idle files with zero open count." + }, + "open_files_target_memory_consumption": { + "type": "integer", + "format": "int64", + "description": "If memory consumption for open files exceed this, request the highest memory usage files to reduce their memory consumption. If < 0, it will be considered disabled." + }, + "open_files_memory_consumption_hard_limit": { + "type": "integer", + "format": "int64", + "description": "Maximum memory consumption for open files. Once we reach this limit, we will fail subsequent opens." + }, + "append_only_file_max_unflushed_bytes": { + "type": "integer", + "format": "int64", + "description": "Maximum bytes we can keep unflushed for each append only file." + }, + "append_only_file_global_unflushed_bytes": { + "type": "integer", + "format": "int32", + "description": "Extra memory that can be consumed by all appendonly files to cache more data for performance. This is on top of the per file limit [append_only_file_max_unflushed_bytes]. This memory is currently only used for cloud locations to parallelize the number of connections (writes) to cloud." + }, + "inplace_max_buffered_bytes": { + "type": "integer", + "format": "int64", + "description": "Maximum data that can be buffered in memory in inplace file." + }, + "inplace_buffered_bytes_low_wm": { + "type": "integer", + "format": "int64", + "description": "If the number of bytes buffered of inplace file is more than this, flush bytes from non full stripes till we have less than or equal to this many bytes buffered." + }, + "num_concurrent_mjf_conversions": { + "type": "integer", + "format": "int32", + "description": "Number of concurrent long running mjf conversions to allow." + }, + "num_concurrent_short_running_mjf_conversions": { + "type": "integer", + "format": "int32", + "description": "Number of concurrent short running mjf conversions to allow." + }, + "global_chunk_cache_size": { + "type": "integer", + "format": "int64", + "description": "Size of read-ahead chunk cache." + }, + "file_journal_max_total_buffer_bytes": { + "type": "integer", + "format": "int64", + "description": "Maximum number of journal bytes that can be buffered by all journals. Roughly an equal number of bytes will also buffered in the journal's write queues, to ensure good performance." + }, + "fileserver_max_inflight_write_bytes": { + "type": "integer", + "format": "int64", + "description": "Max memory buffer limit for receved write request on file server. 
Once we reach this limit, we will fail subsequent write requests." + }, + "reed_solomon_slice_length": { + "type": "integer", + "format": "int32", + "description": "Slice length to be used in Reed Solomon encoding." + }, + "sorted_range_cache_global_memory_limit_bytes": { + "type": "integer", + "format": "int64", + "description": "Maximum memory consumption across all sorted range caches." + }, + "router_write_sleep_us": { + "type": "integer", + "format": "int32", + "description": "Number of micro seconds that each write of router file should sleep for. This can be used to throttle down write speed of the router." + }, + "tlkv_compress_stage_pool_size": { + "type": "integer", + "format": "int32", + "description": "Thread pool size for compressing data in tlkv store." + }, + "tlkv_compress_stage_ordering_buffer_size": { + "type": "integer", + "format": "int32", + "description": "Ordering buffer size for compressing data in tlkv store." + }, + "tlkv_compress_stage_queue_size": { + "type": "integer", + "format": "int32", + "description": "Worker queue size for compressing data in tlkv store." + }, + "patch_file_builder_fp_stage_pool_size": { + "type": "integer", + "format": "int32", + "description": "Thread pool size for fingerprinting data in patch file builder." + }, + "patch_file_builder_fp_stage_ordering_buffer_size": { + "type": "integer", + "format": "int32", + "description": "Ordering buffer size for fingerprinting in patch file builder." + }, + "patch_file_builder_fp_stage_queue_size": { + "type": "integer", + "format": "int32", + "description": "Worker queue size for fingerprinting data in patch file builder." + }, + "journal_fingerprint_stage_pool_size": { + "type": "integer", + "format": "int32", + "description": "Thread pool size for fingerprinting data in journals." + }, + "journal_fingerprint_stage_ordering_buffer_size": { + "type": "integer", + "format": "int32", + "description": "Ordering buffer size for fingerprinting data in journals." + }, + "journal_fingerprint_stage_queue_size": { + "type": "integer", + "format": "int32", + "description": "Worker queue size for fingerprinting data in journals." + }, + "journal_compress_stage_pool_size": { + "type": "integer", + "format": "int32", + "description": "Number of threads compressing data per file journal." + }, + "journal_compress_stage_ordering_buffer_size": { + "type": "integer", + "format": "int32", + "description": "Ordering buffer size for compressing data in journals." + }, + "journal_compress_stage_queue_size": { + "type": "integer", + "format": "int32", + "description": "Worker queue size for compressing data in journals." + }, + "tlkvs_index_prefetch_pool_size": { + "type": "integer", + "format": "int32", + "description": "Number of threads for index block prefetch in tlkvs." + }, + "tlkvs_index_prefetch_queue_size": { + "type": "integer", + "format": "int32", + "description": "Worker queue size for index block prefetch in tlkvs." + }, + "tlkvs_index_prefetch_count": { + "type": "integer", + "format": "int32", + "description": "Number of index blocks to prefetch each time." + }, + "router_file_max_open_bins": { + "type": "integer", + "format": "int32", + "description": "Maximum allowed number of bins to be open with a router file (ContentRoutedSplitMJF) simultaneously." + }, + "linear_split_mjf_max_open_bins": { + "type": "integer", + "format": "int32", + "description": "Maximum allowed number of bins to be open with a LinearSplitMJF simultaneously." 
+ }, + "sparse_file_read_ahead_cache_size_in_mb": { + "type": "integer", + "format": "int32", + "description": "Maximum allowed size in MB for sparse file read ahead cache." + }, + "enable_range_cache": { + "type": "integer", + "format": "int32", + "description": "Whether to enable range cache." + }, + "sdfsMount": { + "type": "string", + "description": "Mount directory for SDFS." + }, + "sdfsServicePort": { + "type": "integer", + "format": "int32", + "description": "SDFS service port." + }, + "sdfsProfile": { + "type": "boolean", + "description": "True if profiling on SDFS." + }, + "sdfsExecInstrumentation": { + "type": "string", + "description": "SDFS execution instrumentation." + }, + "sdfsEnableCoreDumps": { + "type": "boolean", + "description": "True if core cumps are enabled for SDFS." + }, + "sdfsMemoryAllocator": { + "type": "string", + "description": "Memory allocator for SDFS. Setting to \"\" will default to glibc malloc." + }, + "cluster_disk_usable_fraction": { + "type": "number", + "format": "double", + "description": "Disk usage threshold past which it will be considered overfull." + }, + "allocation_balancing_factor": { + "type": "number", + "format": "double", + "description": "Allocation balancing factor for SDFS for nodes, should be >= 1.0, higher value means allocation based on disk fullness." + }, + "disk_allocation_balancing_factor": { + "type": "number", + "format": "double", + "description": "Allocation balancing factor for SDFS while allocating to disks, should be >= 1.0, higher value means allocation based on disk fullness." + }, + "fileserver_status_refresh_interval": { + "type": "number", + "format": "double", + "description": "Fileserver status refresh interval in seconds." + }, + "directory_spec_minimum_ttl_secs": { + "type": "integer", + "format": "int32", + "description": "Minimum value for ttl_secs field of DirectorySpec, in seconds." + }, + "file_cache_close_max_grace_period_secs": { + "type": "integer", + "format": "int32", + "description": "Length of time that idle files remain in the SDFS cache. When in the cache, files in TTL directories have their mtime updated in the background." + }, + "metadata_scan_period_secs": { + "type": "integer", + "format": "int32", + "description": "Frequency of scanning metadata to trigger maintenance operations." + }, + "archived_gc_period_secs": { + "type": "integer", + "format": "int32", + "description": "Frequency of scanning archived metadata to handle GC." + }, + "config_refresh_period_millis": { + "type": "integer", + "format": "int32", + "description": "Period of refreshing global configs from metadata." + }, + "stats_interval_secs": { + "type": "integer", + "format": "int32", + "description": "Interval used for reporting stats." + }, + "min_cluster_size_to_enable_erasure_coding": { + "type": "integer", + "format": "int32", + "description": "Minimum number of nodes required to enable erasure coding." + }, + "pfc_prefetch_pipeline_pool_size": { + "type": "integer", + "format": "int32", + "description": "Thread pool size for PFC prefetching pipeline." + }, + "pfc_prefetch_pipeline_ordering_buffer_size": { + "type": "integer", + "format": "int32", + "description": "Ordering buffer size for PFC prefetching pipeline." + }, + "pfc_prefetch_pipeline_queue_size": { + "type": "integer", + "format": "int32", + "description": "Worker queue size for PFC prefetching pipeline." + }, + "pfc_extents_queue_size": { + "type": "integer", + "format": "int32", + "description": "Queue size to hold prefetched extents in PFC. 
Each element in the queue can hold multiple extents." + }, + "provisioned_space_per_node_on_cloud_in_bytes": { + "type": "integer", + "format": "int64", + "description": "Provisioned space per node to default to when scan stats are not available." + }, + "sdfs_pull_open_mjfs_queue_size": { + "type": "integer", + "format": "int32", + "description": "Worker queue size for parallelizing pulling open mjfs." + }, + "sdfs_pull_open_mjfs_pool_size": { + "type": "integer", + "format": "int32", + "description": "Number of threads parallelizing pulling open mjfs." + }, + "default_flash_cache_max_allowed_index_memory_mb": { + "type": "integer", + "format": "int32", + "description": "Default maxiumum allowed memory in MB to be used for in-memory index of cache entries for a persistent flash cache." + }, + "open_info_cache_max_allowed_index_memory_mb": { + "type": "integer", + "format": "int32", + "description": "Maxiumum allowed memory in MB to be used for in-memory index of cache entries for an OpenInfo cache." + }, + "cloud_read_cache_max_allowed_index_memory_mb": { + "type": "integer", + "format": "int32", + "description": "Maxiumum allowed memory in MB to be used for in-memory index of cache entries for a cloud read cache." + }, + "patch_file_range_checker_max_memory_usage": { + "type": "integer", + "format": "int32", + "description": "Maximum memory usage by range checker for patch files." + }, + "range_map_range_checker_max_memory_usage": { + "type": "integer", + "format": "int32", + "description": "Maximum memory usage by range checker for patch files." + } + } + }, + "LocalBlobstoreConfig": { + "type": "object", + "properties": { + "consolidateJobDelayRangeInSec": { + "type": "integer", + "format": "int32", + "description": "Delay range in seconds to randomly pick a delay from when the consolidate job cannot acquire resources." + }, + "consolidateJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent consolidate jobs per node. This includes jobs run for archival consolidation as well." + }, + "percentOfArchivalConsolidationJobsPerNode": { + "type": "number", + "format": "double", + "description": "Percentage of consolidation jobs per node that can run Archival Consolidation. This is relative to consolidateJobInMemorySemShares. Archival consolidation can be disabled on a given node by seeting this value to zero. We recommend to keep (percentOfSdfsConsolidationJobsPerNode + percentOfArchivalConsolidationJobsPerNode) <= 1. Otherwise either the local or the archival consolidation may end up waiting on each other for longer than desired. For instance, all local consolidation jobs could queue up behind the the archival ones. The configs are kept independently to allow disabling one of them in the field independently of the other." + }, + "percentOfSdfsConsolidationJobsPerNode": { + "type": "number", + "format": "double", + "description": "Percentage of consolidation jobs per node that can run consolidation locally against SDFS. This is relative to consolidateJobInMemorySemShares. Local consolidation can be disabled on a given node by seeting this value to zero. We recommend to keep (percentOfSdfsConsolidationJobsPerNode + percentOfArchivalConsolidationJobsPerNode) <= 1. Otherwise either the local or the archival consolidation may end up waiting on each other for longer than desired. For instance, all local consolidation jobs could queue up behind the the archival ones. 
The configs are kept independently to allow disabling one of them in the field independently of the other." + }, + "crossJobDelayRangeInSec": { + "type": "integer", + "format": "int32", + "description": "Delay range in seconds to randomly pick a delay from when the cross increment job cannot acquire resources." + }, + "minDataReductionFactorFromCross": { + "type": "number", + "format": "double", + "description": "Minimum data reduction factor that is expected for any cross increment." + }, + "crossJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent cross jobs per node." + }, + "crossJobInMemorySemSharesInRollbackMode": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent cross jobs per node when cluster is in rollback mode. Cross is disabled by default in rollback mode." + }, + "reverseJobDelayRangeInSec": { + "type": "integer", + "format": "int32", + "description": "Delay range in seconds to randomly pick a delay from when the reverse increment job cannot acquire resources." + }, + "reverseSpecsCapInTiB": { + "type": "integer", + "format": "int32", + "description": "Cap on the reverse specs per vm in TiB." + }, + "maxReverseJobsPerNodeInRollbackMode": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent reverse jobs per node when the cluster is in rollback state. Reverse is slowed down during this time, so count will be lower." + }, + "maxLocalReverseJobsPerNode": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent local reverse jobs per node. This includes reverse jobs that run locally, but reverse the blobstore chain on an archival location (typically NFS or private object stores)." + }, + "maxArchivalReverseJobsPerNode": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent archival reverse jobs per node. This exclusively includes the reverse jobs that run on a bolt for an archival location. In particular, it does not include reverse jobs for NFS or private object stores." + }, + "maxPromoteManagedVolumeExportHandleJobsPerNode": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent managed volume handle promotion jobs." + }, + "gcJobMaxFailedCount": { + "type": "integer", + "format": "int32", + "description": "Garbage Collector job maximum failure count." + }, + "gcJobMaxRunTimeInMins": { + "type": "integer", + "format": "int32", + "description": "Garbage Collector job maximum run time in minutes." + }, + "gcJobDelayRangeInSec": { + "type": "integer", + "format": "int32", + "description": "Delay range in seconds to randomly pick a delay from when the Garbage Collector job cannot acquire resources." + }, + "gcJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent blobstore GC jobs per node." + }, + "maxContentPerGroup": { + "type": "integer", + "format": "int32", + "description": "Limits number of contents that can exist in a group." + }, + "maxRepresentationsPerGroup": { + "type": "integer", + "format": "int32", + "description": "Limits number of representations (across all contents) that can exist in a group." + }, + "maxHandlesPerGroup": { + "type": "integer", + "format": "int32", + "description": "Limits number of handles." 
+ }, + "alertingPercentage": { + "type": "integer", + "format": "int32", + "description": "Percentage of max contents, max representations and max handles at which alerts will be made." + }, + "abandonThresholdInDaysForGroupsNotUpdated": { + "type": "integer", + "format": "int32", + "description": "Groups are considered abandoned if no updates are made for this duration." + }, + "purgeThresholdInDaysForGroupsNotReadOrUpdated": { + "type": "integer", + "format": "int32", + "description": "Groups may be purged if not read or updated within this duration. The value of this should be greater than abandonThresholdInDaysForGroupsNotUpdated." + }, + "optimizeRepresentationJobDelayRangeInSec": { + "type": "integer", + "format": "int32", + "description": "Delay range in seconds to randomly pick a delay from when the optimize representation job cannot acquire resources." + }, + "maxOptimizeRepresentationJobsPerNode": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent optimize representation jobs per node." + }, + "blobstoreTestSvcHeapSizeLowLimit": { + "type": "string", + "description": "Low limit for heap size for blobstore test service (ideally to be scaled using local config)." + }, + "blobstoreTestSvcHeapSizeHighLimit": { + "type": "string", + "description": "High limit for heap size for blobstore test service (ideally to be scaled using local config)." + }, + "blobstoreTestSvcPerThreadStackSize": { + "type": "string", + "description": "Per thread stack size for blobstore test service." + }, + "blobStoreNumLogFiles": { + "type": "integer", + "format": "int32", + "description": "Number of log files for blob store service." + }, + "blobStoreHeapSizeLowLimit": { + "type": "string", + "description": "Low limit for heap size for blob store service (ideally to be scaled using local config)." + }, + "blobStoreHeapSizeHighLimit": { + "type": "string", + "description": "High limit for heap size for blob store service (ideally to be scaled using local config)." + }, + "blobStorePerThreadStackSize": { + "type": "string", + "description": "Per thread stack size for blob store service." + }, + "blobStoreDebuggerPort": { + "type": "integer", + "format": "int32", + "description": "Blob store service debugger port." + }, + "blobStoreServiceNumThreads": { + "type": "integer", + "format": "int32", + "description": "Number of parallel threads to execute RPCs in BlobStore service." + }, + "crossRebaseJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent cross rebase jobs per node." + }, + "crossRebaseJobDelayRangeInSec": { + "type": "integer", + "format": "int32", + "description": "Delay range in seconds to randomly pick a delay from when the cross rebase job cannot acquire resources." + } + } + }, + "LocalCallistoConfig": { + "type": "object", + "properties": { + "establishConnectionTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "How long to wait for establishment of a database connection." + }, + "socketReadTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout value for socket read operations." + }, + "connectionPoolWaitTimeoutInMillis": { + "type": "integer", + "format": "int32", + "description": "Timeout value for a thread to wait to get connection." + }, + "cockroachBackupMaxConcurrency": { + "type": "integer", + "format": "int32", + "description": "Max number of tables backed up concurrently." 
+ }, + "eventsMigrationPageSize": { + "type": "integer", + "format": "int32", + "description": "Page size for scanning sd.events_all table." + }, + "numThreadsForRootCachesRefresh": { + "type": "integer", + "format": "int32", + "description": "Number of threads used for root caches refresh in ManagedHierarchyCache during events migration." + }, + "eventsQueryPageSize": { + "type": "integer", + "format": "int32", + "description": "Page size to use when querying events recursively." + }, + "cockroachCompactSemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of Cockroach compact jobs per node." + }, + "cockroachdbStore": { + "type": "string", + "description": "Directory of CockroachDB." + }, + "cockroachdbHttpPort": { + "type": "integer", + "format": "int32", + "description": "CockroachDB HTTP port." + }, + "cockroachdbMemoryTargetLowLimitMb": { + "type": "integer", + "format": "int32", + "description": "Low limit of CockroachDB memory target adjusted in RAM (ideally to be scaled using local config)." + }, + "cockroachdbMemoryTargetHighLimitMb": { + "type": "integer", + "format": "int32", + "description": "High limit of CockroachDB memory target adjusted in RAM (ideally to be scaled using local config)." + }, + "cockroachdbNumLogFiles": { + "type": "integer", + "format": "int32", + "description": "Number of log files for CockroachDB." + }, + "cockroachdbLogUser": { + "type": "string", + "description": "User for CockroachDB log." + }, + "cockroachCliDefaultTimeoutSecs": { + "type": "integer", + "format": "int32", + "description": "Cockroach CLI default timeout in seconds." + }, + "cockroachDecommissionTimeoutSecs": { + "type": "integer", + "format": "int32", + "description": "Cockroach decommission timeout in seconds." + }, + "cockroachMaxClockOffset": { + "type": "string", + "description": "Max Clock Offset used by Cockroach." + }, + "cqlproxyGOGC": { + "type": "integer", + "format": "int32", + "description": "GOGC env value passed to cqlproxy." + } + } + }, + "LocalCdpConfig": { + "type": "object", + "properties": { + "cdpLogReceiverNumLogFiles": { + "type": "integer", + "format": "int32", + "description": "Number of log files for CDP log receiver." + }, + "cdpLogReceiverCurrentLogSize": { + "type": "integer", + "format": "int32", + "description": "Log size for CDP log receiver." + }, + "cdpMetadataServiceHeapSizeLowLimit": { + "type": "string", + "description": "Low limit for heap size for CDP metadata service (ideally to be scaled using local config)." + }, + "cdpMetadataServiceHeapSizeHighLimit": { + "type": "string", + "description": "High limit for heap size for CDP metadata service (ideally to be scaled using local config)." + }, + "cdpLogReceiverPerThreadStackSize": { + "type": "string", + "description": "Per thread stack size for CDP log receiver." + }, + "cdpLogReceiverDebuggerPort": { + "type": "integer", + "format": "int32", + "description": "Debugger port number for CDP log receiver." + }, + "cdpLogReceiverFilterServiceThriftPort": { + "type": "integer", + "format": "int32", + "description": "Thrift port number for CDP log receiver filter service." + }, + "cdpLogReceiverInternalServiceThriftPort": { + "type": "integer", + "format": "int32", + "description": "Thrift port number for CDP log receiver internal service." + }, + "cdpMetadataServiceThriftPort": { + "type": "integer", + "format": "int32", + "description": "Thrift port number for CDP metadata service." 
+ }, + "cdpReceiverLogVerbosity": { + "type": "integer", + "format": "int32", + "description": "Log verbosity for CDP log receiver." + }, + "cdpReceiverNumWorkerThreads": { + "type": "integer", + "format": "int32", + "description": "Number of worker threads for CDP log receiver." + }, + "cdpReceiverMaxLogWriteSizeInBytes": { + "type": "integer", + "format": "int32", + "description": "Maximum log write size for CDP log receiver in bytes." + }, + "cdpReceiverMaxUnsyncedBytes": { + "type": "integer", + "format": "int32", + "description": "Maximum unsynced bytes for CDP log receiver (Default is 1GB)." + }, + "cdpReceiverSchedulerSleepDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "Sleep duration for CDP receiver scheduler in millisecond." + }, + "cdpReceiverMaxLocalBufferSizeInBytes": { + "type": "integer", + "format": "int32", + "description": "Maximum local buffer size for CDP log receiver in bytes." + }, + "cdpReceiverNotifyBufferSizeThresholdInBytes": { + "type": "integer", + "format": "int32", + "description": "Notify buffer size threshold for CDP log receiver in bytes." + }, + "cdpShmToCmsBeatsIntervalInMillis": { + "type": "integer", + "format": "int32", + "description": "shm to cms beats interval for CDP in millisecond." + }, + "cdpReceiverRollLogSizeThreshold": { + "type": "integer", + "format": "int64", + "description": "Roll log size threshold for CDP log receiver in bytes (Default is 10GB)." + }, + "cdpReceiverRollLogTimeThresholdInMillis": { + "type": "integer", + "format": "int32", + "description": "Roll log time threshold for CDP log receiver (Default is 12 hours)." + }, + "cdpEnableProfiling": { + "type": "boolean", + "description": "If set to true, then profiling is enabled for CDP." + }, + "cdpLogReceiverInternalServiceSocketTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Timeout for CDP log receiver internal service socket in millisecond." + }, + "cdpLogReceiverReplayRequestTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Timeout for CDP log receiver replay request in seconds." + }, + "sendWritesSlowOpThresholdInMillis": { + "type": "integer", + "format": "int32", + "description": "Send writes slow operation threshold in millisecond." + }, + "maxConcurrentReplications": { + "type": "integer", + "format": "int32", + "description": "Maximum concurrent replications." + }, + "maxConcurrentLogSendsPerVmdk": { + "type": "integer", + "format": "int32", + "description": "Maximum concurrent log sends per VMDK." + }, + "maxRecordsPerReadReplication": { + "type": "integer", + "format": "int32", + "description": "Maximum records per read replication." + }, + "liveReplicationCacheSizeBytesPerVmdk": { + "type": "integer", + "format": "int32", + "description": "Live replication cache size per VMDK in bytes (Default is 64MB)." + }, + "cdpReplicationSendMaxSleepRetryInMs": { + "type": "integer", + "format": "int32", + "description": "Maximum sleep retry for CDP replication sender in millisecond." + }, + "cdpReplicationSendMaxRetries": { + "type": "integer", + "format": "int32", + "description": "Maximum number of retries for CDP replication sender." + }, + "cdpReceiverMemoryAllocator": { + "type": "string", + "description": "Memory allocator for CDP receiver." + }, + "cdpReceiverMemoryMonitorHeapProfileDumpDir": { + "type": "string", + "description": "Memory monitor heap profile dump directory for CDP receiver." 
+ }, + "cdpReceiverGlobalBufferEnabled": { + "type": "boolean", + "description": "If set to true, then global buffer is enabled for CDP receiver." + }, + "cdpReceiverSsdBufferEnabled": { + "type": "boolean", + "description": "If set to true, then SSD buffer is enabled for CDP receiver." + }, + "cdpReceiverCoreDumpsEnabled": { + "type": "boolean", + "description": "If true, then Core Dumps are enabled for CDP receiver." + }, + "cdpMetadataServiceCoreDumpsEnabled": { + "type": "boolean", + "description": "If true, then Core Dumps are enabled for CDP Metadata Service." + }, + "logHydrationJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent log hydration jobs per node." + }, + "cdpPerSnappableHydrationStatsEnabled": { + "type": "boolean", + "description": "Enable true to capture per snappable hydration metrics. Enabling per snappable hydration metrics will affect influx db, use only when needed to do fine grained debugging." + }, + "maxConcurrentLogHydrationStreamsPerVm": { + "type": "integer", + "format": "int32", + "description": "Maximum concurrency in replaying logs remotely per VM." + }, + "enableCdpMetadata": { + "type": "boolean", + "description": "The flag to enable cdp metadata service." + } + } + }, + "LocalCerebroConfig": { + "type": "object", + "properties": { + "createVmwareSnapshotJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent create snapshot jobs per node." + }, + "maxCdpInMemoryBufferSizeInMegaBytes": { + "type": "integer", + "format": "int32", + "description": "Maximum size of the in memory global buffer for CDP log receiver." + }, + "maxCdpPerHandleInMemoryBufferSizeInMegaBytes": { + "type": "integer", + "format": "int32", + "description": "Maximum size which can be used by one handle from in-memory global buffer." + }, + "maxCdpSsdBufferSizeInMegaBytes": { + "type": "integer", + "format": "int32", + "description": "Maximum size of the ssd buffer for CDP log receiver." + }, + "maxCdpPerHandleSsdBufferSizeInMegaBytes": { + "type": "integer", + "format": "int32", + "description": "Maximum size which can be used by one handle from in-memory global buffer." + }, + "maxConcurrentIngestsPerVm": { + "type": "integer", + "format": "int32", + "description": "Maximum number of ingest streams per virtual machine." + }, + "maxConcurrentIngestsPerDb": { + "type": "integer", + "format": "int32", + "description": "Maximum number of ingest streams per database." + }, + "maxConcurrentLogIngestsPerDb": { + "type": "integer", + "format": "int32", + "description": "Maximum number of log ingest streams per database." + }, + "expireJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent expire jobs per node." + }, + "mountJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent mount jobs per node." + }, + "unmountJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent unmount jobs per node." + }, + "pullMssqlLogReplicateJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent pull mssql log replicate jobs per node." + }, + "pullOracleLogReplicateJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent pull Oracle log replicate jobs per node." 
+ }, + "pullReplicateJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent pull replicate jobs per node." + }, + "pullReplicateRequestsPerNode": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent pull replicate requests from target cluster supported per node." + }, + "refreshRemoteSnapshotsInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent refresh remote snapshots jobs per node." + }, + "managedVolumeSnapshotJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent Managed Volume snapshot jobs per node." + }, + "slaManagedVolumeSnapshotJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent Managed Volume snapshot jobs per node, for SLA Managed Volumes." + }, + "sapHanaSystemRefreshJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent SAP HANA system refresh jobs per node." + }, + "sapHanaLogSnapshotJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent SAP HANA log snapshot jobs per node." + }, + "sapHanaDataSnapshotJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent create SAP HANA data snapshot job per node." + }, + "sapHanaConfigureRestoreJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent configure restore job per node." + }, + "sapHanaUnconfigureRestoreJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent unconfigure restore job per node." + }, + "replicateToCloudJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent replicate to cloud jobs per node." + }, + "uploadJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent upload jobs per node." + }, + "uploadIndexJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent upload index jobs per node." + }, + "snappableIndexJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent index jobs per node." + }, + "analyzeSnappableJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent analyze snappable jobs per node." + }, + "unlinkSnappableJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent unlink jobs per node." + }, + "hdfsSnapshotJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent HDFS snapshot jobs per node." + }, + "mssqlSnapshotJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent mssql snapshot jobs per node." + }, + "nutanixSnapshotJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent create Nutanix snapshot jobs per node." + }, + "volumeGroupSnapshotJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent create Volume Group snapshot." 
+ }, + "vcdVappSnapshotJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent create VcdVapp snapshot." + }, + "storageArraySnapshotJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent create storage array snapshot jobs per node." + }, + "hypervSnapshotJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent hyperv snapshot jobs per node." + }, + "hypervFingerprintSnapshotJobTypeMaxperNode": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent hyperv fingerprint snapshot jobs per node that can run." + }, + "hypervScvmmRefreshJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent hyperv scvmm refresh jobs per node." + }, + "hypervScvmmDeleteJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent delete SCVMM jobs per node." + }, + "hypervServerRefreshJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent hyperv server refresh jobs per node." + }, + "oracleDbRestoreJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent oracle db restore jobs per node." + }, + "deleteHostJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent delete host jobs per node." + }, + "upgradeHostRbaJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent upgrade host rba jobs per node." + }, + "manageHostRbaJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent manage host rba jobs per node." + }, + "agentMakePrimaryJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent RbaMakePrimary jobs per node." + }, + "remoteClusterServiceMinWorkerThreads": { + "type": "integer", + "format": "int32", + "description": "Minimum number of worker threads in remote cluster server thread pool." + }, + "remoteClusterServiceMaxWorkerThreads": { + "type": "integer", + "format": "int32", + "description": "Maximum number of worker threads in remote cluster server thread pool." + }, + "storageArrayDeleteJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Specifies the maximum number of storage array delete jobs that can run concurrently on a node." + }, + "appBlueprintSnapshotJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent create Blueprint snapshot jobs per node." + }, + "networkThrottleReplicationInterface": { + "type": "string", + "description": "Interface to apply network throttling for replication." + }, + "networkThrottleArchivalInterface": { + "type": "string", + "description": "Interface to apply network throttling for archival." + }, + "numFileSystemShards": { + "type": "integer", + "format": "int32", + "description": "Number of shards to be created in the file system." + }, + "replicationPrefixPathInSdMount": { + "type": "string", + "description": "Path of the replication root directory in SDFS." 
+ }, + "periodicCloudComputeConnectivityCheckJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of periodic connectivity check jobs per node." + }, + "snmpMibUpdateJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent SNMP MIB update jobs per node." + }, + "replicationSeedingImportJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of replication seeding import jobs per node." + }, + "replicationSeedingExportJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of replication seeding export jobs per node." + }, + "replicationSeedingImportNumThreads": { + "type": "integer", + "format": "int32", + "description": "Number of threads for importing patch files in replication seeding import." + }, + "metadataExportMaxParallelism": { + "type": "integer", + "format": "int32", + "description": "Maximum number of parallelism allowed to export metadata." + }, + "replicationSeedingExportBatchSize": { + "type": "integer", + "format": "int32", + "description": "Batch size for exporting snapshots in replication seeding export." + }, + "cleanUpReferencesFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for cleaning up references." + }, + "frequentStatsUpdaterFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for stats updater jobs in minutes." + }, + "infrequentStatsUpdaterFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for infrequent stats jobs in minutes." + }, + "liteRefreshJobIntervalInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval between lite refresh jobs in minutes." + }, + "refreshJobIntervalInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval of refresh jobs in minutes." + }, + "remoteSnapshotRefreshPeriodMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval between remote snapshot refresh jobs in minutes." + }, + "verifySlaJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Frequency in minutes for verify sla jobs." + }, + "jobIntervalToleranceFactor": { + "type": "integer", + "format": "int32", + "description": "This is the tolerance factor between two job instances's start time. Usually if we subtract the start time between two job instances we should be within a small factor from the job frequency. If this factor is greater than this factor we will log a message and alert back home. We provide a larger tolerance for single node mode because it has more limited resources to process all the jobs as compared to other platforms." + }, + "clusterwideStatsUpdaterFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for clusterwide stats updater jobs in minutes." + }, + "refreshStorageArrayJobIntervalInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval of Storage Array refresh jobs in minutes." + }, + "polarisPullReplicateJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent Polaris source pull replicate jobs per node." + }, + "ApplyLegalHoldJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent Apply Legal Hold jobs per node." 
+ }, + "DissolveLegalHoldJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent Dissolve Legal Hold jobs per node." + }, + "scheduleDeferredOdsJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent ScheduleDeferredOds jobs per node." + }, + "notifyExpiringCertificatesJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent NotifyExpiringCertificates per node." + }, + "sdfsStatAllPageSize": { + "type": "integer", + "format": "int32", + "description": "Maximum number of entries to be pulled per sdfs statAll RPC." + }, + "maxThriftClientErrorBackoffTimeInMs": { + "type": "integer", + "format": "int32", + "description": "Maximum amount of time to backoff for client-error in thrift RPCs." + }, + "initialThriftClientErrorBackoffTimeInMs": { + "type": "integer", + "format": "int32", + "description": "Initial backoff time for client-error in thrift RPCs." + }, + "maxRetriesForThriftClientError": { + "type": "integer", + "format": "int32", + "description": "Maximum retries for client-error in thrift RPCs." + }, + "manageHotAddProxyVmJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum nummber of concurrent DeployHotAddProxyVm jobs per node." + }, + "refreshNasSharesStatusJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent shares refresh jobs per node." + }, + "emailSubscriptionJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent shares for email subscription jobs per node." + }, + "maxArchivalMigrateSnappableJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent ArchivalMigrateSnappable jobs per node." + }, + "emailSubscriptionCleanupJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent shares for email subscription clean up jobs per node." + }, + "numRetriesForSdfsDeleteDirectory": { + "type": "integer", + "format": "int32", + "description": "Number of retries for sdfs delete directory. Sdfs delete directory is known to hit backpressure on cockroachdb when many children are present in that directory. This config allows for sufficient exponential backoff retries before failing." + } + } + }, + "LocalCloudConfig": { + "type": "object", + "properties": { + "enableCloudConversionJobForAws": { + "type": "boolean", + "description": "If set to true, runs the new cloud conversion job else runs the old instantiation job." + }, + "enableCloudConversionJobForAzure": { + "type": "boolean", + "description": "If set to true, runs the new cloud conversion job else runs the old instantiation job." + }, + "supportedDistrosForCloudConversionJobOnAws": { + "type": "string", + "description": "The list of comma separated os distros for cloud image conversion job on aws." + }, + "supportedDistrosForCloudConversionJobOnAzure": { + "type": "string", + "description": "The list of comma separated os distros for cloud image conversion job on azure." + }, + "supportedDistrosForCloudInstantiationJobOnAzure": { + "type": "string", + "description": "The list of comma separated os distros for cloud image instantiation job on azure." 
+ }, + "minimumFreeSpaceInBootForConversionInMb": { + "type": "integer", + "format": "int32", + "description": "Minimum free space in /boot directory to start conversion." + }, + "minimumFreeSpaceInSbinForConversionInMb": { + "type": "integer", + "format": "int32", + "description": "Minimum free space in /sbin directory to start conversion." + }, + "minimumFreeSpaceInCDriveForConversionInMb": { + "type": "integer", + "format": "int32", + "description": "Minimum free space in C:/Rubrik directory to start conversion." + }, + "minimumSupportedLinuxKernelForAWS": { + "type": "string", + "description": "Minimum supported Linux kernel version for incremental conversion to AWS." + }, + "minimumSupportedLinuxKernelForAzure": { + "type": "string", + "description": "Minimum supported Linux kernel version for incremental conversion to Azure." + }, + "disabledPrechecks": { + "type": "string", + "description": "List of prechecks to disable. (Can be set per snappable)." + }, + "enableFallbackToVmImportFromEbsSnapshot": { + "type": "boolean", + "description": "If set to true, runs cloud image conversion job with vm import on AWS for fallback." + }, + "cloudComputeServiceThriftPort": { + "type": "integer", + "format": "int32", + "description": "Thrift port number for instantiation_cloud_init.txt script." + }, + "cloudServiceParentCertsPath": { + "type": "string", + "description": "Cloud service parent certificates path." + }, + "cloudServiceSecurityTokenPath": { + "type": "string", + "description": "Cloud service security token path." + }, + "linuxConverterParentCertsPath": { + "type": "string", + "description": "Linux converter parent certificates path." + }, + "linuxConverterSecurityTokenPath": { + "type": "string", + "description": "Linux converter security token path." + }, + "windowsConverterParentCertsPath": { + "type": "string", + "description": "Windows converter parent certificates path." + }, + "windowsConverterSecurityTokenPath": { + "type": "string", + "description": "Windows converter security token path." + }, + "shouldSkipCloseDisk": { + "type": "boolean", + "description": "Skip closing disk at end of CreateRawDisk." + } + } + }, + "LocalDlcConfig": { + "type": "object", + "properties": { + "dlcServiceNumThreads": { + "type": "integer", + "format": "int32", + "description": "Number of parallel threads to execute RPCs in DLC service." + }, + "dlcNumLogFiles": { + "type": "integer", + "format": "int32", + "description": "Number of log files for DLC service." + } + } + }, + "LocalEnvisionConfig": { + "type": "object", + "properties": { + "sqliteCacheSizePerNodeInKiB": { + "type": "integer", + "format": "int32", + "description": "Size of memory cache per SQLite process in kibibytes. These SQLite dbs are used for producing reports." + }, + "generateReportDataSourcesJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent jobs per node to generate report data sources." + }, + "generateReportDataSourcesJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval to run the generate report data source jobs in minutes." + }, + "generateReportDataSourcesFrequentJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval to run the generate report data source frequent jobs in minutes." + }, + "generateCustomReportJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent jobs per node to generate custom reports." 
+ }, + "emailReportsJobFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Interval for email reports jobs in minutes." + }, + "cleanupReportTmpDirJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent jobs per node to clean report tmp files." + }, + "emailReportOneTimeJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent email now job per node." + } + } + }, + "LocalFailureTestingConfig": { + "type": "object", + "properties": { + "failureInjectionServerThriftPort": { + "type": "integer", + "format": "int32", + "description": "Thrift port failure injection server." + }, + "failureInjectionNumLogFiles": { + "type": "integer", + "format": "int32", + "description": "Number of log files to keep for failure injection." + }, + "skipFailureCleanupOnStart": { + "type": "boolean", + "description": "Flag to skip failure cleanup on start." + } + } + }, + "LocalFilesetConfig": { + "type": "object", + "properties": { + "filesetDataFetchPartitionsPerNode": { + "type": "integer", + "format": "int32", + "description": "This value specifies the global maximum number of cluster-wide fileset fetch job partitions that can concurrently run on a single node, shared across all jobs." + }, + "filesetRestorePartitionsPerNode": { + "type": "integer", + "format": "int32", + "description": "This value specifies the global maximum number of cluster-wide fileset restore job partitions that can concurrently run on a single node, shared across all jobs." + }, + "filesetIngestUseFlash": { + "type": "integer", + "format": "int32", + "description": "Whether to use flash for fileset ingest. This creates MJF on flash. This is enabled only for passthrough currently." + }, + "filesetSnapshotJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent fileset snapshot jobs per node." + }, + "filesetRestoreFileJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent jobs to restore files to hosts per node." + }, + "filesetMaxParallelFetchConnectionsPerSnapshot": { + "type": "integer", + "format": "int32", + "description": "Maximum number of parallel fetch connections per snapshot." + }, + "filesetNasMaxParallelFetchConnectionsPerSnapshot": { + "type": "integer", + "format": "int32", + "description": "Maximum number of parallel fetch connections per snapshot on top of a NAS share." + } + } + }, + "LocalForgeConfig": { + "type": "object", + "properties": { + "nodeMonitorTaskThreadPoolSize": { + "type": "integer", + "format": "int32", + "description": "Thread pool size for node monitor tasks." + }, + "bootupOtherNodesCheckerNumThreads": { + "type": "integer", + "format": "int32", + "description": "Thread pool size for node monitor tasks." + }, + "ntpOrphanModeStratum": { + "type": "integer", + "format": "int32", + "description": "NTP enters orphan mode if no external server with a better (smaller) stratum than this value is accessible. Once NTP becomes an orphan leader, it adopts this stratum." + }, + "ntpMinPollIntervalLog2": { + "type": "integer", + "format": "int32", + "description": "log_2 of min polling interval of ntp servers." + }, + "ntpMaxPollIntervalLog2": { + "type": "integer", + "format": "int32", + "description": "log_2 of max polling interval of ntp servers." + }, + "ntpConfigurationMode": { + "type": "string", + "description": "mode for configuring NTP." 
+ }, + "nodeApiNumThreads": { + "type": "integer", + "format": "int32", + "description": "Number of threads for node and cluster API implementations." + }, + "clusterClassificationCachePath": { + "type": "string", + "description": "Storage location for local copy of cluster classification." + }, + "autoInstallOnAddEnabledForPlatform": { + "type": "boolean", + "description": "Is auto install enabled on node add for this platform." + }, + "clusterNameFilePath": { + "type": "string", + "description": "Storage location for local copy of cluster name." + }, + "upgradeAuthTokenExpiryInHours": { + "type": "integer", + "format": "int32", + "description": "Upgrade service AuthToken expiration time in hours." + }, + "policyConfFile": { + "type": "string", + "description": "Config file for node monitor policies." + }, + "periodicTaskConfFile": { + "type": "string", + "description": "Config file for node monitor periodic tasks." + }, + "jobFetcherStatusPort": { + "type": "integer", + "format": "int32", + "description": "Port number for Job Fetcher Status service." + }, + "preBootstrapClusterConfigPort": { + "type": "integer", + "format": "int32", + "description": "Port number for Pre Bootstrap Cluster service." + }, + "clusterConfigPort": { + "type": "integer", + "format": "int32", + "description": "Port number for Cluster Config service." + }, + "clusterUuidFile": { + "type": "string", + "description": "File path for cluster UUID file." + }, + "jobFetcherServerPort": { + "type": "integer", + "format": "int32", + "description": "Port number for Job Fetcher Server service." + }, + "hierarchyCachePort": { + "type": "integer", + "format": "int32", + "description": "Port number for Hierarchy Cache service." + }, + "smbServiceManagerPort": { + "type": "integer", + "format": "int32", + "description": "Port number for Smb Service Manager service." + }, + "smbServiceThriftServerPort": { + "type": "integer", + "format": "int32", + "description": "Port number for Smb Service Thrift Server service." + }, + "smbServiceManagerThriftTimeout": { + "type": "integer", + "format": "int32", + "description": "Connection timeout for Smb Service Manager Thrift Server service." + }, + "checkFrequencyMillis": { + "type": "integer", + "format": "int32", + "description": "Default policy check frequency in milliseconds." + }, + "vlanUpdaterFrequencyMillis": { + "type": "integer", + "format": "int32", + "description": "VLAN updater frequency in milliseconds." + }, + "networkRouteMaintainerFrequencyMillis": { + "type": "integer", + "format": "int32", + "description": "Network Route Maintainer frequency in milliseconds." + }, + "localAdminStateMaintainerFrequencyMillis": { + "type": "integer", + "format": "int32", + "description": "Local Admin State Maintainer frequency in milliseconds." + }, + "lastNodeUpdateTimeFile": { + "type": "string", + "description": "File path for last node update time file." + }, + "statusHistoryDbFile": { + "type": "string", + "description": "File path for status history DB file." + }, + "nodeMonitorPort": { + "type": "integer", + "format": "int32", + "description": "Port number for Node Monitor service." + }, + "singleNodeModeFile": { + "type": "string", + "description": "Location of the file which tells if node is in single node mode." + }, + "azureCloudOnly": { + "type": "boolean", + "description": "If true, CloudOut and CloudOn is supported only for Azure cloud." + }, + "restartRootLvmServicesCmd": { + "type": "string", + "description": "Command for restarting root LVM services." 
+ }, + "setupDisksCmd": { + "type": "string", + "description": "Command for setting up disks." + }, + "setupRootLvmCmd": { + "type": "string", + "description": "Command for setting up root LVM." + }, + "diskUtilCmd": { + "type": "string", + "description": "Command for executing disk utility." + }, + "mountDisksCmd": { + "type": "string", + "description": "Command for mounting disks." + }, + "ipmiIpAddressCmd": { + "type": "string", + "description": "Command for fetching impi ip address." + }, + "ipmiIpAddressCmdTimeoutInSecs": { + "type": "integer", + "format": "int32", + "description": "Timeout for command for fetching impi ip address." + }, + "nodeMonitorNumLogFiles": { + "type": "integer", + "format": "int32", + "description": "Number of log files for node monitor." + }, + "tmpNumLogFiles": { + "type": "integer", + "format": "int32", + "description": "Temperory number of log files." + }, + "nodeLivenessPublisherFrequencyMillis": { + "type": "integer", + "format": "int32", + "description": "Frequency at which node should publish heartbeat." + }, + "diskCheckerFrequencyMillis": { + "type": "integer", + "format": "int32", + "description": "Disk checker run frequency in milliseconds." + }, + "ipmiCheckerFrequencyMillis": { + "type": "integer", + "format": "int32", + "description": "IPMI checker run frequency in milliseconds." + }, + "hostAuditCertsCollectorFrequencySeconds": { + "type": "integer", + "format": "int32", + "description": "Host Audit certs collector frequency in seconds." + }, + "hardwareStatPublisherFrequencyMillis": { + "type": "integer", + "format": "int32", + "description": "Hardware stats publisher watchdog frequency in milliseconds." + }, + "floatingIpsEnabled": { + "type": "boolean", + "description": "If set to true, floating IPs is enabled on the cluster." + }, + "floatingIpMapUpdaterFrequencyMillis": { + "type": "integer", + "format": "int32", + "description": "Floating IP map updater frequency in milliseconds." + }, + "floatingIpAssumeInvalidWindowMillis": { + "type": "integer", + "format": "int32", + "description": "Time in milliseconds after which floating IP is assumed invalid." + }, + "nodeStalenessCheckerFrequencyMillis": { + "type": "integer", + "format": "int32", + "description": "Node staleness checker run frequency in milliseconds." + }, + "nodeDieWindowMillis": { + "type": "integer", + "format": "int32", + "description": "Time in milliseconds after which node may be made stale." + }, + "avahiManagerFrequencyMillis": { + "type": "integer", + "format": "int32", + "description": "Avahi manager run frequency in milliseconds." + }, + "cassandraPostBootstrapStartTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time to wait in seconds post bootstrap for cassandra to start." + }, + "persistNodeTableIntervalInSeconds": { + "type": "integer", + "format": "int32", + "description": "Time interval in seconds to persist node table." + }, + "persistNodeTableRetries": { + "type": "integer", + "format": "int32", + "description": "Number of reties to persist node table." + }, + "singleNodeModePreBootstrap": { + "type": "boolean", + "description": "If true, the node is considered in single node mode pre bootstrap." + }, + "communityEditionMode": { + "type": "boolean", + "description": "Is the cluster in community edition mode." + }, + "communityEditionCheckCmd": { + "type": "string", + "description": "Command to check if cluster is community edition." 
+ }, + "registrationRequirement": { + "type": "string", + "description": "Indicates the registration requirement of the cluster." + }, + "bootstrapFile": { + "type": "string", + "description": "Location of bootstrap file." + }, + "nodeMonitorHeapSizeLowLimit": { + "type": "string", + "description": "Node Monitor heap size low limit (applies to 8GB RAM)." + }, + "nodeMonitorHeapSizeHighLimit": { + "type": "string", + "description": "Node Monitor heap size high limit (applies to 64GB RAM)." + }, + "nodeMonitorPerThreadStackSize": { + "type": "string", + "description": "Stack size for each thread in Node Monitor." + }, + "nodeMonitorDebuggerPort": { + "type": "integer", + "format": "int32", + "description": "Debugger port for Node Monitor." + }, + "nodeMonitorOomAdjust": { + "type": "integer", + "format": "int32", + "description": "Out of memory adjustment for Node Monitor." + }, + "clusterConfigHeapSizeLowLimit": { + "type": "string", + "description": "Cluster Config heap size low limit (applies to 8GB RAM)." + }, + "clusterConfigHeapSizeHighLimit": { + "type": "string", + "description": "Cluster Config heap size high limit (applies to 64GB RAM)." + }, + "clusterConfigPerThreadStackSize": { + "type": "string", + "description": "Stack size for each thread in Cluster Config." + }, + "clusterConfigDebuggerPort": { + "type": "integer", + "format": "int32", + "description": "Debugger port for Cluster Config." + }, + "nodeMonitorChecksNeededForOkStatus": { + "type": "integer", + "format": "int32", + "description": "Number of checks needed to pass for node monitor to be in okay status." + }, + "nodeMonitorIgnoreCheckFailuresMillis": { + "type": "integer", + "format": "int32", + "description": "Time in milliseconds to ignore check failures." + }, + "nodeMonitorMaxTaskRunHistoryEntry": { + "type": "integer", + "format": "int32", + "description": "The max number of task run history entries saved in for each node monitor task." + }, + "hardwareCheckerFrequencyMillis": { + "type": "integer", + "format": "int32", + "description": "Hardware Checker run frequency in milliseconds." + }, + "nodeResurrectionWindowMillis": { + "type": "integer", + "format": "int32", + "description": "Time in milliseconds within which node can be resurrected." + }, + "nodeAssumeDeadWindowMillis": { + "type": "integer", + "format": "int32", + "description": "Time in milliseconds after which node is assumed dead." + }, + "nodeMonitorUptimeGraceSeconds": { + "type": "integer", + "format": "int32", + "description": "Wait time in seconds before taking any corrective actions after reboot." + }, + "iscsiLoginTimeoutSeconds": { + "type": "integer", + "format": "int32", + "description": "iSCSI login timeout in seconds." + }, + "nodeStableWindowMillis": { + "type": "integer", + "format": "int32", + "description": "Wait time in milliseconds post transition from BAD status to consider node as stable." + }, + "clusterSshRsaBits": { + "type": "integer", + "format": "int32", + "description": "Number of RSA bits for cluster SSH key." + }, + "clusterSshUser": { + "type": "string", + "description": "User for inter-node SSH." + }, + "clusterSshPort": { + "type": "integer", + "format": "int32", + "description": "Port number for inter-node SSH." + }, + "clusterSshPrivKeyFile": { + "type": "string", + "description": "Keyfile for inter node ssh; this only exists post-bootstrap." + }, + "nodeHistoryDbFile": { + "type": "string", + "description": "File path for node history DB file." 
+ }, + "maxStatusHistoryRecords": { + "type": "integer", + "format": "int32", + "description": "Maximum number of records for status history." + }, + "checkHistoryDbFile": { + "type": "string", + "description": "File path which contains the history of all the checks run on the node." + }, + "maxCheckHistoryRecords": { + "type": "integer", + "format": "int32", + "description": "Maximum number of check history records to be kept." + }, + "takeOverWatchdog": { + "type": "boolean", + "description": "If set to true, node monitor takes over the watchdog functionality." + }, + "brikSerialCmd": { + "type": "string", + "description": "Command to get the brik serial." + }, + "brikSerialCmdTimeoutInSecs": { + "type": "integer", + "format": "int32", + "description": "Timeout for beik serial command." + }, + "isNtpInSyncCmd": { + "type": "string", + "description": "Command to check if NTP is in sync." + }, + "configureIpmi": { + "type": "boolean", + "description": "Flag to indicate if IPMI should be configured." + }, + "setupIpmiNetworkingCmd": { + "type": "string", + "description": "Command to setup Ipmi networking." + }, + "hasDualNic": { + "type": "boolean", + "description": "If set to true, cluster has dual NIC." + }, + "networkInterfaceChecker": { + "type": "string", + "description": "Script to check network interface." + }, + "enableHardwareAlert": { + "type": "boolean", + "description": "If true, hardware alerts is enabled." + }, + "enableDiskAlert": { + "type": "boolean", + "description": "If true, disk alerts is enabled." + }, + "enableNicAlert": { + "type": "boolean", + "description": "If true, Nic alerts is enabled." + }, + "networkInterfaceStateDir": { + "type": "string", + "description": "Location of network interface state files." + }, + "ipSetup": { + "type": "string", + "description": "Script to configure ip/network." + }, + "interfaceIpInfo": { + "type": "string", + "description": "Script to get interface IP information." + }, + "nodeStateDir": { + "type": "string", + "description": "Location of node state files." + }, + "manageVlan": { + "type": "string", + "description": "Script to manage Vlan." + }, + "metadataEncryptionUtilsKeyDir": { + "type": "string", + "description": "Path of metadata encryption utils key directory." + }, + "metadataEncryptionUtilsKeyFilePrefix": { + "type": "string", + "description": "Prefix for metadata encryption utils key file." + }, + "metadataEncryptionUtilsOldKeySignalFile": { + "type": "string", + "description": "Path for metadata encryption utils old key signal file." + }, + "metadataEncryptionUtilsKeyDestDir": { + "type": "string", + "description": "Metadata EncryptionUtils key file backup destination diretory." + }, + "transportKeyStoreFile": { + "type": "string", + "description": "Path to transport key store file." + }, + "transportKeyStorePassword": { + "type": "string", + "description": "Transport key store password." + }, + "transportKeyStoreOwnerGroup": { + "type": "string", + "description": "Transport key store owner group." + }, + "preBootstrapTransportKeyStoreFile": { + "type": "string", + "description": "Path to pre bootstrap transport key store file." + }, + "preBootstrapTransportKeyStorePassword": { + "type": "string", + "description": "Pre bootstrap transport key store password." + }, + "generateCertPair": { + "type": "string", + "description": "Script to generate cert pair." + }, + "generateJavaKeyStore": { + "type": "string", + "description": "Script to generate java key store." 
+ }, + "generateCockroachDbCertPair": { + "type": "string", + "description": "Script to generate CockroachDb cert pair." + }, + "localclusterOkInventory": { + "type": "string", + "description": "File to indicate if localcluster." + }, + "serviceControlScript": { + "type": "string", + "description": "Script for service control." + }, + "versionProperty": { + "type": "string", + "description": "File path for version property." + }, + "enableJavaNativeMemoryTracking": { + "type": "integer", + "format": "int32", + "description": "Flag to enable native memory tracking in Java." + }, + "checkNodeCanJoinClusterFile": { + "type": "string", + "description": "Location of script which checks whether a node can join a Cassandra cluster." + }, + "floatingIpToolCmd": { + "type": "string", + "description": "Script used to manage floating IPs." + }, + "stagingSetupToolCmd": { + "type": "string", + "description": "Cmd used to manage staging area." + }, + "stagingPath": { + "type": "string", + "description": "Location of staging area." + }, + "stagingUsbPath": { + "type": "string", + "description": "Location of USB area." + }, + "upgradeStagingLocalTryIntervalSecs": { + "type": "integer", + "format": "int32", + "description": "Time interval in seconds before next upgrade staging try." + }, + "upgradeStagingLocalTries": { + "type": "integer", + "format": "int32", + "description": "Number of upgrade staging tries." + }, + "getIpmiNetworkingCmd": { + "type": "string", + "description": "Script to get IPMI configuration on a node." + }, + "diskHealthCheckFailureStreakThreshold": { + "type": "integer", + "format": "int32", + "description": "After how many failures update disk state to FAILED." + }, + "diskHealthCheckSuccessStreakThreshold": { + "type": "integer", + "format": "int32", + "description": "Update disk state to ACTIVE if this many consecutive health checks pass." + }, + "createTopLevelFilesystemDirsCmd": { + "type": "string", + "description": "Command to create top level filesystem directories." + }, + "fstabMaxLinesPerShard": { + "type": "integer", + "format": "int32", + "description": "Maximum lines per shard for fstab." + }, + "platformMinCpuCount": { + "type": "integer", + "format": "int32", + "description": "Minimum number of CPUs required for Edge VM." + }, + "platformMinRamMb": { + "type": "integer", + "format": "int32", + "description": "Minimum amount of RAM required for Edge VM." + }, + "platformMinOsDriveIops": { + "type": "integer", + "format": "int32", + "description": "Minimum number of IOPS required on OS disk for Edge VM." + }, + "platformMinDataDriveIops": { + "type": "integer", + "format": "int32", + "description": "Minimum number of IOPS required on Data disk for Edge VM." + }, + "diskCheckerCmdTimeoutInSecs": { + "type": "integer", + "format": "int32", + "description": "Disk checker command timeout in secs." + }, + "diskCheckerDiskSetupTimeoutInSecs": { + "type": "integer", + "format": "int32", + "description": "Disk checker disk setup timeout in secs." + }, + "loopDeviceManager": { + "type": "string", + "description": "Path of loop device manager script." + }, + "matrixUuid": { + "type": "string", + "description": "Matrix UUID." + }, + "ipmiManager": { + "type": "string", + "description": "Path to IPMI manager script." + }, + "sambaSharesFile": { + "type": "string", + "description": "Location of samba shares file." + }, + "ntpHardwareCheckFrequencyMillis": { + "type": "integer", + "format": "int32", + "description": "Frequency to run NTP hardware check in milliseconds." 
+ }, + "ntpNotificationStreakThreshold": { + "type": "integer", + "format": "int32", + "description": "After how many NTP hardware check failures, notfication should be raised." + }, + "ntpNotificationDedupeWindowMillis": { + "type": "integer", + "format": "int32", + "description": "For what duration notifications should be deduped." + }, + "clusterSoftwareDir": { + "type": "string", + "description": "Cluster software directory, where tarball is kept." + }, + "installLockFile": { + "type": "string", + "description": "File to indicate if install is under progress on a node." + }, + "installFailedFile": { + "type": "string", + "description": "File to indicate if install failed on a node." + }, + "autoInstallStagingDir": { + "type": "string", + "description": "Staging dir for auto install." + }, + "clusterConfigDrivenInstallLogFile": { + "type": "string", + "description": "Log file for cluster config driven install." + }, + "installBlackListFile": { + "type": "string", + "description": "File for blacklisted install paths." + }, + "installRequestFile": { + "type": "string", + "description": "File for cluster install request." + }, + "cdmPackagesInfo": { + "type": "string", + "description": "File with information about preloaded Rubrik CDM packages." + }, + "sftpToNodeCmd": { + "type": "string", + "description": "Command to copy files from the local Rubrik node to another node." + }, + "preBootstrapNodeHistoryDbMaxRecords": { + "type": "integer", + "format": "int32", + "description": "Max records for pre bootstrap node history DB." + }, + "preBootstrapNodeHistoryDbFile": { + "type": "string", + "description": "File path for pre bootstrap node history DB file." + }, + "defaultIpmiAdminPassword": { + "type": "string", + "description": "Default ipmi admin user password." + }, + "rebootRequiredIndicatorFile": { + "type": "string", + "description": "File to indicate if reboot is required." + }, + "intentionalRebootIndicatorFile": { + "type": "string", + "description": "File to indicate intentional reboot." + }, + "forceRebooterToolPath": { + "type": "string", + "description": "Path to the tool to perform a \"safe\" force reboot of a node." + }, + "forceRebootIndicatorFile": { + "type": "string", + "description": "File to track when Node Monitor forcefully rebooted a node." + }, + "stalenessForForceRebootHours": { + "type": "integer", + "format": "int32", + "description": "After how many hours of staleness, the node should be rebooted forcefully." + }, + "vmMinFreeKBytes": { + "type": "integer", + "format": "int32", + "description": "Amount of memory to be kept free for kernel." + }, + "preserveDataDrivesOnResetIndicatorFile": { + "type": "string", + "description": "Indicator file to preserve data drives on reset." + }, + "svcLockFile": { + "type": "string", + "description": "Lock file for coordination between scripts which restart services." + }, + "biosDiskCheckFile": { + "type": "string", + "description": "Path to the script to check bios disk." + }, + "loopDeviceUpgradeTimeoutInSecs": { + "type": "integer", + "format": "int32", + "description": "Loop device upgrade timeout in seconds." + }, + "policyScriptGeneratorCmd": { + "type": "string", + "description": "Script to generate policy script." + }, + "validateIPConfigsOnBootstrapAndAddNode": { + "type": "boolean", + "description": "Flag to validate IP configs on bootstrap & add node." + }, + "skipNodeStalenessCheckIndicatorFile": { + "type": "string", + "description": "Indicator file to skip node staleness check." 
+ }, + "textFileUpdateTool": { + "type": "string", + "description": "Path to text file update tool." + }, + "dhclientRestartTool": { + "type": "string", + "description": "Path to dhclient restart tool." + }, + "networkInterfaceInformationTool": { + "type": "string", + "description": "Path to text network interface information tool." + }, + "globalManagerAgentCmd": { + "type": "string", + "description": "Path to global manager agent command." + }, + "nodeMonitorStartAwaitNonUnknownState": { + "type": "boolean", + "description": "Flag to wait until node status is not UNKNOWN." + }, + "verifyBrikSerialsOnBootstrap": { + "type": "boolean", + "description": "Flag to verify brik serials on bootstrap." + }, + "manageIpv6Address": { + "type": "string", + "description": "Path to manage IPv6 address script." + }, + "ipv6UpdaterFrequencyMillis": { + "type": "integer", + "format": "int32", + "description": "Update frequency for IPv6 updater in milliseconds." + }, + "dnsAndNtpMaintainerFrequencyMillis": { + "type": "integer", + "format": "int32", + "description": "Update frequency for DNS & NTP maintainer in milliseconds." + }, + "clusterClassificationMaintainerFrequencyMinutes": { + "type": "integer", + "format": "int32", + "description": "Update frequency for cluster classification maintainer in minutes." + }, + "snmpUpdaterFrequencyMillis": { + "type": "integer", + "format": "int32", + "description": "Update frequency for snmp updater in milliseconds." + }, + "snmpCommunityStringKeywordStr": { + "type": "string", + "description": "SNMP community string keyword." + }, + "snmpTrapCommunityStringKeywordStr": { + "type": "string", + "description": "SNMP trap community string keyword." + }, + "snmpCreateUserKeywordStr": { + "type": "string", + "description": "SNMP create user keyword." + }, + "snmpUserKeywordStr": { + "type": "string", + "description": "SNMP user keyword." + }, + "snmpTrapReceiverV2KeywordStr": { + "type": "string", + "description": "SNMP trap receiver v2 keyword." + }, + "snmpTrapReceiverV3KeywordStr": { + "type": "string", + "description": "SNMP trap receiver v3 keyword." + }, + "snmpCommunityViewStr": { + "type": "string", + "description": "SNMP community view keyword." + }, + "snmpUserStr": { + "type": "string", + "description": "SNMP user keyword." + }, + "snmpManagementTool": { + "type": "string", + "description": "Path to global manager agent command." + }, + "defaultGatewayTool": { + "type": "string", + "description": "Path to global manager agent command." + }, + "dnsLookupPreferenceTool": { + "type": "string", + "description": "Path to global manager agent command." + }, + "hierarchyCacheHeapSizeLowLimit": { + "type": "string", + "description": "Hierarchy cache heap size low limit." + }, + "hierarchyCacheHeapSizeHighLimit": { + "type": "string", + "description": "Hierarchy cache heap size high limit." + }, + "hierarchyCachePerThreadStackSize": { + "type": "string", + "description": "Hierarchy cache per thread stack size." + }, + "smbServiceManagerHeapSizeLowLimit": { + "type": "string", + "description": "SMB service manager heap size low limit." + }, + "smbServiceManagerHeapSizeHighLimit": { + "type": "string", + "description": "SMB service manager heap size high limit." + }, + "smbServiceManagerPerThreadStackSize": { + "type": "string", + "description": "SMB service manager per thread stack size." + }, + "smbServiceHeapSizeLowLimit": { + "type": "string", + "description": "SMB service heap size low limit." 
+ }, + "smbServiceHeapSizeHighLimit": { + "type": "string", + "description": "SMB service heap size high limit." + }, + "smbServicePerThreadStackSize": { + "type": "string", + "description": "SMB service per thread stack size." + }, + "emailCredentialsFetcherFrequencyMillis": { + "type": "integer", + "format": "int32", + "description": "Email credentials fetcher frequency in milliseconds." + }, + "emailCredentialsFile": { + "type": "string", + "description": "Location of email credentials file." + }, + "adminEmailsFile": { + "type": "string", + "description": "Location of admin emails file." + }, + "lastEmailTimeFile": { + "type": "string", + "description": "Loacaton of last email time file." + }, + "clusterDownEmailFrequencyMillis": { + "type": "integer", + "format": "int32", + "description": "Cluster down email frequency in milliseconds." + }, + "rubrikMibPolicyFile": { + "type": "string", + "description": "Location of rubrik mib policy file." + }, + "snmpMibUpdaterFrequencyMillis": { + "type": "integer", + "format": "int32", + "description": "SNMP mib updater frequency in milliseconds." + }, + "clusterScopeNodeNameForSnmpMibEntry": { + "type": "string", + "description": "Cluster scope node name for snmp mib entry." + }, + "ipv6AddressInfoTool": { + "type": "string", + "description": "Path to IPv6 address info tool." + }, + "miscConfigurationMaintainerFrequencyMillis": { + "type": "integer", + "format": "int32", + "description": "Misc configuration maintainer frequency in milliseconds." + }, + "reIpValidateNetworkEnvironment": { + "type": "boolean", + "description": "Flag to perform network environment check during ReIp." + }, + "reIpRecoveryScriptPath": { + "type": "string", + "description": "ReIp recovery script path." + }, + "cassandraBackupForReIpRecovery": { + "type": "string", + "description": "Cassandra backup for reip recovery." + }, + "enableMetadataRestoreForReIpRecovery": { + "type": "string", + "description": "Flag to enable metadata restore for ReIP recovery." + }, + "brikNotificationMessageOid": { + "type": "string", + "description": "Brik notification message Oid." + }, + "globalManagerKeyWrapCmd": { + "type": "string", + "description": "Command to get global manager wrapped key." + }, + "snmpMibDownloadDirectory": { + "type": "string", + "description": "Directory location to download SNMP mib." + }, + "nodeResetPermittanceUpdaterFrequencyMills": { + "type": "integer", + "format": "int32", + "description": "Frequency in milliseconds for node reset permittance updater." + }, + "findBadDiskScriptPath": { + "type": "string", + "description": "File path of find bad disk script." + }, + "liveMountPrecheckCmd": { + "type": "string", + "description": "Command for live mount precheck." + }, + "scanStatsPrecheckCmd": { + "type": "string", + "description": "Command for checking scan stats." + }, + "diskSpaceCheckCmd": { + "type": "string", + "description": "Command for checking disk space and usage." + }, + "permanentFalseFile": { + "type": "string", + "description": "File to indicate if decommission was run with the permanent flag set to false." + }, + "primaryNetworkInterface": { + "type": "string", + "description": "primary Network Interface." + }, + "secondaryNetworkInterface": { + "type": "string", + "description": "secondary Network Interface." + }, + "broadcastInterface": { + "type": "string", + "description": "Broadcast interface." + }, + "versionFile": { + "type": "string", + "description": "Localtion of version file." 
+ }, + "numLoopDevices": { + "type": "integer", + "format": "int32", + "description": "Maximum number of loop devices." + }, + "transportPrivKeyFile": { + "type": "string", + "description": "Path to transport private key file." + }, + "transportCertFile": { + "type": "string", + "description": "Path to transport certificate file." + }, + "ansibleRoot": { + "type": "string", + "description": "Location of ansible." + }, + "localclusterInventory": { + "type": "string", + "description": "Location of localcluster inventory." + }, + "stagingLocalPath": { + "type": "string", + "description": "Location of local staging area." + }, + "stagingUpgradePath": { + "type": "string", + "description": "Location of upgrade staging area." + }, + "stagingInternalPath": { + "type": "string", + "description": "Location of internal staging area." + }, + "certsBackingFileSize": { + "type": "string", + "description": "Certs backing file size." + }, + "sambaBackingFileSize": { + "type": "string", + "description": "Samba backing file size." + }, + "sambaPath": { + "type": "string", + "description": "Location of samba." + }, + "cassandraSnapshotSizeMultiplier": { + "type": "integer", + "format": "int32", + "description": "Cassandra snapshot size multiplier." + }, + "cassandraSnapshotMaxDisksToUse": { + "type": "integer", + "format": "int32", + "description": "How many disks on the node to use for storing cassandra snapshots. -1 means use all disks." + }, + "certsPath": { + "type": "string", + "description": "Path to certs file." + }, + "stagingGroup": { + "type": "string", + "description": "Group name for the staging area." + }, + "useFsUuidAsWwnFile": { + "type": "string", + "description": "File to indicate if filesystem uuid should be used as wwn of disk device." + }, + "diskMetadataFilename": { + "type": "string", + "description": "Disk metadata file name." + }, + "clusterSoftwareDirForUpgrades": { + "type": "string", + "description": "Tarballs downloaded via API for upgrade are staged here." + }, + "denseMigration": { + "type": "string", + "description": "Indicates that node is under standard to dense migration." + }, + "cloudProviderFile": { + "type": "string", + "description": "File path for cloud cluster cloud provider." + }, + "sdresetSuccessFile": { + "type": "string", + "description": "File to indicate sdreset success." + }, + "rebootedIndicatorFile": { + "type": "string", + "description": "File path created to indicate reboot to Node Monitor." + }, + "sdresetOnBootstrapFailure": { + "type": "boolean", + "description": "Flag to indicate whether to reset on bootstrap failure." + }, + "diskCheckerUsedSpaceCmdTimeoutInSecs": { + "type": "integer", + "format": "int32", + "description": "Disk checker used space command timeout in seconds." + }, + "removeNodesMaxChildInstances": { + "type": "integer", + "format": "int32", + "description": "Max number of node removal child job instances per node." + }, + "nodeIdFile": { + "type": "string", + "description": "File path created to store nodeId." + }, + "changeHostnameCmd": { + "type": "string", + "description": "Command for changing hostname." + }, + "cloudVmMetadataServiceUrl": { + "type": "string", + "description": "Base URL for the metadata service which runs on cloud VM's." + }, + "sparkAgentPrebootstrapHelper": { + "type": "string", + "description": "Path to spark agent prebootstrap helper tool." 
+ } + } + }, + "LocalGalactusConfig": { + "type": "object", + "properties": { + "cloudNativeSourceAddJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent add cloud native source jobs per node." + }, + "cloudNativeSourceRefreshJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent cloud native Source refresh jobs per node." + }, + "cloudNativeSourceDeleteJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent delete cloud native source jobs per node." + }, + "cloudNativeSourceGcJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent gc cloud native source jobs per node." + }, + "cloudNativeVmSnapshotJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent cloud native vm snapshot jobs per node." + }, + "inplaceRestoreJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent inplace restore jobs per node." + }, + "remoteCloudNativeIndexJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent remote cloud native index jobs per node." + }, + "remoteCloudNativeCreateDownloadableJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent remote cloud native download jobs per node." + }, + "downloadCloudNativeFileJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent download cloud native file jobs per node." + }, + "snapshotIntegrityJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent inplace restore jobs per node." + }, + "awsCloudNativeMigrateIndexJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent migrate cloud native index jobs per node." + }, + "cloudNativeMigrateMetadataJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent migrate cloud native source jobs per node." + } + } + }, + "LocalHawkeyeConfig": { + "type": "object", + "properties": { + "sorting_builder_memory_budget_bytes": { + "type": "integer", + "format": "int32", + "description": "SSTable buffer budget size in byte used to build filesystem metadata." + }, + "searchServerIp": { + "type": "string", + "description": "Search server IP address." + }, + "searchServerPort": { + "type": "integer", + "format": "int32", + "description": "Search server port." + }, + "searchServerSslPort": { + "type": "integer", + "format": "int32", + "description": "Search server SSL port." + }, + "searchServerLogBufLevel": { + "type": "integer", + "format": "int32", + "description": "Search server log buffer level." + }, + "searchServerLogVerbosity": { + "type": "integer", + "format": "int32", + "description": "Search server log verbosity." + }, + "searchServerExecInstrumentation": { + "type": "string", + "description": "Search server execution instrumentation." + }, + "printDebugInfoForDownload": { + "type": "boolean", + "description": "True if print debug information for download." + }, + "indexFilesExpirationCutOffInDays": { + "type": "integer", + "format": "int32", + "description": "Index files expiration cut off in days." 
+ }, + "indexServerIp": { + "type": "string", + "description": "Index server IP address." + }, + "indexServerPort": { + "type": "integer", + "format": "int32", + "description": "Index server port." + }, + "indexServerLogBufLevel": { + "type": "integer", + "format": "int32", + "description": "Index server log buffer level." + }, + "indexServerLogVerbosity": { + "type": "integer", + "format": "int32", + "description": "Index server log verbosity." + }, + "indexServerExecInstrumentation": { + "type": "string", + "description": "Index server execution instrumentation." + } + } + }, + "LocalHealthMonitorConfig": { + "type": "object", + "properties": { + "healthMonitorDebuggerPort": { + "type": "integer", + "format": "int32", + "description": "Debugger port for Health Monitor." + }, + "healthMonitorNumLogFiles": { + "type": "integer", + "format": "int32", + "description": "Number of log files for health monitor." + }, + "enableHealthMonitor": { + "type": "boolean", + "description": "Feature flag to enable health monitor service." + }, + "healthMonitorHeapSizeLowLimit": { + "type": "string", + "description": "Health Monitor heap size low limit (applies to 8GB RAM)." + }, + "healthMonitorHeapSizeHighLimit": { + "type": "string", + "description": "Health Monitor heap size high limit (applies to 64GB RAM)." + }, + "healthMonitorPerThreadStackSize": { + "type": "string", + "description": "Stack size for each thread in Health Monitor." + }, + "healthMonitorPolicyConfFile": { + "type": "string", + "description": "Config file for health monitor policies." + }, + "healthMonitorPort": { + "type": "integer", + "format": "int32", + "description": "Port number for Health Monitor service." + }, + "checksNeededForOkStatus": { + "type": "integer", + "format": "int32", + "description": "Number of checks needed to pass for health monitor to be in OK status." + } + } + }, + "LocalInfinityConfig": { + "type": "object", + "properties": { + "maxConcurrentIngestsPerNutanixVm": { + "type": "integer", + "format": "int32", + "description": "Maximum number of Nutanix disks Rubrik should ingest in parallel." + }, + "maxConcurrentExportsPerNutanixVm": { + "type": "integer", + "format": "int32", + "description": "Maximum number of Nutanix disks Rubrik should export in parallel." + }, + "maxConcurrentExportsPerStorageArrayVolumeGroup": { + "type": "integer", + "format": "int32", + "description": "Maximum number of Storage Array volumes that can be exported in parallel." + }, + "allowFloatingIpForMountOnRemoteNode": { + "type": "boolean", + "description": "Allow VMware Live Mount to use floating ip regardless of which node the the MOUNT job is run on. If false the node must own a floating IP in order to use it." + }, + "overrideFloatingIpForFlashDense": { + "type": "boolean", + "description": "Override floating ips for VMware live mounts if there exist flash-dense nodes in the cluster. If true the Live Mount job will run on a flash-dense node even if the node does not have a floating ip." + }, + "vcdExportJobSpawnPollingDurationInMillis": { + "type": "integer", + "format": "int32", + "description": "The periodicity at which the vcd vapp export job will poll for completion of child export jobs." + }, + "parallelizedDbLogDelayNotificationMaxChildJobsPerNode": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent child job instances for the parallelized database log backup delay notification job." 
+ }, + "agentServerPort": { + "type": "integer", + "format": "int32", + "description": "Agent server port." + }, + "agentServerSecurePort": { + "type": "integer", + "format": "int32", + "description": "Agent server secure port." + }, + "agentServerIp": { + "type": "string", + "description": "Agent server ip." + }, + "agentServerLogVerbosity": { + "type": "integer", + "format": "int32", + "description": "Agent server log verbosity." + }, + "agentServerExecInstrumentation": { + "type": "string", + "description": "Agent server execution instrumentation." + }, + "agentServerMockMode": { + "type": "boolean", + "description": "In mock mode we behave as if we are contacting the external backup agents but we are just pretending they exist. This is used to create metadata stress on Cassandra and REST apis." + }, + "agentServerNumLogFiles": { + "type": "integer", + "format": "int32", + "description": "Number of log files in agent server." + }, + "mke2fsOptions": { + "type": "string", + "description": "Options passed to mke2fs command line." + }, + "mountExt4Options": { + "type": "string", + "description": "Options passed to mount ext4 command line." + }, + "doSyncExt4Writes": { + "type": "boolean", + "description": "Whether to do synchronous ext4 writes for Filesets." + }, + "internalBackupAgentPort": { + "type": "integer", + "format": "int32", + "description": "Internal backup agent port." + }, + "internalBackupAgentLogVerbosity": { + "type": "integer", + "format": "int32", + "description": "Internal backup agent log verbosity." + }, + "internalBackupAgentExecInstrumentation": { + "type": "string", + "description": "Internal backup agent execution instrumentation." + }, + "internalBackupAgentNumLogFiles": { + "type": "integer", + "format": "int32", + "description": "Number of log files in internal backup agent." + }, + "vssAgentDirectory": { + "type": "string", + "description": "Directory where the VSS agent package is stored." + }, + "vssGuestProgramCheckInterval": { + "type": "integer", + "format": "int32", + "description": "Interval (in milliseconds) to check the status of VSS guest programs." + }, + "vssGuestProgramTimeout": { + "type": "integer", + "format": "int32", + "description": "Timeout (in milliseconds) to wait for a VSS guest program to finish." + } + } + }, + "LocalJarvisConfig": { + "type": "object", + "properties": { + "sfdcOrgId": { + "type": "string", + "description": "Salesforce Organization ID for Rubrik." + }, + "sfdcHostname": { + "type": "string", + "description": "Salesforce hostname for Rubrik support portal." + }, + "sfdcAuthEndPoint": { + "type": "string", + "description": "Salesforce community portal endpoint for login." + }, + "sfdcRegistrationSvcEndPoint": { + "type": "string", + "description": "Salesforce community portal endpoint for cluster registration service." + }, + "sfdcChiselPort": { + "type": "string", + "description": "Local port used by chisel for proxy to SFDC." + }, + "heartbeatChiselPort": { + "type": "string", + "description": "Local port used by chisel for the proxy to the heartbeat server." + }, + "heartbeatHostname": { + "type": "string", + "description": "Local port used by chisel for the proxy to the heartbeat server." + }, + "hekaTelemetryConfig": { + "type": "string", + "description": "Heka external output config file destination." + }, + "telegrafTelemetryConfig": { + "type": "string", + "description": "Telegraf external output config file destination." 
+ }, + "opentracingTelemetryConfig": { + "type": "string", + "description": "Opentracing external output config file destination." + }, + "chiselServer": { + "type": "string", + "description": "Target Chisel server in production or lab for testing." + }, + "chiselLiveStatsLocalPort": { + "type": "integer", + "format": "int32", + "description": "Chisel live stats local port." + }, + "chiselLiveStatsRemoteHostPort": { + "type": "string", + "description": "Chisel live stats remote host and port." + }, + "chiselLiveTaggedStatsLocalPort": { + "type": "integer", + "format": "int32", + "description": "Chisel live tagged stats local port." + }, + "chiselLiveTaggedStatsRemoteHostPort": { + "type": "string", + "description": "Chisel live tagged stats remote host and port." + }, + "chiselHekaLogsLocalPort": { + "type": "integer", + "format": "int32", + "description": "Chisel Heka logs local port." + }, + "chiselHekaLogsRemoteHostPort": { + "type": "string", + "description": "Chisel Heka logs remote host and port." + }, + "chiselFluentdLogsLocalPort": { + "type": "integer", + "format": "int32", + "description": "Chisel Fluentd logs local port." + }, + "chiselFluentdLogsRemoteHostPort": { + "type": "string", + "description": "Chisel Fluentd logs remote host and port." + }, + "chiselLiveTracesLocalPort": { + "type": "integer", + "format": "int32", + "description": "Opentracing GRPC collector port (Chisel receives and forwards)." + }, + "chiselLiveTracesRemoteHostPort": { + "type": "string", + "description": "Chisel live traces (Jaeger Agent) remote host and port." + }, + "influxdbHttpPort": { + "type": "integer", + "format": "int32", + "description": "InfluxDB HTTP port." + }, + "influxdbVirtMemLimit": { + "type": "integer", + "format": "int32", + "description": "InfluxDB virtual memory limit." + }, + "influxdbPhysMemLimit": { + "type": "integer", + "format": "int64", + "description": "InfluxDB physical memory limit." + }, + "telegrafPort": { + "type": "integer", + "format": "int32", + "description": "Notify buffer size threshold for CDP log receiver in bytes." + }, + "influxgraphHttpPort": { + "type": "integer", + "format": "int32", + "description": "InfluxGraph HTTP port." + }, + "diamondMemLimit": { + "type": "integer", + "format": "int32", + "description": "Diamond memory limit." + } + } + }, + "LocalLambdaConfig": { + "type": "object", + "properties": { + "detectRansomwareJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent lambda ransomware detection jobs per node." + }, + "uploadFmdJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent lambda upload FMD jobs per node." + }, + "analyzeContentJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent lambda content analysis jobs per node." + }, + "analyzeContentInParallelJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent lambda content analysis in parallel jobs per node." + }, + "analyzeContentInParallelChildJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent lambda content analysis in parallel child jobs per node." + }, + "sendAuditConfigJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent sendAuditConfig jobs per node." 
+ }, + "lambdaAnalyzerConcurrency": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent lambda analysis threads per job." + }, + "tikaForkParserChildren": { + "type": "integer", + "format": "int32", + "description": "Number of child processes created by Tika fork parser." + }, + "resolveSidJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "A value that specifies the maximum number of resolveSid jobs that can run concurrently on a single node." + }, + "activeDirectoryExportJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent active directory export jobs per node." + }, + "lambdaServerLogBufLevel": { + "type": "integer", + "format": "int32", + "description": "Lambda server log buffer level." + }, + "lambdaServerPort": { + "type": "integer", + "format": "int32", + "description": "Lambda server port." + }, + "lambdaServerLogVerbosity": { + "type": "integer", + "format": "int32", + "description": "Lambda server log verbosity." + }, + "lambdaServerExecInstrumentation": { + "type": "string", + "description": "Lambda server execution instrumentation." + }, + "lambdaAnalyzerServerPort": { + "type": "integer", + "format": "int32", + "description": "Lambda content analyzer server port." + }, + "lambdaAnalyzerServerPprofPort": { + "type": "integer", + "format": "int32", + "description": "Lambda content analyzer pprof server port." + }, + "lambdaParserServiceHeapSizeLowLimit": { + "type": "string", + "description": "Low limit for heap size for lambda parser service (ideally to be scaled using local config)." + }, + "lambdaParserServiceHeapSizeHighLimit": { + "type": "string", + "description": "High limit for heap size for lambda parser service (ideally to be scaled using local config)." + }, + "lambdaParserServicePerThreadStackSize": { + "type": "string", + "description": "Per thread stack size for lambda parser service." + }, + "lambdaParserServiceDebuggerPort": { + "type": "integer", + "format": "int32", + "description": "Lambda parser service debugger port." + }, + "lambdaParserServiceThriftPort": { + "type": "integer", + "format": "int32", + "description": "Lambda parser service thrift port." + } + } + }, + "LocalManagedVolumeConfig": { + "type": "object", + "properties": { + "maxSizePerChannelInGb": { + "type": "integer", + "format": "int32", + "description": "Maximum size per managed volume channel in GBs." + }, + "maxSizePerChannelNoDedupeInGb": { + "type": "integer", + "format": "int32", + "description": "Maximum size per managed volume channel in GBs if deduplication is not used." + }, + "maxChannelsPerNode": { + "type": "integer", + "format": "int32", + "description": "Maximum number of channels per node for all managed volumes of the cluster." + }, + "maxSlaBasedChannelsPerNode": { + "type": "integer", + "format": "int32", + "description": "Maximum number of channels per node for all SLA Managed Volumes of the cluster." + }, + "maxLiveMountChannelsPerNode": { + "type": "integer", + "format": "int32", + "description": "Maximum number of live mount channels per node for all managed volumes of the cluster." + }, + "managedVolumeResizeJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent managed volume resize jobs per node." + }, + "managedVolumeResetJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent managed volume reset jobs per node." 
+ }, + "waspServerLogBufLevel": { + "type": "integer", + "format": "int32", + "description": "Wasp server log buffer level." + }, + "waspServerPort": { + "type": "integer", + "format": "int32", + "description": "Wasp server port." + }, + "waspServerLogVerbosity": { + "type": "integer", + "format": "int32", + "description": "Wasp server log verbosity." + }, + "waspServerExecInstrumentation": { + "type": "string", + "description": "Wasp server execution instrumentation." + }, + "managedVolumeBackupJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent managed volume backup jobs per node." + }, + "slaManagedVolumeClientLinuxScriptDirectoryBlacklist": { + "type": "string", + "description": "List of Linux first level script directories that are blacklisted." + }, + "slaManagedVolumeClientSolarisScriptDirectoryBlacklist": { + "type": "string", + "description": "List of Solaris first level script directories that are blacklisted." + }, + "slaManagedVolumeClientLinuxMountDirectoryBlacklist": { + "type": "string", + "description": "List of Linux first level mount directories that are blacklisted." + }, + "slaManagedVolumeClientSolarisMountDirectoryBlacklist": { + "type": "string", + "description": "List of Solaris first level mount directories that are blacklisted." + } + } + }, + "LocalMssqlConfig": { + "type": "object", + "properties": { + "logBackupJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent MSSQL log backup jobs per node." + }, + "logApplyJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent MSSQL log apply jobs per node." + }, + "deleteSecondaryJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent MSSQL delete secondary jobs per node." + }, + "mssqlBatchSnapshotJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent MSSQL batch snapshot jobs per node." + }, + "mssqlUberJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent MSSQL uber jobs per node." + }, + "fileDownloadErrorKnowledgeBaseArticleWebLink": { + "type": "string", + "description": "Web link for the knowledge base article on how to debug file download errors." + } + } + }, + "LocalOracleConfig": { + "type": "object", + "properties": { + "maxUploadOracleLogParallelism": { + "type": "integer", + "format": "int32", + "description": "Maximum parallelism on uploading oracle log group content in log upload jobs." + } + } + }, + "LocalQuicksilverConfig": { + "type": "object", + "properties": { + "jobCategories": { + "type": "string", + "description": "Thread pool categories for each job type, stored as a serialized Map[String, String]. See JobType.scala for the keys, and JobQueueCategory.scala for the values. DO NOT MAKE MANUAL CHANGES to this config on a live system. Mistakes will break JFL." + }, + "jobFetcherNumLogFiles": { + "type": "integer", + "format": "int32", + "description": "Maximum number of compressed log files to save." + }, + "jobFetcherCurrentLogSize": { + "type": "integer", + "format": "int32", + "description": "Max size of the current log file before it gets rolled." + }, + "jobFetcherQueueDebugIntervalSeconds": { + "type": "integer", + "format": "int32", + "description": "JFL prints its active and queued jobs periodically. 
This config controls the minimum amount of time between these logs." + }, + "jflServerMinWorkerThreads": { + "type": "integer", + "format": "int32", + "description": "Minimum number of worker threads in jfl server thread pool." + }, + "jflServerMaxWorkerThreads": { + "type": "integer", + "format": "int32", + "description": "Maimum number of worker threads in jfl server thread pool." + }, + "jobFetcherBadNodeSleepTimeMillis": { + "type": "integer", + "format": "int32", + "description": "Sleep time between each check on the status of a failed node." + }, + "jobFetcherConsecutiveErrorsThreshold": { + "type": "integer", + "format": "int32", + "description": "Number of consecutive errors JobFetcherLoop will tolerate before concluding something is really wrong, giving up, and suiciding. JFL sleeps 2 minutes between errors by default, so 60 consecutive errors is about 2 hours." + }, + "SystemCriticalThreadPoolSize": { + "type": "integer", + "format": "int32", + "description": "Size of the SystemCritical category thread pool." + }, + "TimeSensitiveThreadPoolSize": { + "type": "integer", + "format": "int32", + "description": "Size of the TimeSensitive category thread pool." + }, + "BackupThreadPoolSize": { + "type": "integer", + "format": "int32", + "description": "Size of the Backup category thread pool." + }, + "ShortRpoBackupThreadPoolSize": { + "type": "integer", + "format": "int32", + "description": "Size of the LogBackup category thread pool." + }, + "ReplicationThreadPoolSize": { + "type": "integer", + "format": "int32", + "description": "Size of the Replication category thread pool." + }, + "ArchiveThreadPoolSize": { + "type": "integer", + "format": "int32", + "description": "Size of the Archive category thread pool." + }, + "IndexThreadPoolSize": { + "type": "integer", + "format": "int32", + "description": "Size of the Index category thread pool." + }, + "BackgroundHighThreadPoolSize": { + "type": "integer", + "format": "int32", + "description": "Size of the BackgroundHigh category thread pool." + }, + "BackgroundLowThreadPoolSize": { + "type": "integer", + "format": "int32", + "description": "Size of the BackgroundLow category thread pool." + }, + "ChildrenThreadPoolSize": { + "type": "integer", + "format": "int32", + "description": "Size of the Children category thread pool." + }, + "LightweightTimeSensitiveThreadPoolSize": { + "type": "integer", + "format": "int32", + "description": "Size of the Lightweight TimeSensitive category thread pool." + }, + "jflServerThreadCount": { + "type": "integer", + "format": "int32", + "description": "Number of threads to be used by jfl server, which is used to compute stream counts and enqueue one-off jobs." + }, + "maxMetadataOperationRetries": { + "type": "integer", + "format": "int32", + "description": "Number of times metadata operations should be retried upon failure." + }, + "jflMetadataQueryRetryMaxWaitMills": { + "type": "integer", + "format": "int32", + "description": "Upper-bound on number of milliseconds to wait before retrying metadata operation failure." + }, + "dummyParallelizableJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent job instances for an internal test Parallelizable job." + }, + "dummyParallelizableChildJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent child job instances for an internal test Parallelizable job." 
+ }, + "dummyResumableJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent job instances for an internal test Resumable job." + }, + "cpuMonitoringCollectorIntervalInSeconds": { + "type": "integer", + "format": "int32", + "description": "Frequency to collect and emit per job CPU metrics." + }, + "interruptJobProbability": { + "type": "number", + "format": "double", + "description": "Probability of force interrupting a job in middle of its execution. This config should have a non-zero value only in test mode. It is used to test resumability of job at different stages of job execution." + }, + "jobFetcherHeapSizeLowLimit": { + "type": "string", + "description": "Low limit for heap size for job fetcher (ideally to be scaled using local config)." + }, + "jobFetcherHeapSizeHighLimit": { + "type": "string", + "description": "High limit for heap size for job fetcher (ideally to be scaled using local config)." + }, + "jobFetcherPerThreadStackSize": { + "type": "string", + "description": "Per thread stack size for job fetcher." + }, + "jobFetcherDebuggerPort": { + "type": "integer", + "format": "int32", + "description": "Job fetcher debugger port." + }, + "shouldAttachRkProfiler": { + "type": "boolean", + "description": "Whether RK profiler should be attached." + }, + "jflShardWeight": { + "type": "integer", + "format": "int32", + "description": "The weight attributed to this node when determining how many shards it should scan of the job_instance table. Each node takes a number of shards proportional to its weight (or, if there are multiple nodes, 1/2 of the total shards in the cluster, whichever is smaller)." + }, + "slaDomainGoldConfig": { + "type": "string", + "description": "Default SLA domains for gold configuration." + }, + "slaDomainSilverConfig": { + "type": "string", + "description": "Default SLA domains for silver configuration." + }, + "slaDomainBronzeConfig": { + "type": "string", + "description": "Default SLA domains for bronze configuration." + }, + "slaDomainLegacyExpireConfig": { + "type": "string", + "description": "This is used in post_up_100 for installtions." + }, + "jobMaintainerMaxFailedCount": { + "type": "integer", + "format": "int32", + "description": "Job Maintainer maximum failure count." + }, + "jobMaintainerMaxRunTimeInMins": { + "type": "integer", + "format": "int32", + "description": "Job Maintainer maximum run time in minutes." + }, + "frequentStatsUpdaterJobThreadCount": { + "type": "integer", + "format": "int32", + "description": "Number of threads required to run the FrequentStatsJob." + }, + "infrequentStatsUpdaterJobThreadCount": { + "type": "integer", + "format": "int32", + "description": "Number of threads required to run the InfrequentStatsJob." + }, + "parallelizedStatsUpdaterJobThreadCount": { + "type": "integer", + "format": "int32", + "description": "Number of threads required to run the ParallelizedStatsJob." + }, + "clusterwideStatsUpdaterJobThreadCount": { + "type": "integer", + "format": "int32", + "description": "Number of threads required to run the clusterwideStatsJob." + }, + "storageFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Compute storage stats every {{ frequentStatsFrequency }} minutes." + }, + "cloudStorageFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Compute cloud storage stats every {{ frequentStatsFrequency }} minutes." 
+ }, + "slaDomainStorageFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Compute slaDomain storage stats every {{ frequentStatsFrequency }} minutes." + }, + "crossCompressionStorageFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Compute cross compression storage stats every {{ frequentStatsFrequency }} minutes." + }, + "fullCompressionStorageFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Compute full compression storage stats every {{ frequentStatsFrequency }} minutes." + }, + "perSnappableStorageFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Compute per snappable storage stats every {{ frequentStatsFrequency }} minutes." + }, + "vmwareGraphiteStatsFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Compute vmware graphite stats every {{ infrequentStatsFrequency }} minutes." + }, + "snappableCountStatsPeriodInMinutes": { + "type": "integer", + "format": "int32", + "description": "Compute snappable count stats every {{ infrequentStatsFrequency }} minutes." + }, + "clusterwideStatsIntervalInMinutes": { + "type": "integer", + "format": "int32", + "description": "Compute snappable count stats every {{ clusterwideStatsIntervalInMin }} minutes." + }, + "snapshotStorageFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Compute snapshot storage stats every {{ frequentStatsFrequency }} minutes." + }, + "snapshotCountFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Count the number of snapshots every {{ frequentStatsFrequency }} minutes." + }, + "totalPhysicalStorageFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Compute total physical storage stats every {{ frequentStatsFrequency }} minutes." + }, + "perVmReplicatedStorageFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Compute replicated vm storage stats every {{ frequentStatsFrequency }} minutes." + }, + "perSnappableOnDemandSnapshotInfoFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Compute on demand snapshot info stats every {{ frequentStatsFrequency }} minutes." + }, + "perSnappableSnapshotInfoFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Compute snapshot info stats every {{ frequentStatsFrequency }} minutes." + }, + "cdpStorageFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Compute cdp storage stats every {{ frequentStatsFrequency }} minutes." + }, + "perMountStorageFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Compute per mount storage every {{ frequentStatsFrequency }} minutes." + }, + "createCloudImageJobFrequencyInMins": { + "type": "integer", + "format": "int32", + "description": "Create cloud image job frequency in minutes." + }, + "throttleWaitTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Throttle wait timeout in seconds." + }, + "throttleSleepInSeconds": { + "type": "integer", + "format": "int32", + "description": "Throttle sleep in seconds." + }, + "highPriorityJobsAllocationFactor": { + "type": "number", + "format": "double", + "description": "Factor by which reserved space can be allocated to hi pri job type. This means high priority jobs can allocate 100% of reserved space." 
+ }, + "midPriorityJobsAllocationFactor": { + "type": "number", + "format": "double", + "description": "Factor by which reserved space can be allocated to mid pri job type. This means mid priority jobs can allocate 75% of reserved space." + }, + "lowPriorityJobsAllocationFactor": { + "type": "number", + "format": "double", + "description": "Factor by which reserved space can be allocated to low pri job type. This means low priority jobs can allocate 0% of reserved space." + }, + "minPercentDiskSpaceReserved": { + "type": "number", + "format": "double", + "description": "Minimum percent of total disk space that has to be marked as reserved space." + }, + "maxPercentDiskSpaceReserved": { + "type": "number", + "format": "double", + "description": "Maximum percent of total disk space that can be marked as reserved space." + }, + "dataReductionEstimatePct": { + "type": "number", + "format": "double", + "description": "Coefficient for estimating the storage needed for a full/incremental snapshot." + }, + "calculateEffectiveSlaJobNumRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for calculate effective SLA job." + }, + "calculateEffectiveSlaJobSleepForSemaphoreInSeconds": { + "type": "integer", + "format": "int32", + "description": "Sleep time in seconds between attempts to acquire the semaphore to calculate effective SLA domains." + } + } + }, + "LocalRemoteClusterConfig": { + "type": "object", + "properties": { + "remoteClusterHeapSizeLowLimit": { + "type": "string", + "description": "Low limit for heap size for remote cluster service (ideally to be scaled using local config)." + }, + "remoteClusterHeapSizeHighLimit": { + "type": "string", + "description": "High limit for heap size for remote cluster service (ideally to be scaled using local config)." + }, + "remoteClusterPerThreadStackSize": { + "type": "string", + "description": "Per thread stack size for remote cluster service." + }, + "remoteClusterDebuggerPort": { + "type": "integer", + "format": "int32", + "description": "Remote cluster service debugger port." + }, + "remoteClusterPort": { + "type": "integer", + "format": "int32", + "description": "Remote cluster service port." + }, + "remoteClusterSocketTimeoutInMs": { + "type": "integer", + "format": "int32", + "description": "Remote cluster service socket timeout in milliseconds." + }, + "remoteClusterTrustedCertsDir": { + "type": "string", + "description": "Remote cluster service trusted certificates directory." + } + } + }, + "LocalReplicationConfig": { + "type": "object", + "properties": { + "replicationNumLogFiles": { + "type": "integer", + "format": "int32", + "description": "Number of log files for replication." + }, + "replicationCurrentLogSize": { + "type": "integer", + "format": "int32", + "description": "Log size for replication." + }, + "replicationServicePort": { + "type": "integer", + "format": "int32", + "description": "Port for replication." + }, + "replicationLogVerbosity": { + "type": "integer", + "format": "int32", + "description": "Log verbosity for replication." + }, + "replicationExecInstrumentation": { + "type": "string", + "description": "Replication execution instrumentation." + }, + "maxClientRetries": { + "type": "integer", + "format": "int32", + "description": "Number of clients to retry per request." + }, + "jobStatusSourceQuantumFactor": { + "type": "integer", + "format": "int32", + "description": "Job status source quantum factor." 
+ }, + "jobStatusSourceIntervalFactor": { + "type": "integer", + "format": "int32", + "description": "Job status source interval factor." + }, + "replicationTransferUnitBytes": { + "type": "integer", + "format": "int32", + "description": "Max data transfer size for replication RPC." + }, + "maxReplicationConnections": { + "type": "integer", + "format": "int32", + "description": "Max number of connections to remote location. This represents the max number of connections used to replicate one transfer unit of data from remote location. Each stream can use up to `maxReplicationConnections` connections. Increase number of replicating streams before adjusting number of connections per stream. More connections can lead to increased latency from duration spent establishing connection." + }, + "maxReplicationExtentSize": { + "type": "integer", + "format": "int32", + "description": "Max data for one extent." + }, + "remoteRequestRetries": { + "type": "integer", + "format": "int32", + "description": "Number of retries for remote cluster RPCs." + }, + "thriftConnectionTimeoutSecs": { + "type": "integer", + "format": "int32", + "description": "Thrift connection timeout in seconds." + }, + "thriftSocketSendTimeoutSecs": { + "type": "integer", + "format": "int32", + "description": "Thrift socket send timeout in seconds." + }, + "thriftSocketRecvTimeoutSecs": { + "type": "integer", + "format": "int32", + "description": "Thrift socket recv timeout in seconds." + }, + "cdpReplicationGlobalBufferEnabled": { + "type": "boolean", + "description": "True to enable memory-backed kvstore for buffering." + }, + "cdpReplicationSsdBufferEnabled": { + "type": "boolean", + "description": "True to enable SSD-backed kvstore for buffering." + }, + "remoteRpcMinSleepSecs": { + "type": "integer", + "format": "int32", + "description": "Minimum seconds between retries for any RemoteExtent RPC." + }, + "remoteRpcMaxSleepSecsForInc": { + "type": "integer", + "format": "int32", + "description": "Maximum seconds between retries for RemoteExtent RPC." + }, + "remoteRpcMaxSleepSecsForFull": { + "type": "integer", + "format": "int32", + "description": "Maximum seconds between retries for RemoteExtent RPC." + }, + "deleteReplicationSourceJobDelayRangeInSec": { + "type": "integer", + "format": "int32", + "description": "Delay range in seconds to randomly pick a delay from when the delete replication source job cannot acquire resources." + }, + "sendEventsRemoteJobDelayRangeInSec": { + "type": "integer", + "format": "int32", + "description": "Delay range in seconds to randomly pick a delay from when the send events remote job cannot acquire resources." + } + } + }, + "LocalSapHanaConfig": { + "type": "object", + "properties": { + "sapHanaDbIntegrityJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent SAP HANA DB integrity jobs per node." + } + } + }, + "LocalShieldConfig": { + "type": "object", + "properties": { + "authDomainUtilNumThreads": { + "type": "integer", + "format": "int32", + "description": "Number of threads for AuthDomainUtil cache refreshes." + }, + "managedHierarchyCacheNumThreads": { + "type": "integer", + "format": "int32", + "description": "Number of threads for ManagedHierarchyCache refreshes." + }, + "smbServiceManagerNumThreads": { + "type": "integer", + "format": "int32", + "description": "Number of threads for Smb Service Manager service."
+ }, + "managedHierarchyCacheServiceNumThreads": { + "type": "integer", + "format": "int32", + "description": "Number of threads for ManagedHierarchyCacheService refreshes." + }, + "managedHierarchyCacheServiceIntervalSecs": { + "type": "integer", + "format": "int32", + "description": "Interval at which the ManagedHierarchyCacheService is refreshed." + }, + "principalCacheNumThreads": { + "type": "integer", + "format": "int32", + "description": "Number of threads for principal cache refreshes." + }, + "graphQlNumThreads": { + "type": "integer", + "format": "int32", + "description": "Number of threads for GraphQL query execution." + }, + "ntpKeysFile": { + "type": "string", + "description": "File to store NTP Symmetric keys." + }, + "ntpClusterAuthKeyNum": { + "type": "integer", + "format": "int32", + "description": "NTP Symmetric key number used by cluster." + }, + "maxResultsForUnauthorizedPrincipalSearch": { + "type": "integer", + "format": "int32", + "description": "Maximum number of principals listed for unauthorized principals." + }, + "ldapThreadpoolRefreshIntervalSecs": { + "type": "integer", + "format": "int32", + "description": "Interval at which the LDAP thread pool is refreshed." + }, + "ldapCacheIntervalSecs": { + "type": "integer", + "format": "int32", + "description": "Interval at which LDAP records of users are refreshed." + }, + "ldapCacheMaxNumElements": { + "type": "integer", + "format": "int32", + "description": "Maximum number of entries in LDAP cache." + }, + "ldapSearchTimeoutMillis": { + "type": "integer", + "format": "int32", + "description": "LDAP search timeout in ms. Set to the API timeout." + }, + "ldapParallelThreadsForServerProbes": { + "type": "integer", + "format": "int32", + "description": "Number of LDAP threads to probe for server connectivity." + }, + "globalManagerTokenFile": { + "type": "string", + "description": "File to store global manager token." + }, + "globalManagerPubKeyFile": { + "type": "string", + "description": "File to store global manager pubkey." + }, + "localAdminUsername": { + "type": "string", + "description": "Username of local admin user." + }, + "rkcliUser": { + "type": "string", + "description": "Username for logging into the Rubrik CLI (rkcli)." + }, + "stagingUser": { + "type": "string", + "description": "Username for the staging area." + }, + "supportUsername": { + "type": "string", + "description": "Username of support user." + }, + "sparkUsername": { + "type": "string", + "description": "Username of spark user." + }, + "sedUnlockCmd": { + "type": "string", + "description": "Command to unlock sed." + }, + "sedUnlockTryIntervalSecs": { + "type": "integer", + "format": "int32", + "description": "Intervals for retrying to unlock sed in seconds." + }, + "sedNonRootUnlockTries": { + "type": "integer", + "format": "int32", + "description": "Retries to unlock non-root sed." + }, + "softwareEncryptionUnlockCmd": { + "type": "string", + "description": "Command to unlock software encryption." + }, + "defaultApiServerCipherSuites": { + "type": "string", + "description": "Default API server cipher suites." + }, + "defaultLocalSslCipherSuites": { + "type": "string", + "description": "Default local SSL cipher suites." + }, + "keyWrapperPwdUnixDomainSocketPath": { + "type": "string", + "description": "Key-wrapper password unix domain socket path." + }, + "keyWrapperExecInstrumentation": { + "type": "string", + "description": "Key-wrapper execution instrumentation."
+ }, + "keyWrapperLogVerbosity": { + "type": "integer", + "format": "int32", + "description": "Key-wrapper log verbosity." + }, + "tpmStartupBinary": { + "type": "string", + "description": "TPM startup binary location." + }, + "pwdBasedKekSaltPath": { + "type": "string", + "description": "Password based kek salt path." + }, + "inMemoryPwdBasedKeyPath": { + "type": "string", + "description": "In memory password based key path." + }, + "inMemoryPwdBasedKeyPathOld": { + "type": "string", + "description": "In memory password based old key path. The old key file is written during PWD key rotation." + }, + "samlResponseAuthzAttrName": { + "type": "string", + "description": "SAML response authorization attribute." + }, + "samlResponseUsernameAttrName": { + "type": "string", + "description": "SAML response username. Used for GPS federated login." + }, + "samlSsoAssertionConsumerServiceUrl": { + "type": "string", + "description": "SAML SSO assertion consumer service url." + }, + "samlResponseEmailAttrName": { + "type": "string", + "description": "SAML response email attr name." + }, + "samlResponseGroupAttrName": { + "type": "string", + "description": "SAML response group attr name." + }, + "listSmbServicesCmd": { + "type": "string", + "description": "List smb services cmd." + }, + "smbServiceBringupRetries": { + "type": "integer", + "format": "int32", + "description": "Smb service bringup retries." + }, + "smbServiceThriftServerTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Smb service thrift server timeout in seconds." + }, + "smbServiceFailureThreshold": { + "type": "integer", + "format": "int32", + "description": "Smb service failure threshold." + }, + "smbServiceHealthCheckIntervalInSeconds": { + "type": "integer", + "format": "int32", + "description": "Smb service health check interval in seconds." + }, + "sshBannerFilePath": { + "type": "string", + "description": "Ssh banner file path." + }, + "smbServicesRoot": { + "type": "string", + "description": "Smb services root." + }, + "smbServicesLogRoot": { + "type": "string", + "description": "Smb services log root." + }, + "smbServiceConfigGenCmd": { + "type": "string", + "description": "Smb service config generate command." + }, + "smbServiceGenCmd": { + "type": "string", + "description": "Smb service generate command." + }, + "smbServiceContainerStartCmd": { + "type": "string", + "description": "Smb service container start command." + }, + "smbServiceConfigToolArg": { + "type": "string", + "description": "Smb service config tool arg." + }, + "smbServiceContainerArg": { + "type": "string", + "description": "Smb service container arg." + }, + "winbindHealthCheckCmd": { + "type": "string", + "description": "Winbind health check command." + }, + "nginxSmbConf": { + "type": "string", + "description": "Nginx smb conf file." + }, + "smbServiceLocalDomain": { + "type": "string", + "description": "Smb service local domain name." + }, + "tpmUpgradeCheckFrequencySeconds": { + "type": "integer", + "format": "int32", + "description": "Tpm upgrade check frequency in seconds." + }, + "tpmUpgradeUtil": { + "type": "string", + "description": "Location of TPM upgrade util." + }, + "initialKeyEncryptionKeyProtectionType": { + "type": "string", + "description": "Initial Key protection type for key encryption key." + }, + "envoyPrivKeyFile": { + "type": "string", + "description": "Envoy private key file." + }, + "envoyCertFile": { + "type": "string", + "description": "Envoy cert file." 
+ }, + "sprayServerCertValidityDays": { + "type": "integer", + "format": "int32", + "description": "Spray server cert validity days." + }, + "cockroachDbKeyCertDir": { + "type": "string", + "description": "Cockroachdb key cert directory." + }, + "tmpTransportKeyPathPrefix": { + "type": "string", + "description": "TPM transport key path prefix." + }, + "clusterRksupportCredentialRetryIntervalMillis": { + "type": "integer", + "format": "int32", + "description": "Cluster rksupport credential retry interval in milliseconds." + }, + "clusterRksupportCredentialFilePath": { + "type": "string", + "description": "Cluster rksupport credential file path." + }, + "sedPasswordDir": { + "type": "string", + "description": "Sed password dir." + }, + "tpmPcrBinary": { + "type": "string", + "description": "TPM platform configuration register binary." + }, + "sedPcr": { + "type": "integer", + "format": "int32", + "description": "Sed platform configuration register." + }, + "setupEncryptionKeysCmd": { + "type": "string", + "description": "Setup encryption keys command." + }, + "keyWrapperUnixDomainSocketPath": { + "type": "string", + "description": "Key wrapper unix domain socket path." + }, + "sprayServerPrivKeyFile": { + "type": "string", + "description": "Spray server priv key file." + }, + "sprayServerCertFile": { + "type": "string", + "description": "Spray server cert file." + }, + "sprayServerKeyStoreFile": { + "type": "string", + "description": "Spray server key store file." + }, + "webServerSignedKeyStoreFile": { + "type": "string", + "description": "Web server signed key store file." + }, + "sprayServerKeyStoreOwnerGroup": { + "type": "string", + "description": "Spray server key store owner group." + }, + "sslDhParameterFile": { + "type": "string", + "description": "SSL DH (Diffie-Hellman) parameter file." + }, + "ipmiRubrikUsername": { + "type": "string", + "description": "Ipmi rubrik username." + }, + "ipmiPasswordMaxLength": { + "type": "integer", + "format": "int32", + "description": "Ipmi password max length." + }, + "keyWrapperBinary": { + "type": "string", + "description": "key wrapper binary." + }, + "iperfDefaultPort": { + "type": "integer", + "format": "int32", + "description": "Default port for iperf." + }, + "iperf3DefaultPort": { + "type": "integer", + "format": "int32", + "description": "Default port for iperf3." + }, + "opensslAbsolutePath": { + "type": "string", + "description": "Path to Openssl." + }, + "tpmFirmwareUpgradeToolPath": { + "type": "string", + "description": "TPM firmware upgrade tool path." + }, + "rebootRequiredAfterTpmFirmwareUpdateIndicatorFile": { + "type": "string", + "description": "Indicator file for reboot required after TPM firmware update." + }, + "rebootRequiredAfterClearingTpmUpdaterOwnershipIndicatorFile": { + "type": "string", + "description": "Indicator file for reboot required after clearing TPM updater ownership." + }, + "takeOwnershipScriptPath": { + "type": "string", + "description": "Path to takeownership script." + }, + "iptablesCmd": { + "type": "string", + "description": "Command to get IPv4 iptables." + }, + "ip6tablesCmd": { + "type": "string", + "description": "Command to get IPv6 iptables." + }, + "iptablesRestoreCmd": { + "type": "string", + "description": "Command to restore iptables." + }, + "ip6tablesRestoreCmd": { + "type": "string", + "description": "command to restore IPv6 tables." + }, + "iptablesDir": { + "type": "string", + "description": "Iptables dir location." 
+ }, + "iptablesDefaultRuleFile": { + "type": "string", + "description": "Iptables default rule file." + }, + "ip6tablesDefaultRuleFile": { + "type": "string", + "description": "IPv6 iptables default rule file." + }, + "iptablesSavedRulesDir": { + "type": "string", + "description": "Iptables saved rules dir." + }, + "iptablesInternodeInputWhitelistChain": { + "type": "string", + "description": "Iptables internode input whitelist chain." + }, + "iptablesExternalInputAllIfaceWhitelistChain": { + "type": "string", + "description": "Iptables external input all interface whitelist chain." + }, + "iptablesExternalInputDataWhitelistChain": { + "type": "string", + "description": "Iptables external input data whitelist chain." + }, + "iptablesExternalInputManagementWhitelistChain": { + "type": "string", + "description": "Iptables external input management whitelist chain." + }, + "iptablesTempInputWhitelistChain": { + "type": "string", + "description": "Iptables temp input whitelist chain." + }, + "iptablesOutputChain": { + "type": "string", + "description": "Iptables output chain." + }, + "iptablesRemovedNodeOutputBlacklistChain": { + "type": "string", + "description": "Iptables removed node output blacklist chain." + }, + "iptablesClusterRemoveNodeBlacklistChain": { + "type": "string", + "description": "Iptables cluster remove node blacklist chain." + }, + "iptablesClusterAddNodeWhitelistChain": { + "type": "string", + "description": "Iptables cluster add node whitelist chain." + }, + "iptablesEnableSshPort22Chain": { + "type": "string", + "description": "Iptables enable ssh port 22 chain." + }, + "iptablesDisableChiselLogsChain": { + "type": "string", + "description": "Iptables disable chisel logs chain." + }, + "iptablesDisableChiselStatsChain": { + "type": "string", + "description": "Iptables disable chisel stats chain." + }, + "iptablesDisableChiselTracesChain": { + "type": "string", + "description": "Iptables disable chisel traces chain." + }, + "iptablesDisableChiselTaggedStatsChain": { + "type": "string", + "description": "Iptables disable chisel tagged stats chain." + }, + "iptablesLogDroppedInputChain": { + "type": "string", + "description": "Iptables log dropped input chain." + }, + "iptablesInternodeInputIpv4WhitelistFile": { + "type": "string", + "description": "Iptables internode input IPv4 whitelist file." + }, + "iptablesExternalInputIpv4WhitelistFile": { + "type": "string", + "description": "Iptables external input IPv4 whitelist file." + }, + "iptablesRemovedNodeOutputIpv4BlacklistFile": { + "type": "string", + "description": "Iptables removed node output IPv4 blacklist file." + }, + "iptablesLogDroppedInputIpv4File": { + "type": "string", + "description": "Iptables log dropped input IPv4 file." + }, + "iptablesLogDroppedInputIpv6File": { + "type": "string", + "description": "Iptables log dropped input IPv6 file." + }, + "iptablesInputLogAveragePacketsPerSecond": { + "type": "integer", + "format": "int32", + "description": "Iptables input log average packets per second." + }, + "iptablesEnableSshPort22File": { + "type": "string", + "description": "Iptables enable ssh port 22 file." + }, + "iptablesDisableChiselTelemetryFile": { + "type": "string", + "description": "Iptables disable chisel telemetry file." + }, + "keyRotationCheckMillis": { + "type": "integer", + "format": "int32", + "description": "Key rotation check in milliseconds." + }, + "keyRotationRebootDelayMillis": { + "type": "integer", + "format": "int32", + "description": "Key rotation reboot delay in milliseconds." 
+ }, + "tpmWaitRetrySeconds": { + "type": "integer", + "format": "int32", + "description": "TPM wait retry in seconds." + }, + "tpmWaitNumTries": { + "type": "integer", + "format": "int32", + "description": "TPM wait number of tries." + }, + "keyRotationTaskStatusCheckIntervalMillis": { + "type": "integer", + "format": "int32", + "description": "Key rotation task status check interval in milliseconds." + }, + "kmipConfigurationDir": { + "type": "string", + "description": "KMIP configuration dir." + }, + "kmipClientKeyFile": { + "type": "string", + "description": "KMIP client key file." + }, + "kmipClientCertFile": { + "type": "string", + "description": "KMIP client cert file." + }, + "kmipServerCertDir": { + "type": "string", + "description": "KMIP server cert directory." + }, + "kmipConfigurationFile": { + "type": "string", + "description": "KMIP configuration file." + }, + "kmipSetFileRetryIntervalMillis": { + "type": "integer", + "format": "int32", + "description": "KMIP set file retry interval in milliseconds." + }, + "kmipUtilCmd": { + "type": "string", + "description": "KMIP util command." + }, + "keyRotationWaitTimeoutSeconds": { + "type": "integer", + "format": "int32", + "description": "Key rotation wait timeout in seconds." + }, + "copyKekPasswordFilesCmd": { + "type": "string", + "description": "Copy kek password files command." + }, + "numKeyRotationThreads": { + "type": "integer", + "format": "int32", + "description": "Number of key rotation threads." + }, + "setWebServerCertRetryIntervalMillis": { + "type": "integer", + "format": "int32", + "description": "Web server cert retry interval in milliseconds." + }, + "signatureVerificationToolPath": { + "type": "string", + "description": "Signature verification tool path." + }, + "releaseSigningTrustedCertificatesFilePath": { + "type": "string", + "description": "Release signing trusted certificates file path." + }, + "fscryptUtilCmd": { + "type": "string", + "description": "fscrypt util command path." + }, + "allowConnectingToDevPolaris": { + "type": "boolean", + "description": "Whether to allow prod briks to connect to dev polaris." + } + } + }, + "LocalSnapshotConfig": { + "type": "object", + "properties": { + "tlkv_global_max_capacity": { + "type": "integer", + "format": "int32", + "description": "Maximum number of two-level kvstore index blocks which can be kept in memory caches overall across multiple kvstores. This will allow us to limit the overall memory which can be consumed by these kvstores globally." + }, + "tlkv_global_target_capacity": { + "type": "integer", + "format": "int32", + "description": "This is the target number to which the total number of cached in memory two-level kvstore index blocks will be brought down to when the maximum overall limit is hit." + } + } + }, + "LocalStormConfig": { + "type": "object", + "properties": { + "awsLincImageName": { + "type": "string", + "description": "Name of the AMI that will be used in AWS for launching linux converter." + }, + "awsWincImageName": { + "type": "string", + "description": "Name of the AMI that will be used in AWS for launching windows converter." + }, + "awsBoltImageName": { + "type": "string", + "description": "Name of the AMI that will be used in AWS for launching Bolt." + }, + "awsRivetImageName": { + "type": "string", + "description": "Name of the AMI that will be used in AWS for launching Rivet." + }, + "azureLincImageName": { + "type": "string", + "description": "Name of the image that will be used in Azure for launching linux converter." 
+ }, + "azureWincImageName": { + "type": "string", + "description": "Name of the image that will be used in Azure for launching windows converter." + }, + "azureBoltImageName": { + "type": "string", + "description": "Name of the image that will be used in Azure for launching Bolt." + }, + "awsLincImageNameInternal": { + "type": "string", + "description": "Name of the AMI that will be used in AWS for launching linux converter. It will only be used when the cluster version contains commit hash." + }, + "awsWincImageNameInternal": { + "type": "string", + "description": "Name of the AMI that will be used in AWS for launching windows converter. It will only be used when the cluster version contains commit hash." + }, + "awsBoltImageNameInternal": { + "type": "string", + "description": "Name of the AMI that will be used in AWS for launching Bolt. It will be used only when cluster version contains commit hash." + }, + "awsRivetImageNameInternal": { + "type": "string", + "description": "Name of the AMI that will be used in AWS for launching Rivet. It will be used only when cluster version contains commit hash." + }, + "azureLincImageNameInternal": { + "type": "string", + "description": "Name of the image that will be used in Azure for launching linux converter. It will only be used when the cluster version contains commit hash." + }, + "azureWincImageNameInternal": { + "type": "string", + "description": "Name of the image that will be used in Azure for launching windows converter. It will only be used when the cluster version contains commit hash." + }, + "azureBoltImageNameInternal": { + "type": "string", + "description": "Name of the image that will be used in Azure for launching Bolt. It will only be used when cluster version contains commit hash." + }, + "azureRegionToImageCredentials": { + "type": "string", + "description": "This represents the mapping of azure region to images uri and storage account sas key." + }, + "defaultAzureRegionForCopyComputeImages": { + "type": "string", + "description": "If the specified region is not mentioned in azureRegionToImageCredentials then we will use this region to copy compute images to customer region." + }, + "awsComputeImagesAccountCredentials": { + "type": "string", + "description": "This represents the access key of AWS public cloud account." + }, + "connectivityCheckEc2InstanceType": { + "type": "string", + "description": "Inexpensive instance type for cloud compute connectivity check." + }, + "stormInstanceJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent storm instance job per node." + }, + "rubrikBoltEc2InstanceTypeForCloudOn": { + "type": "string", + "description": "Instance type for launching rubrik bolt in aws for cloudOn." + }, + "rubrikRivetEc2InstanceTypeForCloudOn": { + "type": "string", + "description": "Instance type for launching rubrik Rivet in aws for cloudOn." + }, + "rubrikBoltEc2InstanceTypeForCloudNative": { + "type": "string", + "description": "Instance type for launching rubrik bolt in aws for CloudNative applications." + }, + "rubrikBoltEc2InstanceTypeForAppFlows": { + "type": "string", + "description": "Instance type for launching rubrik bolt in aws for AppFlows applications." + }, + "rubrikBoltEc2InstanceTypeForArchivalOperation": { + "type": "string", + "description": "Instance type of launching bolt instance in aws for archival related operations like consolidation, reverse, etc." 
+ }, + "rubrikConverterEc2InstanceTypeForCloudOn": { + "type": "string", + "description": "Instance type for launching converter in aws for CloudOn." + }, + "rubrikConverterEc2InstanceTypeForAppFlows": { + "type": "string", + "description": "Instance type for launching converter in aws for Appflows." + }, + "azureConverterVmSize": { + "type": "string", + "description": "Size of the azure instance to launch for converter." + }, + "azureBoltVmSize": { + "type": "string", + "description": "Size of the azure bolt instance to launch for operations not related to archival." + }, + "azureBoltVmSizeForArchivalOperation": { + "type": "string", + "description": "Size of the azure bolt instance to launch for archival related operations like consolidation, reverse, etc." + }, + "maxLongRunningConvertersAllowed": { + "type": "integer", + "format": "int32", + "description": "The maximum number of long running converters allowed to run at a single time." + }, + "maxShortRunningConvertersAllowed": { + "type": "integer", + "format": "int32", + "description": "The maximum number of short running converters allowed to run at a single time." + }, + "maxLongRunningBoltsAllowed": { + "type": "integer", + "format": "int32", + "description": "The maximum number of long running bolts allowed to run at a single time." + }, + "maxShortRunningBoltsAllowed": { + "type": "integer", + "format": "int32", + "description": "The maximum number of short running bolts allowed to run at a single time." + }, + "maxLongRunningRivetsAllowed": { + "type": "integer", + "format": "int32", + "description": "The maximum number of long running Rivets allowed to run at a single time." + }, + "maxShortRunningRivetsAllowed": { + "type": "integer", + "format": "int32", + "description": "The maximum number of short running Rivets allowed to run at a single time." + }, + "maxLongRunningConvertersAllowedPerDataLocation": { + "type": "integer", + "format": "int32", + "description": "The maximum number of long running converters allowed per data location to run at a single time." + }, + "maxShortRunningConvertersAllowedPerDataLocation": { + "type": "integer", + "format": "int32", + "description": "The maximum number of short running converters allowed per data location to run at a single time." + }, + "maxLongRunningBoltsAllowedPerDataLocation": { + "type": "integer", + "format": "int32", + "description": "The maximum number of long running bolts allowed per data location to run at a single time." + }, + "maxShortRunningBoltsAllowedPerDataLocation": { + "type": "integer", + "format": "int32", + "description": "The maximum number of short running bolts allowed per data location to run at a single time." + }, + "maxLongRunningRivetsAllowedPerDataLocation": { + "type": "integer", + "format": "int32", + "description": "The maximum number of long running Rivets allowed per data location to run at a single time." + }, + "maxShortRunningRivetsAllowedPerDataLocation": { + "type": "integer", + "format": "int32", + "description": "The maximum number of short running Rivets allowed per data location to run at a single time." + }, + "setupDisksWaitTimeInMillis": { + "type": "integer", + "format": "int32", + "description": "Waiting time in storm manager before calling setupDisks on bolt to allow node monitor to scan for disks." 
+ } + } + }, + "LocalSupportConfig": { + "type": "object", + "properties": { + "supportTunnelEnabledFile": { + "type": "string", + "description": "If this file exists, the support tunnel is enabled and will be maintained by NodeMonitor. The file contains the inactivity timeout in seconds." + }, + "supportTunnelPortFile": { + "type": "string", + "description": "File containing support tunnel port number." + }, + "supportTunnelControlPath": { + "type": "string", + "description": "Support tunnel SSH control path." + }, + "supportTunnelActivityFile": { + "type": "string", + "description": "File containing timestamp of latest tunnel/console activity in milliseconds." + }, + "supportTunnelWaitMillis": { + "type": "integer", + "format": "int32", + "description": "How long (in milliseconds) SprayServer should wait between attempts to poll the port file to see if NodeMonitor has opened the tunnel." + }, + "supportTunnelWaitTries": { + "type": "integer", + "format": "int32", + "description": "How many attempts SprayServer should make to poll the port file." + }, + "supportTunnelFrequencyMillis": { + "type": "integer", + "format": "int32", + "description": "How often NodeMonitor should check and maintain the support tunnel." + }, + "supportTunnelServerAliveIntervalSecs": { + "type": "integer", + "format": "int32", + "description": "Support tunnel ssh connection's ServerAliveInterval in seconds." + }, + "supportTunnelServerAliveCountMax": { + "type": "integer", + "format": "int32", + "description": "How many unanswered probes are sent by support tunnel ssh before it quits." + }, + "supportTunnelStartPort": { + "type": "integer", + "format": "int32", + "description": "Start port for support tunnels." + }, + "numSupportTunnelPorts": { + "type": "integer", + "format": "int32", + "description": "Number of ports available for support tunnels." + }, + "minSupportTunnelTimeoutInSecs": { + "type": "integer", + "format": "int32", + "description": "Minimum time in seconds for support tunnel timeout." + }, + "supportKeyFile": { + "type": "string", + "description": "SSH Key file for accessing EC2 support tunnel." + }, + "supportLocalSshPort": { + "type": "integer", + "format": "int32", + "description": "Support tunnel local SSH port." + }, + "supportClusterSshPort": { + "type": "integer", + "format": "int32", + "description": "Support tunnel cluster SSH port." + }, + "supportLocalSshUser": { + "type": "string", + "description": "Username support uses to access the node." + }, + "supportClusterSshPubkey": { + "type": "string", + "description": "Pubkey support uses to access the cluster." + }, + "chiselSupportServer": { + "type": "string", + "description": "Host of the local Chisel client." + }, + "chiselSupportPort": { + "type": "integer", + "format": "int32", + "description": "Chisel client listening port for support tunnels." + }, + "supportServer": { + "type": "string", + "description": "Support server." + }, + "supportPort": { + "type": "integer", + "format": "int32", + "description": "Support server port." + }, + "supportUser": { + "type": "string", + "description": "Support user name." + }, + "timeoutToWaitForFullInSecs": { + "type": "integer", + "format": "int32", + "description": "Timeout to wait for full in seconds." + }, + "statsUploadEnabled": { + "type": "boolean", + "description": "Enable stats upload." + }, + "liveStatsTarget": { + "type": "string", + "description": "Live stats target." + }, + "logUploadBucket": { + "type": "string", + "description": "S3 Bucket to upload logs." 
+ }, + "productMetricsUploadBucket": { + "type": "string", + "description": "S3 Bucket to upload product metrics." + }, + "logUploadRegion": { + "type": "string", + "description": "S3 Bucket Region to upload logs." + }, + "logUploadAccessKey": { + "type": "string", + "description": "S3 Access Keys for uploading support bundle. These keys only have write permission to the bucket." + }, + "logUploadSecretKey": { + "type": "string", + "description": "S3 Secret Keys for uploading support bundle." + }, + "metricsConfigFilePath": { + "type": "string", + "description": "Metrics metadata configuration path." + }, + "supportMetadataFilePath": { + "type": "string", + "description": "Support metadata configuration path." + }, + "logArchivalWorkingDir": { + "type": "string", + "description": "Working dir for log archival." + }, + "enableTableDump": { + "type": "boolean", + "description": "Enable table dump." + }, + "teleportChiselRelayPort": { + "type": "integer", + "format": "int32", + "description": "Teleport chisel relay port." + }, + "teleportChiselApiPort": { + "type": "integer", + "format": "int32", + "description": "Teleport chisel API port." + }, + "teleportApiPort": { + "type": "integer", + "format": "int32", + "description": "Teleport API port." + }, + "teleportAuthorizedKeysFile": { + "type": "string", + "description": "Teleport authorized keys file." + }, + "teleportConf": { + "type": "string", + "description": "Teleport conf file location." + }, + "teleportAgentBinary": { + "type": "string", + "description": "Teleport agent binary file." + }, + "supportTunnelStatusFile": { + "type": "string", + "description": "Support tunnel status file." + } + } + }, + "LocalThorConfig": { + "type": "object", + "properties": { + "s3MaxThreadCountForUpload": { + "type": "integer", + "format": "int32", + "description": "Max number of threads used to upload a file to S3 when resumable multipart uploader is not enabled." + }, + "s3MaxConcurrentChunksForMultipartResumableUpload": { + "type": "integer", + "format": "int32", + "description": "Max number of threads used to upload a file to S3 when resumable multipart uploader is enabled. This is different for S3 because we use S3 native multipart upload for each chunk of the file (which in turn is multithreaded) and we want to keep the total number of threads bounded." + }, + "s3MaxThreadCountForResumableMultipartUploadChunk": { + "type": "integer", + "format": "int32", + "description": "Max number of threads used to upload each individual chunk of a resumable multipart upload file to S3." + }, + "azureMaxThreadCountForUpload": { + "type": "integer", + "format": "int32", + "description": "Max number of threads used to upload a file to Azure." + }, + "googleMaxThreadCountForUpload": { + "type": "integer", + "format": "int32", + "description": "Max number of threads used to upload a file to Google." + }, + "nfsMaxThreadCountForUpload": { + "type": "integer", + "format": "int32", + "description": "Max number of threads used to upload a file to NFS." + }, + "glacierMaxThreadCountForUpload": { + "type": "integer", + "format": "int32", + "description": "Max number of threads used to upload a file to Glacier." + }, + "qstarMaxThreadCountForUpload": { + "type": "integer", + "format": "int32", + "description": "Max number of threads used to upload a file to Qstar." + }, + "archivalServiceCacheSize": { + "type": "integer", + "format": "int32", + "description": "Number of entries in the archival service Cache." 
+ }, + "archivalServiceNonCacheAbleCacheSize": { + "type": "integer", + "format": "int32", + "description": "Number of entries in the archival service for keys set to be non-cacheable." + }, + "archivalServiceCacheDefaultConcurrencyLevel": { + "type": "integer", + "format": "int32", + "description": "This is the default concurrency level of all guava caches defined in archival service helper." + }, + "maxNumConvertDiskFormatJobs": { + "type": "integer", + "format": "int32", + "description": "Max number of ConvertDiskFormat jobs allowed per node." + }, + "maxNumUploadFileJobs": { + "type": "integer", + "format": "int32", + "description": "Max number of UploadFile jobs allowed per node." + }, + "instantiateJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent instantiate jobs per node." + }, + "tierSnapshotsJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent tier snapshots jobs per node." + }, + "imageConversionJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent image conversion jobs per node." + }, + "maxNumOfConsolidatePatchFileJobs": { + "type": "integer", + "format": "int32", + "description": "Max no of ConsolidatePatchFile jobs allowed per node." + }, + "cloudImageConverterServiceThriftPort": { + "type": "integer", + "format": "int32", + "description": "Port number on the converter on which the converter service is running." + }, + "cloudInstanceServiceThriftPort": { + "type": "integer", + "format": "int32", + "description": "Port number on the temporary instance on which the instance service is running." + }, + "archivalKeyDownloadBatchSize": { + "type": "integer", + "format": "int32", + "description": "Batch size for archival batch downloads." + }, + "recoverArchivedMetadataJobThreadCount": { + "type": "integer", + "format": "int32", + "description": "Number of threads used by fetch archived metadata task." + }, + "reversePatchFilesDownloadThreadCount": { + "type": "integer", + "format": "int32", + "description": "Number of threads to use when downloading files for the Reverse Patch Files job." + }, + "maxNumReversePatchFilesJobs": { + "type": "integer", + "format": "int32", + "description": "Max number of ReversePatchFiles jobs allowed per node." + }, + "maxNumMaterializeFullJobs": { + "type": "integer", + "format": "int32", + "description": "Maximum number of MaterializeFull jobs allowed per node." + }, + "patchFileCreateParallelismInUpload": { + "type": "integer", + "format": "int32", + "description": "Value indicating the degree of parallelism desired in the patch file creation stage during pipelined uploads." + }, + "patchFileCreateQueueDepthInUpload": { + "type": "integer", + "format": "int32", + "description": "Value representing the queue depth for the patch file create stage. For details, refer to the implementation of PipelinedWorkExecutor. For basic usage, please note that a lower value would prevent more patchCreate tasks from being performed in case patchUpload is taking a long time. On the other hand, a higher value would allow more patchCreate tasks to be undertaken." + }, + "patchFileUploadParallelismInUpload": { + "type": "integer", + "format": "int32", + "description": "Value indicating the degree of parallelism desired in the patch file upload stage during execute upload." 
+ }, + "patchFileUploadQueueDepthInUpload": { + "type": "integer", + "format": "int32", + "description": "Value representing the queue depth for the patch file upload stage. For details, refer to the implementation of PipelinedWorkExecutor. Basic intuition regarding this value is similar to the one for patchFileCreateQueueDepthInUpload." + }, + "subObjectsToUploadInParallel": { + "type": "integer", + "format": "int32", + "description": "Count of subobjects that can be uploaded in parallel." + }, + "nonShardedSubObjectMultiplierForUpload": { + "type": "integer", + "format": "int32", + "description": "When uploading non-sharded snappables, apply this multiplier to compute the number of subObjects to upload concurrently." + }, + "subObjectsToDownloadInParallel": { + "type": "integer", + "format": "int32", + "description": "Count of subobjects that can be downloaded in parallel." + }, + "maxSubObjectBlobsToDownloadInParallel": { + "type": "integer", + "format": "int32", + "description": "Count of sub object blobs that can be downloaded in parallel." + }, + "maxShardsToDownloadInParallel": { + "type": "integer", + "format": "int32", + "description": "Count of sharded blobstore shards that can be downloaded in parallel. This shard level concurrency is across all subgroups being downloaded. Meaning that if there are 4 subgroups being downloaded concurrently and the shard level concurrency is set to 8, on average there will be 2 threads to download shards for each subgroup." + }, + "maxShardsToDownloadInParallelForTwoStep": { + "type": "integer", + "format": "int32", + "description": "Count of sharded blobstore shards that can be downloaded in parallel, when running the two step download mechanism. This shard level concurrency is across all subgroups being downloaded. Meaning that if there are 4 subgroups being downloaded concurrently and the shard level concurrency is set to 8, on average there will be 2 threads to download shards for each subgroup." + }, + "subObjectsToConvertForInstantiationInParallel": { + "type": "integer", + "format": "int32", + "description": "Number of sub objects that can be operated on for instantiation conversion / upload tasks in parallel." + }, + "qstarSubObjectsToUploadInParallel": { + "type": "integer", + "format": "int32", + "description": "Count of subobjects that can be uploaded in parallel to QStar archival." + }, + "readerRecoveryMapReduceMaxChildInstancesPerNode": { + "type": "integer", + "format": "int32", + "description": "Maximum number of reader recovery job child instances that can be scheduled concurrently per node." + }, + "tierExistingSnapshotsMaxChildInstancesPerNode": { + "type": "integer", + "format": "int32", + "description": "Maximum number of tier existing snapshots job child instances that can be scheduled concurrently per node." + }, + "tierExistingSnapshotsJobInstancesPerNode": { + "type": "integer", + "format": "int32", + "description": "Maximum number of TIER_EXISTING_SNAPSHOTS jobs that can run concurrently per node." + }, + "archivalClearFetchedMetadataJobInstancesPerNode": { + "type": "integer", + "format": "int32", + "description": "Maximum number of ArchivalClearFetchedMetadata job instances that can be scheduled concurrently per node." + }, + "cloudStorageServiceNumLogFiles": { + "type": "integer", + "format": "int32", + "description": "Number of log files for CloudStorageService."
+ }, + "cloudStorageServiceHeapSizeLowLimit": { + "type": "string", + "description": "Low limit for heap size for CloudStorageService (ideally to be scaled using local config)." + }, + "cloudStorageServiceHeapSizeHighLimit": { + "type": "string", + "description": "High limit for heap size for CloudStorageService (ideally to be scaled using local config)." + }, + "cloudStorageServicePerThreadStackSize": { + "type": "string", + "description": "Per thread stack size for CloudStorageService." + }, + "cloudStorageServiceDebuggerPort": { + "type": "integer", + "format": "int32", + "description": "Debugger port number for CloudStorageService." + }, + "cloudStorageServiceThriftPort": { + "type": "integer", + "format": "int32", + "description": "Thrift port number for CloudStorageService." + }, + "transferFpAndExtentIndexFileConcurrency": { + "type": "integer", + "format": "int32", + "description": "A value for how many subgroup fingerprint and extent index files should be transferred concurrently in EXPOSE_UPLOAD task when an UPLOAD job is finishing. This is useful for snappables with a large number of subgroups." + }, + "uploadJobDelayRangeInSec": { + "type": "integer", + "format": "int32", + "description": "Delay range in seconds to randomly pick a delay from when the upload job cannot acquire resources." + }, + "lpfiracPrefetchThreadCountCloudLocations": { + "type": "integer", + "format": "int32", + "description": "Max number of threads used to prefetch data (per cache instance) by the logical patch file image read ahead cache. This cache is used by the two step download mechanism for all cloud locations including S3, Azure and GCP." + }, + "lpfiracPrefetchCacheSizeInMbCloudLocations": { + "type": "integer", + "format": "int32", + "description": "Max amount of data to prefetch in megabytes (per cache instance) by the logical patch file image read ahead cache. This cache is used by the two step download mechanism for all cloud locations including S3, Azure and GCP." + }, + "lpfiracPrefetchThreadCountNonCloudLocations": { + "type": "integer", + "format": "int32", + "description": "Max number of threads used to prefetch data (per cache instance) by the logical patch file image read ahead cache. This cache is used by the two step download mechanism for all non cloud locations including NFS and S3 Compatible object stores." + }, + "lpfiracPrefetchCacheSizeInMbNonCloudLocations": { + "type": "integer", + "format": "int32", + "description": "Max amount of data to prefetch in megabytes (per cache instance) by the logical patch file image read ahead cache. This cache is used by the two step download mechanism for all non cloud locations including NFS and S3 Compatible object stores." + }, + "cloudStorageServiceWorkerThreadCount": { + "type": "integer", + "format": "int32", + "description": "Number of worker threads used by the storage server in Cloud Storage Service." + }, + "cloudStorageServiceSelectorThreadCount": { + "type": "integer", + "format": "int32", + "description": "Number of selector threads used by the storage server in Cloud Storage Service." + }, + "subObjectsToMigrateInParallel": { + "type": "integer", + "format": "int32", + "description": "Count of subobjects that can be migrated in parallel for a single snappable." + }, + "archivalIntegrityReportJobInMemorySemShares": { + "type": "integer", + "format": "int32", + "description": "Maximum number of concurrent archival integrity report jobs per node."
+ } + } + }, + "LocalVmwareConfig": { + "type": "object", + "properties": { + "snapshotServerIp": { + "type": "string", + "description": "Snapshot server IP address." + }, + "snapshotServerPort": { + "type": "integer", + "format": "int32", + "description": "Snapshot server port." + }, + "vixServerMaxPorts": { + "type": "integer", + "format": "int32", + "description": "The number ports available (starting at vixServerPortsNum + 1) for vix service forked by snapshot service to handle vix related jobs." + }, + "vixServerSleepSecs": { + "type": "integer", + "format": "int32", + "description": "The number of seconds we sleep in between retries to connect to the forked vix disk server." + }, + "vixServerNumRetries": { + "type": "integer", + "format": "int32", + "description": "The number of retry attempts to connect to the forked vix disk server." + }, + "snapshotServerLogBufLevel": { + "type": "integer", + "format": "int32", + "description": "Snapshot server log buffer level." + }, + "snapshotServerLogVerbosity": { + "type": "integer", + "format": "int32", + "description": "Snapshot server log verbosity." + }, + "snapshotServerNumLogFiles": { + "type": "integer", + "format": "int32", + "description": "Number of log files in snapshot server." + }, + "snapshotServerExecInstrumentation": { + "type": "string", + "description": "Snapshot server execution instrumentation." + }, + "snapshotServerHashThreads": { + "type": "integer", + "format": "int32", + "description": "Number of hash threads in snapshot server." + }, + "patchFileCompressionForIngestJob": { + "type": "integer", + "format": "int32", + "description": "Patch file compression for ingest job." + }, + "patchFileCompressionForAllOtherJobs": { + "type": "integer", + "format": "int32", + "description": "Patch file compression for all other jobs." + }, + "useFingerprints": { + "type": "boolean", + "description": "True if we want to use fingerprints." + }, + "replicationThriftTokenLengthInBytes": { + "type": "integer", + "format": "int32", + "description": "Number of random bytes for the token before base64 enconding." + }, + "snapshotCancelationQuantumInBytes": { + "type": "integer", + "format": "int32", + "description": "Snapshot cancelation quantum in bytes." + }, + "pyVmwareServerThriftPort": { + "type": "integer", + "format": "int32", + "description": "Py Vmware Server thrift port." + }, + "vddkServerNumLogFiles": { + "type": "integer", + "format": "int32", + "description": "Number of log files in vddk server." + }, + "mountBootUpTimeoutInSeconds": { + "type": "integer", + "format": "int32", + "description": "Mount boot up timeout in seconds." + }, + "takeScreenShotOnBootUpFailures": { + "type": "boolean", + "description": "Whether to take screenshot on boot up failures." + }, + "refreshJobMaxFailedCount": { + "type": "integer", + "format": "int32", + "description": "A refresh will be run every x minutes, where x is the value of this param." + }, + "refreshJobMaxRunTimeInMins": { + "type": "integer", + "format": "int32", + "description": "Refresh job maximum run time in minutes." + }, + "disableNetworkOnClonedVMsByDefault": { + "type": "boolean", + "description": "Whether to disable network on mount/export." + }, + "exportCleanupDisabled": { + "type": "boolean", + "description": "Whether export cleanup is disabled." + }, + "mountCleanupDisabled": { + "type": "boolean", + "description": "Whether mount cleanup is disabled." 
+ }, + "maxConcurrentExportStreamsPerVm": { + "type": "integer", + "format": "int32", + "description": "Maximum concurrency in import/export disk." + }, + "vddkMinusOneDomainSocketPath": { + "type": "string", + "description": "Domain socket path for VDDK minus one server." + }, + "vddkDomainSocketPath": { + "type": "string", + "description": "Domain socket path for VDDK server." + }, + "useSDFSReadAheadForMigration": { + "type": "boolean", + "description": "use SDFS read ahead for migration datastore job to speed up." + } + } + }, + "SparkSettableGlobalCrystalConfig": { + "type": "object", + "properties": { + "isEmailNotificationEnabled": { + "type": "boolean", + "description": "Determines whether to send email notification to user or not." + }, + "enableDataClassificationPreviewer": { + "type": "boolean", + "description": "Whether to enable the Data Classification Previewer page." + } + } + }, + "SparkSettableGlobalLambdaConfig": { + "type": "object", + "properties": { + "enableAutomaticFmdUpload": { + "type": "boolean", + "description": "Whether to enable automatic upload of Filesystem Metadata for newly indexed snapshots." + }, + "enableLambdaParserService": { + "type": "boolean", + "description": "Whether to enable the lambda parser service." + }, + "lambdaParserServiceLimitInBytes": { + "type": "integer", + "format": "int32", + "description": "Parsed output size limit in bytes." + }, + "analyzeSnappableUmlGuestMemoryInMb": { + "type": "integer", + "format": "int32", + "description": "Amount of memory for UML guest in MB." + }, + "tikaForkParserWorkerMemoryInMb": { + "type": "integer", + "format": "int32", + "description": "Amount of memory for Tika fork parser worker in MB." + }, + "tikaForkParserAcquireWaitDurationMs": { + "type": "integer", + "format": "int32", + "description": "Amount of time to wait to acquire a ForkClient." + }, + "useLiteTikaParserConfig": { + "type": "boolean", + "description": "Use Tika config that has lighter resource usage." + }, + "tikaForkParserWorkerMaxFilesProcessed": { + "type": "integer", + "format": "int32", + "description": "Maximum number of files a tika fork worker can process before it is forcibly shut down." + }, + "ransomwareAnalysisTimeLimitInSeconds": { + "type": "integer", + "format": "int32", + "description": "Maximum duration, in seconds, of a ransomware analysis job." + }, + "lambdaAnalysisJobRetryLimit": { + "type": "integer", + "format": "int32", + "description": "Maximum number of times to retry any lambda analysis job." + }, + "contentAnalysisTimeLimitInSeconds": { + "type": "integer", + "format": "int32", + "description": "Maximum duration, in seconds, of a content analysis job." + }, + "enableFmdUploadForAllResources": { + "type": "boolean", + "description": "Whether to enable upload of filesystem metadata for all resources." + }, + "defaultDiffFmdUploadPrefix": { + "type": "string", + "description": "Prefix that is prepended to uploaded differential filesystem metadata." + }, + "defaultFullFmdUploadPrefix": { + "type": "string", + "description": "Prefix that is prepended to uploaded full filesystem metadata." + }, + "maxSnapshotsToUploadAutomatically": { + "type": "integer", + "format": "int32", + "description": "Maximum number of snapshots to upload at once when the filesystem metadata is uploaded automatically." + }, + "contentAnalysisPathBatchSize": { + "type": "integer", + "format": "int32", + "description": "Maximum number of paths to iterate over in a single call to LambdaServer. 
This prevents too much time spent in any single call; instead the call will return with a path cursor for the start of the next batch." + }, + "contentAnalysisAnalyzableBatchSize": { + "type": "integer", + "format": "int32", + "description": "Maximum number of files to analyze in a single call to LambdaServer. This prevents too much time spent in any single call; instead the call will return with a path cursor for the start of the next batch." + }, + "contentAnalysisMaxFilesBeforeMerge": { + "type": "integer", + "format": "int32", + "description": "Number of SSTables to keep around before preemptively issuing a merge prior to job completion. This intends to limit the number of files in a directory so that SDFS is not impacted. This number is also not a hard limit, so we could possibly exceed this value temporarily." + }, + "contentAnalysisJobIntervalInMinutes": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in minutes. Content analysis jobs will run at the specified interval for any snappables that have periodic content analysis policies configured." + }, + "enableContentAnalysisChildJobRetry": { + "type": "boolean", + "description": "Enable child job retries. Allows us to force fail a child job if we need to in production without losing the progress." + }, + "sendAuditConfigJobIntervalInMinutes": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in minutes. SendAuditConfig job will periodically send the AuditConfig to all Windows Hosts that have Auditing enabled." + } + } + }, + "SparkSettableGlobalPolarisConfig": { + "type": "object", + "properties": { + "useEncryptionForCloudStorageUpload": { + "type": "boolean", + "description": "use encryption for cloud storage upload." + } + } + }, + "UserSettableGlobalCerebroConfig": { + "type": "object", + "properties": { + "systemStorageNotificationThreshold": { + "type": "integer", + "format": "int32", + "description": "Initial warning threshold which triggers a system storage notification. The system sends a notification when the percentage of used storage capacity meets or exceeds the initial warning threshold. After the notification is sent, storage notifications are deactivated until the percentage of capacity drops below the warning reset value, or the percentage of used storage capacity meets or exceeds the higher secondary warning threshold value." + }, + "systemStorageNotificationResendThreshold": { + "type": "integer", + "format": "int32", + "description": "Secondary warning threshold which triggers additional system storage notifications. While the percentage of used storage capacity meets or exceeds the secondary warning threshold, the system continues to send storage notifications at regular specified intervals. The value of the secondary warning threshold must be greater than or equal to the value of the initial warning threshold." + }, + "systemStorageNotificationResendTimePeriodInMinutes": { + "type": "integer", + "format": "int32", + "description": "Numbers of minutes to wait between secondary warning threshold notifications." + }, + "systemStorageThresholdNotificationReset": { + "type": "integer", + "format": "int32", + "description": "Warning reset value for system storage notifications. The value represents a percentage of used storage capacity which triggers a reset of system storage notifications. When storage capacity drops below the warning reset value the initial warning threshold is reactivated. 
The warning reset value must be less than or equal to the value of the initial warning threshold." + }, + "systemStorageNotificationEnabled": { + "type": "boolean", + "description": "Used System Storage capacity notification. Set to true to enable notification. Set to false to disable notification." + } + } + }, + "UserSettableGlobalCrystalConfig": { + "type": "object", + "properties": { + "webSessionTimeoutMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes a web session can be idle before token expiration." + }, + "webSessionExpirationTimeMinutes": { + "type": "integer", + "format": "int32", + "description": "Number of minutes before a web session is expired regardless of last usage time." + } + } + }, + "UserSettableGlobalForgeConfig": { + "type": "object", + "properties": { + "parallelNodeAddEnabled": { + "type": "boolean", + "description": "Determines whether to run node addition in parallel." + }, + "enableCustomHostname": { + "type": "boolean", + "description": "Feature flag to enable custom hostname feature." + } + } + }, + "UserSettableGlobalHypervConfig": { + "type": "object", + "properties": { + "migrateFastVirtualDiskBuild": { + "type": "boolean", + "description": "A boolean flag that controls the use of the fast VHDX builder during Hyper-V virtual machine migration. When the flag is 'true', the Hyper-V VM uses the fast VHDX builder the next time, VM is backed up. A value of false disables the fast VHDX builder. This flag is used in combination with the maxFullMigrationStoragePercentage value." + }, + "maxFullMigrationStoragePercentage": { + "type": "integer", + "format": "int32", + "description": "Specifies a percentage of the total available storage space. When performing a full hyperv VM backup operation would bring the total used storage space above this threshold, the cluster takes incremental backups instead. This value is used in combination with the migrateFastVirtualDiskBuild flag." + }, + "hypervRemoveUnknownCheckpoints": { + "type": "boolean", + "description": "Indicates whether to remove unknown checkpoints." + } + } + }, + "UserSettableGlobalShieldConfig": { + "type": "object", + "properties": { + "useTeleport": { + "type": "boolean", + "description": "Use teleport infrastructure to open support tunnel." + } + } + }, + "UserSettableGlobalVolumeGroupConfig": { + "type": "object", + "properties": { + "migrateFastVirtualDiskBuild": { + "type": "string", + "description": "A flag that controls the use of the fast VHDX builder during volume group migration. When the value of the flag is 'Error-Only,' the volume group uses the fast VHDX builder when a pre-5.1 volume group backup operation fails during the fetch phase. When the value of the flag is 'All,' the volume group uses the fast VHDX builder the next time the volume group is backed up. Any other value disables the fast VHDX builder. This flag is used in combination with the maxFullMigrationStoragePercentage value." + }, + "maxFullMigrationStoragePercentage": { + "type": "integer", + "format": "int32", + "description": "Specifies a percentage of the total available storage space. When performing a full volume group backup operation would bring the total used storage space above this threshold, the cluster takes incremental backups instead. This value is used in combination with the migrateFastVirtualDiskBuild flag." 
+ } + } + }, + "InternalJobInstanceDetail": { + "type": "object", + "required": [ + "archived", + "id", + "isDisabled", + "jobType", + "nodeId", + "status" + ], + "properties": { + "id": { + "type": "string", + "description": "ID of the instance." + }, + "status": { + "type": "string", + "description": "Status of the job instance." + }, + "result": { + "type": "string", + "description": "Result of the job instance. Its meaning depends on the job type but is usually an ID." + }, + "errorInfo": { + "type": "string", + "description": "Error information of the job instance." + }, + "startTime": { + "type": "string", + "description": "Start time of the job instance." + }, + "endTime": { + "type": "string", + "description": "End time of the job instance." + }, + "jobType": { + "type": "string", + "description": "Type of the job." + }, + "nodeId": { + "type": "string", + "description": "ID of the node where the job runs." + }, + "jobProgress": { + "type": "number", + "format": "double", + "description": "The current progress in terms of percentage of the async request." + }, + "isDisabled": { + "type": "boolean", + "description": "Whether this job is disabled or not." + }, + "archived": { + "type": "boolean", + "description": "Whether this job instance has been archived." + }, + "childJobDebugInfo": { + "type": "string", + "description": "Some job types create other 'child' jobs to perform their work. Here we show information on how this job is being affected by its child jobs (if any)." + } + } + }, + "BackupJobConfig": { + "type": "object", + "required": [ + "vmId" + ], + "properties": { + "vmId": { + "type": "string", + "description": "ID of the VM to back up." + }, + "preferredReplicas": { + "type": "array", + "items": { + "type": "string" + } + }, + "isOnDemandSnapshot": { + "type": "boolean", + "description": "Indicates if snapshot is on-demand." + }, + "requestorId": { + "type": "string", + "description": "ID of the user who triggered the job." + }, + "config": { + "$ref": "#/definitions/BaseOnDemandSnapshotConfig" + } + } + }, + "CloudExpirationJobConfig": { + "type": "object", + "required": [ + "vmId" + ], + "properties": { + "vmId": { + "type": "string", + "description": "ID of the VM for which we are expiring snapshots." + } + } + }, + "CloudUploaderJobConfig": { + "type": "object", + "required": [ + "vmId" + ], + "properties": { + "vmId": { + "type": "string", + "description": "ID of the VM to start sending to cloud." + } + } + }, + "ExpireSnapshotJobConfig": { + "type": "object", + "required": [ + "snappableId" + ], + "properties": { + "snappableId": { + "type": "string", + "description": "ID of the snappable to expire snapshots for." + } + } + }, + "InstancesConfig": { + "type": "object", + "required": [ + "jobId" + ], + "properties": { + "jobId": { + "type": "string", + "description": "job id of the job." + }, + "status": { + "type": "string", + "description": "Status of the job." + } + } + }, + "JobRuntime": { + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "type": "string", + "description": "Composite job id of the job." + }, + "status": { + "type": "string", + "description": "Status of the job." + }, + "estimatedTimeRemaining": { + "type": "integer", + "format": "int32", + "description": "Estimated time remaining (in seconds) for the job." 
+ } + } + }, + "LogMessage": { + "type": "object", + "required": [ + "component", + "logLevel", + "message" + ], + "properties": { + "logLevel": { + "type": "string", + "description": "One of FATAL, ERROR, WARNING, INFO, TRACE." + }, + "component": { + "type": "string", + "description": "Component generating the log." + }, + "message": { + "type": "string", + "description": "Message to be logged." + } + } + }, + "ClusterConfigRec": { + "type": "object", + "required": [ + "id", + "status" + ], + "properties": { + "status": { + "type": "string" + }, + "id": { + "type": "integer", + "format": "int64" + } + } + }, + "ClusterIpConfig": { + "type": "object", + "required": [ + "gateway", + "netmask" + ], + "properties": { + "netmask": { + "type": "string" + }, + "gateway": { + "type": "string" + } + } + }, + "ClusterIpRec": { + "type": "object", + "required": [ + "id", + "status" + ], + "properties": { + "status": { + "type": "string" + }, + "id": { + "type": "integer", + "format": "int64" + } + } + }, + "FailureToleranceStatus": { + "type": "object", + "required": [ + "brik", + "disk", + "node" + ], + "properties": { + "brik": { + "type": "integer", + "format": "int32", + "description": "Number of brik failures allowed in the cluster under which the cluster remains fully functional." + }, + "node": { + "type": "integer", + "format": "int32", + "description": "Number of node failures allowed in the cluster under which the cluster remains fully functional." + }, + "disk": { + "type": "integer", + "format": "int32", + "description": "Number of disk failures allowed in the cluster under which the cluster remains fully functional." + } + } + }, + "ProxyConfig": { + "type": "object", + "required": [ + "host", + "protocol" + ], + "properties": { + "host": { + "type": "string" + }, + "port": { + "type": "integer", + "format": "int32" + }, + "protocol": { + "type": "string" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string", + "x-secret": true + } + } + }, + "ProxyConfigGet": { + "type": "object", + "required": [ + "host", + "protocol" + ], + "properties": { + "host": { + "type": "string" + }, + "port": { + "type": "integer", + "format": "int32" + }, + "protocol": { + "type": "string" + }, + "username": { + "type": "string" + } + } + }, + "ReplaceNodeConfig": { + "type": "object", + "required": [ + "newNodeId", + "oldNodeId", + "preserveHdds" + ], + "properties": { + "newNodeId": { + "type": "string" + }, + "oldNodeId": { + "type": "string" + }, + "preserveHdds": { + "type": "boolean" + }, + "ipmiPassword": { + "type": "string", + "x-secret": true + }, + "encryptionPassword": { + "type": "string", + "x-secret": true + }, + "isIpv4ManualDiscoveryMode": { + "type": "boolean" + } + } + }, + "ReplaceNodeRec": { + "type": "object", + "required": [ + "jobId", + "status" + ], + "properties": { + "status": { + "type": "string" + }, + "jobId": { + "type": "string" + } + } + }, + "ReplaceNodeStatus": { + "type": "object", + "required": [ + "ipConfig", + "ipmiConfig", + "message", + "metadataSetup", + "setupDisks", + "setupEncryptionAtRest", + "startServices", + "status" + ], + "properties": { + "status": { + "type": "string" + }, + "message": { + "type": "string" + }, + "ipConfig": { + "type": "string" + }, + "metadataSetup": { + "type": "string" + }, + "startServices": { + "type": "string" + }, + "ipmiConfig": { + "type": "string" + }, + "setupDisks": { + "type": "string" + }, + "setupEncryptionAtRest": { + "type": "string" + } + } + }, + "RouteConfig": { + "type": "object", + 
"required": [ + "device", + "gateway", + "netmask", + "network" + ], + "properties": { + "network": { + "type": "string" + }, + "netmask": { + "type": "string" + }, + "gateway": { + "type": "string" + }, + "device": { + "type": "string" + } + } + }, + "RouteDeletionConfig": { + "type": "object", + "required": [ + "netmask", + "network" + ], + "properties": { + "network": { + "type": "string" + }, + "netmask": { + "type": "string" + } + } + }, + "UnbootstrappedNodeInfo": { + "type": "object", + "required": [ + "gateway", + "ipAddress", + "netmask", + "nodeName", + "provisionedName" + ], + "properties": { + "nodeName": { + "type": "string", + "description": "Rubrik CDM node ID." + }, + "provisionedName": { + "type": "string", + "description": "The name assigned to the node during provisioning in the cloud." + }, + "ipAddress": { + "type": "string", + "description": "The primary IP address on bond0." + }, + "netmask": { + "type": "string", + "description": "The netmask of the primary IP address on bond0." + }, + "gateway": { + "type": "string", + "description": "The default gateway assigned to bond0." + } + } + }, + "DownloadFilesJobConfig": { + "type": "object", + "required": [ + "paths" + ], + "properties": { + "paths": { + "type": "array", + "description": "Array with the full paths of files and folders to download.", + "items": { + "type": "string" + } + }, + "legalHoldDownloadConfig": { + "description": "An optional argument containing a Boolean parameter to depict if the download is being triggered for Legal Hold use case.", + "$ref": "#/definitions/LegalHoldDownloadConfig" + } + } + }, + "ExportPathPair": { + "type": "object", + "required": [ + "dstPath", + "srcPath" + ], + "properties": { + "srcPath": { + "type": "string", + "description": "Original file path." + }, + "dstPath": { + "type": "string", + "description": "Destination path of export files." + } + } + }, + "ExportSnapshotToStandaloneHostRequest": { + "allOf": [ + { + "$ref": "#/definitions/MountExportSnapshotJobCommonOptions" + }, + { + "type": "object", + "required": [ + "datastoreName", + "hostIpAddress", + "hostPassword", + "hostUsername" + ], + "properties": { + "hostIpAddress": { + "type": "string", + "description": "The IP address of the standalone ESXi host." + }, + "datastoreName": { + "type": "string", + "description": "Name of the datastore to assign to the exported. virtual machine." + }, + "hostUsername": { + "type": "string", + "description": "The admin username of standalone ESXi host." + }, + "hostPassword": { + "type": "string", + "description": "The admin password of standalone ESXi host." + } + } + } + ] + }, + "JobDetail": { + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "type": "string", + "description": "ID of the job." + }, + "jobType": { + "type": "string", + "description": "Type of the job." + }, + "affinity": { + "type": "string", + "description": "Affinity of the job." + }, + "cronExpression": { + "type": "string", + "description": "Cron expression if it is a one time job." + }, + "interval": { + "type": "string", + "description": "Interval that define the frequency of a scheduled job." + }, + "retries": { + "type": "integer", + "format": "int32" + }, + "enabled": { + "type": "boolean", + "description": "Flag to enable/disable a job." + } + } + }, + "JobInstanceDetail": { + "type": "object", + "required": [ + "id", + "isDisabled", + "jobType", + "nodeId", + "status" + ], + "properties": { + "id": { + "type": "string", + "description": "ID of the instance." 
+ }, + "status": { + "type": "string", + "description": "Status of the job instance." + }, + "errorInfo": { + "type": "string", + "description": "Error information of the job instance." + }, + "startTime": { + "type": "string", + "description": "Start time of the job instance." + }, + "endTime": { + "type": "string", + "description": "End time of the job instance." + }, + "jobType": { + "type": "string", + "description": "Type of the job." + }, + "nodeId": { + "type": "string", + "description": "ID of the node where the job runs." + }, + "jobProgress": { + "type": "number", + "format": "double", + "description": "The current progress in terms of percentage of the async request." + }, + "isDisabled": { + "type": "boolean", + "description": "Whether this job is disabled or not." + } + } + }, + "JobUpdate": { + "type": "object", + "properties": { + "affinity": { + "type": "string", + "description": "Affinity of the job." + }, + "cronExpression": { + "type": "string", + "description": "Cron expression if it is a one time job." + }, + "interval": { + "type": "string", + "description": "Interval that define the frequency of a scheduled job." + }, + "retries": { + "type": "integer", + "format": "int32" + }, + "enabled": { + "type": "boolean", + "description": "Flag to enable/disable a job." + } + } + }, + "MountDiskJobConfig": { + "type": "object", + "properties": { + "targetVmId": { + "type": "string", + "description": "ID of the target virtual machine where the disks will be attached to. The default value will be the virtual machine of the snapshot." + }, + "vmdkIds": { + "type": "array", + "description": "The VMDK files to attach to the existing virtual machine. By default, this value is empty, which attaches all of the VMDKs in the snapshot to the target virtual machine.", + "items": { + "type": "string" + } + }, + "vlan": { + "type": "integer", + "format": "int32", + "description": "The VLAN used by the ESXi host to mount the datastore." + } + } + }, + "MountExportSnapshotJobCommonOptions": { + "type": "object", + "properties": { + "vmName": { + "type": "string", + "description": "Name of the new VM created by mount or export." + }, + "disableNetwork": { + "type": "boolean", + "description": "Sets the state of the network interfaces when the virtual machine is mounted or exported. Use 'false' to enable the network interfaces. Use 'true' to disable the network interfaces. Disabling the interfaces can prevent IP conflicts." + }, + "removeNetworkDevices": { + "type": "boolean", + "description": "Determines whether to remove the network interfaces from the mounted or exported virtual machine. Set to 'true' to remove all network interfaces. The default value is 'false'." + }, + "powerOn": { + "type": "boolean", + "description": "Determines whether the virtual machine should be powered on after mount or export. Set to 'true' to power on the virtual machine. Set to 'false' to mount or export the virtual machine but not power it on. The default is 'true'." + }, + "keepMacAddresses": { + "type": "boolean", + "description": "Determines whether the MAC addresses of the network interfaces on the source virtual machine are assigned to the new virtual machine. Set to 'true' to assign the original MAC addresses to the new virtual machine. Set to 'false' to assign new MAC addresses. The default is 'false'. When removeNetworkDevices is set to true, this property is ignored." 
+ } + } + }, + "MountSnapshotJobConfigInternalTest": { + "allOf": [ + { + "$ref": "#/definitions/MountExportSnapshotJobCommonOptions" + }, + { + "type": "object", + "properties": { + "hostId": { + "type": "string", + "description": "ID of the ESXi host to mount the new virtual machine on." + }, + "resourcePoolId": { + "type": "string", + "description": "ID of the resource pool where the new virtual machine will be mounted." + }, + "clusterId": { + "type": "string", + "description": "ID of the compute cluster where the new virtual machine will be mounted." + }, + "dataStoreName": { + "type": "string", + "description": "Obsolete parameter." + }, + "vlan": { + "type": "integer", + "format": "int32", + "description": "VLAN ID for the VLAN ESXi host prefer to use for mounting the datastore." + }, + "createDatastoreOnly": { + "type": "boolean", + "description": "The job creates a datastore that contains the VMDK, but does not create the corresponding virtual machine." + }, + "shouldRecoverTags": { + "type": "boolean", + "description": "A boolean that specifies whether the job recovers the tags assigned to the virtual machine. When this value is true, the job recovers the tags." + }, + "nodeAffinity": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + ] + }, + "RestoreFilesJobConfig": { + "type": "object", + "required": [ + "restoreConfig" + ], + "properties": { + "destObjectId": { + "type": "string", + "description": "Managed ID of the destination object that the files should be recovered to." + }, + "restoreConfig": { + "type": "array", + "description": "Absolute file path and restore path if not restored back to itself.", + "items": { + "$ref": "#/definitions/VmRestorePathPair" + } + }, + "domainName": { + "type": "string", + "description": "Domain name (Use . for local admin)." + }, + "username": { + "type": "string", + "description": "Username." + }, + "password": { + "type": "string", + "description": "Password.", + "x-secret": true + }, + "shouldUseAgent": { + "type": "boolean", + "description": "A Boolean that specifies whether to use the Rubrik Backup Service or VMware tools to restore files. When 'true', the RBS restores files. When 'false',the VMware tools restores files.", + "default": true + }, + "ignoreErrors": { + "type": "boolean", + "description": "Optional Boolean field to determine whether to ignore errors during restore jobs that use the Rubrik Backup Service. When 'true', errors are ignored. Default value is 'false', errors are not ignored.", + "default": false + }, + "shouldSaveCredentials": { + "type": "boolean", + "description": "A Boolean value that specifies whether to save the user-entered credentials. When 'true', the user-entered credentials are saved." + }, + "shouldRestoreXAttrs": { + "type": "boolean", + "description": "Boolean value that determines restore file settings for Linux systems and for Windows systems. For Linux, use 'true' to include the extended attributes of restored files. For Windows, use 'true' to include alternate data streams for restored files. For both, use 'false' to exclude this additional metadata." + } + } + }, + "RestorePathPair": { + "type": "object", + "required": [ + "path" + ], + "properties": { + "path": { + "type": "string", + "description": "Original file path to be restored." + }, + "restorePath": { + "type": "string", + "description": "Directory of the folder to copy files into. If this is empty, file will be restored back into original directory." 
+ } + } + }, + "VmRestorePathPair": { + "allOf": [ + { + "$ref": "#/definitions/RestorePathPair" + } + ] + }, + "ActiveDirectoryExportConfig": { + "type": "object", + "required": [ + "ldapServiceInfo", + "objectName", + "objectStorage", + "parallelizeJob" + ], + "properties": { + "objectName": { + "type": "string", + "description": "Object name on target storage location for storing AD export result." + }, + "parallelizeJob": { + "type": "boolean", + "description": "Whether or not the job should be parallelized." + }, + "ldapServiceInfo": { + "description": "LDAP Service information to connect to the correct LDAP server for querying AD for the export.", + "$ref": "#/definitions/LdapServiceInfo" + }, + "objectStorage": { + "description": "Target object storage location for the result of the AD export run.", + "$ref": "#/definitions/ObjectStorageConfig" + } + } + }, + "AnalyzeContentConfig": { + "type": "object", + "required": [ + "analyzerOptions", + "extWhitelist", + "sizeLimitInBytes", + "snappableId", + "uploadPrefix" + ], + "properties": { + "snappableId": { + "type": "string", + "description": "ManagedId of the snappable that will be analyzed." + }, + "extWhitelist": { + "type": "array", + "description": "File extensions that will be analyzed, e.g. [.docx, .pdf].", + "items": { + "type": "string" + } + }, + "sizeLimitInBytes": { + "type": "integer", + "format": "int32", + "description": "Maximum number of bytes to analyze in each file." + }, + "uploadPrefix": { + "type": "string", + "description": "Prefix to prepend to the object name." + }, + "analyzerOptions": { + "type": "string", + "description": "Options string that controls parameters of the various content analyzers.", + "x-secret": true + }, + "snapshotId": { + "type": "string", + "description": "ID of the snapshot that will be analyzed. Defaults to the latest indexed local snapshot if not specified." + }, + "previousSnapshotId": { + "type": "string", + "description": "ID of the snapshot to diff against." + }, + "objectStorage": { + "description": "Target object storage location for the results of the Lambda run. When not specified, the default Lambda object storage location is used.", + "$ref": "#/definitions/ObjectStorageConfig" + }, + "timeLimitInSeconds": { + "type": "integer", + "format": "int32", + "description": "Maximum number of seconds to spend on analysis." + }, + "enableInParallel": { + "type": "boolean", + "description": "Enable in parallel content analysis. The default value is false.", + "default": false + }, + "previewKey": { + "type": "string", + "description": "This can be the crawl ID or some other string to key the Hit Index for this crawl." + }, + "previewMetadata": { + "type": "string", + "description": "JSON-serialized blob of metadata used to support Previewer. CDM does not understand what this is. It is only used by Content Analyzer during the query codepath to process snippets for previewing.", + "x-secret": true + } + } + }, + "AnalyzeContentProgress": { + "type": "object", + "required": [ + "jobId", + "status" + ], + "properties": { + "jobId": { + "type": "string", + "description": "The ID of the job." + }, + "status": { + "type": "string", + "description": "Status of the job." + }, + "queueTime": { + "type": "string", + "format": "date-time", + "description": "The queue time of the job." + }, + "startTime": { + "type": "string", + "format": "date-time", + "description": "The start time of the job." 
+ }, + "endTime": { + "type": "string", + "format": "date-time", + "description": "The end time of the job." + }, + "currentStage": { + "description": "The current stage the job is on.", + "$ref": "#/definitions/ContentAnalysisStage" + }, + "pendingStages": { + "type": "array", + "description": "List of stages that are still pending.", + "items": { + "$ref": "#/definitions/ContentAnalysisStage" + } + }, + "finishedStages": { + "type": "array", + "description": "List of stages that are finished.", + "items": { + "$ref": "#/definitions/ContentAnalysisStage" + } + }, + "stats": { + "description": "Stats corresponding to the progress of the job.", + "$ref": "#/definitions/ContentAnalysisStats" + }, + "error": { + "description": "Any errors that were encountered.", + "$ref": "#/definitions/RequestErrorInfo" + } + } + }, + "AnalyzeContentProgressRequest": { + "type": "object", + "required": [ + "jobIds" + ], + "properties": { + "jobIds": { + "type": "array", + "description": "List of job IDs for which to get progress.", + "items": { + "type": "string" + } + } + } + }, + "AnalyzeContentProgressResponse": { + "type": "object", + "required": [ + "progressInfos" + ], + "properties": { + "progressInfos": { + "type": "array", + "description": "List of AnalyzeContentProgress for each requested job.", + "items": { + "$ref": "#/definitions/AnalyzeContentProgress" + } + } + } + }, + "AnalyzePeriodicContentProgress": { + "type": "object", + "required": [ + "jobInstanceId", + "snappableId", + "status" + ], + "properties": { + "snappableId": { + "type": "string", + "description": "Snappable managed ID." + }, + "jobInstanceId": { + "type": "integer", + "format": "int64", + "description": "Instance id of the job." + }, + "status": { + "type": "string", + "description": "Status of the snappable." + }, + "queueTime": { + "type": "string", + "format": "date-time", + "description": "The queue time of the job." + }, + "startTime": { + "type": "string", + "format": "date-time", + "description": "The start time of the job." + }, + "endTime": { + "type": "string", + "format": "date-time", + "description": "The end time of the job." 
+ }, + "currentStage": { + "description": "The current stage the job is on.", + "$ref": "#/definitions/ContentAnalysisStage" + }, + "pendingStages": { + "type": "array", + "description": "List of stages that are still pending.", + "items": { + "$ref": "#/definitions/ContentAnalysisStage" + } + }, + "finishedStages": { + "type": "array", + "description": "List of stages that are finished.", + "items": { + "$ref": "#/definitions/ContentAnalysisStage" + } + }, + "stats": { + "description": "Stats corresponding to the progress of the job.", + "$ref": "#/definitions/ContentAnalysisStats" + }, + "error": { + "description": "Any errors that were encountered.", + "$ref": "#/definitions/RequestErrorInfo" + } + } + }, + "AnalyzePeriodicContentProgressRequest": { + "type": "object", + "required": [ + "snappableIds" + ], + "properties": { + "snappableIds": { + "type": "array", + "description": "List of snappable managed IDs for which to get progress.", + "items": { + "type": "string" + } + } + } + }, + "AnalyzePeriodicContentProgressResponse": { + "type": "object", + "required": [ + "progressInfos" + ], + "properties": { + "progressInfos": { + "type": "array", + "description": "List of AnalyzePeriodicContentProgress for requested jobs.", + "items": { + "$ref": "#/definitions/AnalyzePeriodicContentProgress" + } + } + } + }, + "ClassificationPreview": { + "type": "object", + "required": [ + "analyzerId", + "endIdx", + "policyIds", + "startIdx", + "text" + ], + "properties": { + "text": { + "type": "string", + "description": "Snippet of text." + }, + "startIdx": { + "type": "integer", + "format": "int32", + "description": "Start index of the hit within the text, inclusive." + }, + "endIdx": { + "type": "integer", + "format": "int32", + "description": "End index of the hit within the text, exclusive." + }, + "policyIds": { + "type": "array", + "items": { + "type": "string", + "description": "Policies that contain this analyzer." + } + }, + "analyzerId": { + "type": "string", + "description": "Analyzer ID of the hit." + } + } + }, + "ClassificationPreviewList": { + "type": "object", + "required": [ + "analyzerIdPairs", + "osDirectory", + "osFilename", + "osPath", + "policyIdPairs", + "policySummaries", + "previews", + "queryPath", + "snappableManagedId", + "snappableName" + ], + "properties": { + "queryPath": { + "type": "string", + "description": "Standardized path of the file." + }, + "snappableManagedId": { + "type": "string", + "description": "Managed ID of the snappable." + }, + "snappableName": { + "type": "string", + "description": "Name of the snappable." + }, + "osPath": { + "type": "string", + "description": "OS native path of the file." + }, + "osDirectory": { + "type": "string", + "description": "OS native path of the file, directory only." + }, + "osFilename": { + "type": "string", + "description": "OS native path of the file, filename only." 
+ }, + "analyzerIdPairs": { + "type": "array", + "items": { + "description": "Pairs of analyzer IDs with analyzer names.", + "$ref": "#/definitions/IdNamePair" + } + }, + "policyIdPairs": { + "type": "array", + "items": { + "description": "Pairs of policy IDs with policy names.", + "$ref": "#/definitions/IdNamePair" + } + }, + "previews": { + "type": "array", + "items": { + "description": "List of preview snippets.", + "$ref": "#/definitions/ClassificationPreview" + } + }, + "policySummaries": { + "type": "array", + "items": { + "description": "List of per-policy summaries.", + "$ref": "#/definitions/ClassificationPreviewPolicySummary" + } + } + } + }, + "ClassificationPreviewPolicySummary": { + "type": "object", + "required": [ + "hits", + "policyId" + ], + "properties": { + "policyId": { + "type": "string", + "description": "Policy ID." + }, + "hits": { + "type": "integer", + "format": "int32", + "description": "Total hits for the policy." + } + } + }, + "ContentAnalysisStage": { + "type": "string", + "description": "A stage of a Lambda content analysis job.", + "enum": [ + "Queue", + "Prepare", + "Analyze", + "Upload", + "Cleanup", + "Finish" + ] + }, + "ContentAnalysisStats": { + "type": "object", + "required": [ + "numFilesAnalyzed", + "numFilesFailed", + "numFilesSkipped", + "numFilesTotal", + "numPathsTotal" + ], + "properties": { + "numPathsTotal": { + "type": "integer", + "format": "int64", + "description": "Number of total paths in the filesystem metadata." + }, + "numFilesTotal": { + "type": "integer", + "format": "int64", + "description": "Number of total files in the filesystem metadata." + }, + "numFilesAnalyzed": { + "type": "integer", + "format": "int64", + "description": "Number of files that have been succesfully analyzed." + }, + "numFilesFailed": { + "type": "integer", + "format": "int64", + "description": "Number of files that failed during analysis." + }, + "numFilesSkipped": { + "type": "integer", + "format": "int64", + "description": "Number of files that are skipped by analysis." + } + } + }, + "ContentAnalysisVersion": { + "type": "object", + "required": [ + "major", + "minor", + "patch" + ], + "properties": { + "major": { + "type": "integer", + "format": "int32", + "description": "The major version." + }, + "minor": { + "type": "integer", + "format": "int32", + "description": "The minor version." + }, + "patch": { + "type": "integer", + "format": "int32", + "description": "The patch version." + } + } + }, + "DataClassPolicy": { + "type": "object", + "required": [ + "analyzerOptions", + "chainId", + "extWhitelist", + "fileSizeLimit", + "policyMetadata", + "snappableId", + "uploadPrefix" + ], + "properties": { + "policyMetadata": { + "type": "string", + "description": "Arbitrary metadata associated with this policy config. This field will be used when determining whether a config already exists for a snappable i.e a policy configuration is uniquely defined by its policyMetadata." + }, + "snappableId": { + "type": "string", + "description": "Snappable managed ID for which to add the policy." + }, + "uploadPrefix": { + "type": "string", + "description": "Prefix to prepend to results uploaded from content analysis." + }, + "analyzerOptions": { + "type": "string", + "description": "Serialized string containing specialized config for analyzers used during content analysis.", + "x-secret": true + }, + "extWhitelist": { + "type": "array", + "items": { + "type": "string", + "description": "Array of file extensions that should be analyzed during content analysis." 
+ } + }, + "fileSizeLimit": { + "type": "integer", + "format": "int32", + "description": "Max size of files to analyze (in bytes)." + }, + "chainId": { + "type": "string", + "description": "ID of the config chain that this policy config is part of. This chainId is used to determine whether the new policy requires a full analysis." + }, + "previewMetadata": { + "type": "string", + "description": "JSON-serialized blob of metadata used to support Previewer. CDM does not understand what this is. It is only used by Content Analyzer during the query codepath to process snippets for previewing.", + "x-secret": true + } + } + }, + "DefaultObjectStorageDetail": { + "type": "object", + "required": [ + "objectStorageConfig" + ], + "properties": { + "objectStorageConfig": { + "description": "The default object storage location details.", + "$ref": "#/definitions/ObjectStorageDetail" + } + }, + "x-rk-nullable-properties": [ + "objectStorageConfig" + ] + }, + "DefaultObjectStorageUpdate": { + "type": "object", + "required": [ + "objectStorageConfig" + ], + "properties": { + "objectStorageConfig": { + "description": "The default object storage location configuration properties.", + "$ref": "#/definitions/ObjectStorageConfig" + } + }, + "x-rk-nullable-properties": [ + "objectStorageConfig" + ] + }, + "DetectRansomwareConfig": { + "type": "object", + "required": [ + "snapshotId", + "uploadPrefix" + ], + "properties": { + "snapshotId": { + "type": "string", + "description": "ID of the snapshot to analyze for ransomware." + }, + "previousSnapshotId": { + "type": "string", + "description": "ID of the previous snapshot." + }, + "uploadPrefix": { + "type": "string", + "description": "Prefix to prepend to the object name." + }, + "objectStorage": { + "description": "Target object storage location for the results of the Lambda run. When not specified, the default Lambda object storage location is used.", + "$ref": "#/definitions/ObjectStorageConfig" + }, + "timeLimitInSeconds": { + "type": "integer", + "format": "int32", + "description": "Maximum number of seconds to spend on analysis." + }, + "shouldScanFull": { + "type": "boolean", + "description": "Whether to scan all the files in the snapshot for ransomware analysis. If set to false, just the modified files will be scanned.", + "default": false + } + } + }, + "DownloadExternalFileConfig": { + "type": "object", + "required": [ + "destination", + "objectName", + "sha1Digest" + ], + "properties": { + "objectName": { + "type": "string", + "description": "Object name inside the default lambda storage bucket." + }, + "destination": { + "type": "string", + "description": "Destination file name to save the downloaded object as." + }, + "sha1Digest": { + "type": "string", + "description": "SHA1 digest of the file for integrity checking." + }, + "objectStorage": { + "description": "Target object storage location for the results of the Lambda run. When not specified, the default Lambda object storage location is used.", + "$ref": "#/definitions/ObjectStorageConfig" + } + } + }, + "LdapObject": { + "type": "object", + "required": [ + "commonName", + "objectGuid", + "sid" + ], + "properties": { + "sid": { + "type": "string", + "description": "Security descriptor." + }, + "objectGuid": { + "type": "string", + "description": "The unique identifier for an object." + }, + "commonName": { + "type": "string", + "description": "User name." + }, + "objectCategory": { + "type": "string", + "description": "Category the object belongs to." 
+ }, + "objectClass": { + "type": "string", + "description": "Object class to categorize groups and users." + }, + "distinguishedName": { + "type": "string", + "description": "X500 Distinguished Name." + }, + "member": { + "type": "string", + "description": "Objects that are direct members of group." + }, + "memberOf": { + "type": "string", + "description": "Groups of which the user is a direct member." + }, + "email": { + "type": "string", + "description": "Email address." + }, + "groups": { + "type": "array", + "description": "SIDs of the groups that this user belongs to.", + "items": { + "type": "string" + } + } + } + }, + "QuerySidsOnHostRequest": { + "type": "object", + "required": [ + "sids", + "snappableId" + ], + "properties": { + "snappableId": { + "type": "string", + "description": "ID assigned to the snappable that represents the host." + }, + "sids": { + "type": "array", + "description": "A list of SID strings to be resolved.", + "items": { + "type": "string" + } + } + } + }, + "QuerySidsOnHostResponse": { + "type": "object", + "required": [ + "sid_info" + ], + "properties": { + "sid_info": { + "type": "array", + "description": "A list of resolved sid objects.", + "items": { + "$ref": "#/definitions/SidInfo" + } + } + } + }, + "QuerySidsRequest": { + "type": "object", + "required": [ + "sids" + ], + "properties": { + "sids": { + "type": "array", + "description": "A list of SID strings to be resolved.", + "items": { + "type": "string" + } + } + } + }, + "QuerySidsResponse": { + "type": "object", + "required": [ + "ldap_info" + ], + "properties": { + "ldap_info": { + "type": "array", + "description": "A list of ldap objects with detailed information.", + "items": { + "$ref": "#/definitions/LdapObject" + } + } + } + }, + "ResolveSidConfig": { + "type": "object", + "required": [ + "sids", + "uploadObjectName" + ], + "properties": { + "sids": { + "type": "array", + "description": "a list of SID strings to be resolved.", + "items": { + "type": "string" + } + }, + "uploadObjectName": { + "type": "string", + "description": "Object name on target storage location for storing SID resolution result." + }, + "objectStorage": { + "description": "Target object storage location for the result of the resolve SID task run. When not specified, the default Lambda object storage location is used.", + "$ref": "#/definitions/ObjectStorageConfig" + } + } + }, + "ResourceLambdaConfiguration": { + "type": "object", + "required": [ + "isDiffFmdEnabled", + "isFullFmdEnabled", + "resourceId" + ], + "properties": { + "resourceId": { + "type": "string", + "description": "ID of the snappable resource this lambda config is for." + }, + "isDiffFmdEnabled": { + "type": "boolean", + "description": "Enable automatic diff FMD upload." + }, + "isFullFmdEnabled": { + "type": "boolean", + "description": "Enable automatic full FMD upload." + }, + "lastAnalyzedSnapshotId": { + "type": "string", + "description": "ID of the most recent analyzed snapshot." + }, + "lastJobId": { + "type": "string", + "description": "ID of the most recent lambda job to have been run against the snappable identified by resourceId." + } + } + }, + "ResourceLambdaConfigurationUpdate": { + "type": "object", + "required": [ + "isDiffFmdEnabled", + "isFullFmdEnabled" + ], + "properties": { + "isDiffFmdEnabled": { + "type": "boolean", + "description": "Enable automatic diff FMD upload." + }, + "isFullFmdEnabled": { + "type": "boolean", + "description": "Enable automatic full FMD upload." 
+ } + } + }, + "SidInfo": { + "type": "object", + "required": [ + "commonName", + "domainName", + "sid", + "sidType" + ], + "properties": { + "sid": { + "type": "string", + "description": "Security descriptor." + }, + "commonName": { + "type": "string", + "description": "User name." + }, + "domainName": { + "type": "string", + "description": "Domain name." + }, + "sidType": { + "type": "integer", + "format": "int32", + "description": "sid type." + } + } + }, + "SonarForceFullRequest": { + "type": "object", + "required": [ + "snappableId" + ], + "properties": { + "snappableId": { + "type": "string", + "description": "ID assigned to the snappable that is the source of the referenced snapshots." + } + } + }, + "SyncDataClassPoliciesRequest": { + "type": "object", + "required": [ + "policies" + ], + "properties": { + "policies": { + "type": "array", + "description": "List of all policy configurations that should exist.", + "items": { + "$ref": "#/definitions/DataClassPolicy" + } + } + } + }, + "UpgradeContentAnalyzerConfig": { + "type": "object", + "required": [ + "objectName", + "sha1Digest" + ], + "properties": { + "objectName": { + "type": "string", + "description": "Object name inside the lambda storage bucket." + }, + "sha1Digest": { + "type": "string", + "description": "SHA1 digest of the file for integrity checking." + }, + "signatureFile": { + "type": "string", + "description": "REQUIRED (5.1+). Signature file inside the lambda storage bucket used for verification of the binary." + }, + "objectStorage": { + "description": "Target object storage location for the results of the Lambda run. When not specified, the default Lambda object storage location is used.", + "$ref": "#/definitions/ObjectStorageConfig" + } + } + }, + "UploadFmdConfig": { + "type": "object", + "required": [ + "shouldUploadDiff", + "shouldUploadFull", + "snappableId", + "snapshotIds", + "uploadPrefix" + ], + "properties": { + "snappableId": { + "type": "string", + "description": "ID assigned to the snappable that is the source of the referenced snapshots." + }, + "snapshotIds": { + "type": "array", + "items": { + "type": "string", + "description": "Array containing the IDs of the snapshots selected for upload of filesystem metadata." + } + }, + "previousSnapshotIds": { + "type": "array", + "items": { + "type": "string", + "description": "Array containing the IDs of the specified previous snapshot that corresponds to the snapshot at the same position of snapshotIds." + } + }, + "uploadPrefix": { + "type": "string", + "description": "Prefix to prepend to the object name." + }, + "shouldUploadDiff": { + "type": "boolean", + "description": "Boolean that determines if the differential filesystem metadata should be uploaded." + }, + "shouldUploadFull": { + "type": "boolean", + "description": "Boolean that determines if the full filesystem metadata should be uploaded." + }, + "filterExtensions": { + "type": "array", + "items": { + "type": "string", + "description": "Array containing file extensions that will be kept in the filesystem metadata after filtering e.g. [.doc, .pdf, ...]." + } + }, + "objectStorage": { + "description": "Target object storage location for the results of the Lambda run. 
When not specified, the default Lambda object storage location is used.", + "$ref": "#/definitions/ObjectStorageConfig" + } + } + }, + "UploadRecentFmdConfig": { + "type": "object", + "required": [ + "numberOfSnapshotsToUpload", + "shouldUploadDiff", + "snappableId", + "uploadPrefix" + ], + "properties": { + "snappableId": { + "type": "string", + "description": "ID assigned to the snappable that is the source of the snapshots to upload." + }, + "numberOfSnapshotsToUpload": { + "type": "integer", + "format": "int32", + "description": "The number of recent snapshots to upload." + }, + "uploadPrefix": { + "type": "string", + "description": "Prefix to prepend to the object name." + }, + "shouldUploadDiff": { + "type": "boolean", + "description": "Boolean that determines if the filesystem metadata uploaded are full or differentials." + }, + "objectStorage": { + "description": "Target object storage location for the results of the Lambda run. When not specified, the default Lambda object storage location is used.", + "$ref": "#/definitions/ObjectStorageConfig" + } + } + }, + "Language": { + "type": "string", + "description": "Type of langauge.", + "enum": [ + "English", + "Japanese" + ] + }, + "AdvancedLdapConfiguration": { + "type": "object", + "properties": { + "groupSearchFilter": { + "type": "string", + "description": "A string representation of the LDAP group search filter in RFC4515 format. For example, a group search filter for Active Directory has the string representation (objectCategory=group)." + }, + "groupMemberAttribute": { + "type": "string", + "description": "LDAP field that contains the group members. For example, Active Directory uses the field \"member\"." + }, + "groupMembershipAttribute": { + "type": "string", + "description": "Points to the group that this entry belongs to. For example, Active Directory uses the field \"memberOf\"." + }, + "groupMaxLevel": { + "type": "integer", + "format": "int32", + "description": "Maximum level of groups to query. Set to 1 to query the immediate groups to which a user belongs. Leave blank to query all the groups to which a user belongs. Valid values are between 1 and 50 inclusive. When ldapSearchAcrossIntegrations is set to be true, this value is ignored. When this value is set, then for this ldap service, ldapActiveDirectoryDisableMatchingRuleInChain is ignored and assumed to be true." + }, + "userSearchFilter": { + "type": "string", + "description": "A string representation of the LDAP user search filter in RFC4515 format. For example, an Active Directory user search filter that selects all enabled user objects has the following string representation (&(objectCategory=person) (objectClass=user) (!(userAccountControl:1.2.840.113556.1.4.803:=2)))." + }, + "userNameSearchAttribute": { + "type": "string", + "description": "Specifies the user name. Active Directory searches can use the attributes sAMAccountName and userPrincipalName." + } + } + }, + "LdapDomainName": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string", + "description": "Name of an LDAP domain." + } + } + }, + "LdapServiceInfo": { + "type": "object", + "required": [ + "bindUserName", + "bindUserPassword" + ], + "properties": { + "dynamicDnsName": { + "type": "string", + "description": "Dynamic DNS name for locating authentication servers." + }, + "bindUserName": { + "type": "string", + "description": "The name of the user that searches the authentication server for other users." 
+ }, + "bindUserPassword": { + "type": "string", + "description": "Password for the bind user.", + "x-secret": true + }, + "baseDn": { + "type": "string", + "description": "The path to the directory where searches for users begin.", + "x-hidden": true + }, + "authServers": { + "type": "array", + "description": "An ordered list of authentication servers. Servers on this list have priority over servers discovered using dynamic DNS.", + "items": { + "type": "string" + } + }, + "name": { + "type": "string", + "description": "Human friendly name." + }, + "advancedOptions": { + "$ref": "#/definitions/AdvancedLdapConfiguration" + }, + "mfaServerId": { + "type": "string", + "description": "MFA server associated with LDAP service." + }, + "isTotpEnforced": { + "type": "boolean", + "description": "Indicates whether the time-based one time password (TOTP) authentication method is being enforced. Returns true when TOTP is enforced and false when TOTP is not enforced.\n" + }, + "certificateId": { + "type": "string", + "description": "ID of the imported certificate to use for connections to this server." + } + } + }, + "LegalHoldDownloadConfig": { + "type": "object", + "required": [ + "isLegalHoldDownload" + ], + "properties": { + "isLegalHoldDownload": { + "type": "boolean", + "description": "Specifies if the download action is in response to a Legal Hold. This download generates a SHA1 checksum of downloaded data that external bodies can use for integrity verification." + } + } + }, + "Link": { + "type": "object", + "required": [ + "href", + "rel" + ], + "properties": { + "href": { + "type": "string", + "description": "The destination of the link." + }, + "rel": { + "type": "string", + "description": "The relation of the destination of this link to the current resource." 
+ } + } + }, + "ManagedIdList": { + "type": "object", + "required": [ + "managedIds" + ], + "properties": { + "managedIds": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "ManagedObjectAncestor": { + "type": "object", + "required": [ + "managedId", + "name" + ], + "properties": { + "managedId": { + "type": "string" + }, + "name": { + "type": "string" + } + } + }, + "ManagedObjectDescendantCounts": { + "allOf": [ + { + "$ref": "#/definitions/ManagedObjectDescendantCountAppBlueprintFields" + }, + { + "$ref": "#/definitions/ManagedObjectDescendantCountFilesetFields" + }, + { + "$ref": "#/definitions/ManagedObjectDescendantCountMssqlFields" + }, + { + "$ref": "#/definitions/ManagedObjectDescendantCountOracleFields" + }, + { + "$ref": "#/definitions/ManagedObjectDescendantCountSapHanaFields" + }, + { + "$ref": "#/definitions/ManagedObjectDescendantCountStorageArrayVolumeGroupFields" + }, + { + "$ref": "#/definitions/ManagedObjectDescendantCountVcdVappFields" + }, + { + "$ref": "#/definitions/ManagedObjectDescendantCountVolumeGroupFields" + }, + { + "type": "object", + "properties": { + "virtualMachine": { + "type": "integer", + "format": "int32" + } + } + } + ] + }, + "ManagedObjectLocations": { + "type": "object", + "properties": { + "folder": { + "type": "array", + "description": "VMware folder hierarchy.", + "items": { + "$ref": "#/definitions/ManagedObjectAncestor" + } + }, + "infrastructure": { + "type": "array", + "description": "Infrastructure hierarchy (cluster/host/server/VM).", + "items": { + "$ref": "#/definitions/ManagedObjectAncestor" + } + }, + "physical": { + "type": "array", + "description": "Physical host hierarchy.", + "items": { + "$ref": "#/definitions/ManagedObjectAncestor" + } + } + } + }, + "ManagedObjectProperties": { + "type": "object", + "properties": { + "hostname": { + "type": "string" + }, + "clusterName": { + "type": "string" + }, + "operatingSystem": { + "type": "string" + }, + "operatingSystemType": { + "type": "string" + }, + "instanceName": { + "type": "string", + "description": "Name of the cloud native virtual machine instance." + } + } + }, + "ManagedObjectSummary": { + "type": "object", + "required": [ + "descendantCounts", + "isDeleted", + "locations", + "managedId", + "name", + "objectType", + "primaryClusterId", + "properties" + ], + "properties": { + "managedId": { + "type": "string" + }, + "objectType": { + "type": "string", + "description": "Type of the object (e.g., VirtualMachine)." + }, + "name": { + "type": "string", + "description": "Name of the object (e.g., VM name or physical host hostname." + }, + "primaryClusterId": { + "type": "string", + "description": "ID of the primary cluster." + }, + "isDeleted": { + "type": "boolean", + "description": "Whether or not this managed object is deleted." + }, + "isRelic": { + "type": "boolean", + "description": "Whether or not this managed object is a relic. Only returned for snappable nodes." + }, + "effectiveSlaDomainId": { + "type": "string", + "description": "Id of the effective sla domain. Only returned for snappable nodes." + }, + "effectiveSlaDomainName": { + "type": "string", + "description": "Name of the effective sla domain. Only returned for snappable nodes." 
+ }, + "descendantCounts": { + "description": "Count of all the descendants of each type (recursively).", + "$ref": "#/definitions/ManagedObjectDescendantCounts" + }, + "locations": { + "description": "Paths from the hierarchy root to the object (multiple may exist), expressed as arrays of managed IDs.", + "$ref": "#/definitions/ManagedObjectLocations" + }, + "properties": { + "description": "Properties of the object.", + "$ref": "#/definitions/ManagedObjectProperties" + }, + "isEffectiveSlaDomainRetentionLocked": { + "type": "boolean", + "description": "A Boolean that indicates whether the effective SLA Domain is Retention Locked. When this value is 'true', the effective SLA Domain is a Retention Lock SLA Domain." + } + } + }, + "ManagedObjectSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/ManagedObjectSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "ManagedVolumeApplicationTag": { + "type": "string", + "description": "Application whose data will be stored in managed volume.", + "enum": [ + "Oracle", + "OracleIncremental", + "MsSql", + "SapHana", + "SapHanaLog", + "MySql", + "PostgreSql", + "DbTransactionLog", + "RecoverX" + ] + }, + "ManagedVolumeChannelConfig": { + "type": "object", + "required": [ + "ipAddress", + "mountPoint" + ], + "properties": { + "ipAddress": { + "type": "string", + "description": "IP address of channel export." + }, + "mountPoint": { + "type": "string", + "description": "The path of the NFS mount if exported over NFS, or the SMB share name if exported over SMB." + }, + "hostMountPoint": { + "type": "string", + "description": "Directory path on the host machine used to export the NFS mount or SMB share." + } + } + }, + "ManagedVolumeConfig": { + "type": "object", + "required": [ + "exportConfig", + "name", + "volumeSize" + ], + "properties": { + "name": { + "type": "string", + "description": "Name of the managed volume." + }, + "mvType": { + "$ref": "#/definitions/ManagedVolumeType" + }, + "applicationTag": { + "description": "Application whose data will be stored by this managed volume, like - Oracle, SAP Hana, MS SQL, etc.", + "$ref": "#/definitions/ManagedVolumeApplicationTag" + }, + "numChannels": { + "type": "integer", + "format": "int32", + "description": "Number of channels to divide the volume into. Each channel provides a unique share to write to.", + "minimum": 0 + }, + "subnet": { + "type": "string", + "description": "IP subnet that specifies an outgoing VLAN interface for a Rubrik node. This is a required value when creating a managed volume on a Rubrik node that has multiple VLAN interfaces." + }, + "volumeSize": { + "type": "integer", + "format": "int64", + "description": "Maximum capacity for the volume across all the channels.", + "minimum": 0 + }, + "exportConfig": { + "$ref": "#/definitions/ManagedVolumeExportConfig" + }, + "slaClientConfig": { + "$ref": "#/definitions/SlaManagedVolumeClientConfig" + } + } + }, + "ManagedVolumeDetail": { + "allOf": [ + { + "$ref": "#/definitions/ManagedVolumeSummary" + } + ] + }, + "ManagedVolumeDownloadFileJobConfig": { + "type": "object", + "required": [ + "path" + ], + "properties": { + "path": { + "type": "string", + "description": "Absolute file path." 
+ }, + "legalHoldDownloadConfig": { + "description": "An optional argument containing a Boolean parameter to depict if the download is being triggered for Legal Hold use case.", + "$ref": "#/definitions/LegalHoldDownloadConfig" + } + } + }, + "ManagedVolumeDownloadFilesJobConfig": { + "type": "object", + "required": [ + "paths" + ], + "properties": { + "paths": { + "type": "array", + "description": "An array that contains the full source path of each file and folder in a download job. This array must contain at least one path. All Windows paths in the array must be on the same disk.", + "items": { + "type": "string" + } + }, + "legalHoldDownloadConfig": { + "description": "An optional argument containing a Boolean parameter to depict if the download is being triggered for Legal Hold use case.", + "$ref": "#/definitions/LegalHoldDownloadConfig" + } + } + }, + "ManagedVolumeExport": { + "type": "object", + "required": [ + "channels", + "config", + "isActive" + ], + "properties": { + "isActive": { + "type": "boolean", + "description": "Is export active." + }, + "channels": { + "type": "array", + "description": "Channels of this export.", + "items": { + "$ref": "#/definitions/ManagedVolumeChannelConfig" + } + }, + "config": { + "$ref": "#/definitions/ManagedVolumeExportConfig" + } + } + }, + "ManagedVolumeExportConfig": { + "allOf": [ + { + "$ref": "#/definitions/ManagedVolumePatchConfig" + }, + { + "type": "object", + "properties": { + "subnet": { + "type": "string", + "description": "IP subnet that specifies an outgoing VLAN interface for a Rubrik node. This is a required value when creating a managed volume on a Rubrik node that has multiple VLAN interfaces." + }, + "shareType": { + "description": "Specifies if the managed volume is exported over NFS or SMB. This defaults to NFS if this optional property is not specified.", + "$ref": "#/definitions/ManagedVolumeShareType" + } + } + } + ] + }, + "ManagedVolumeInflightSnapshotSummary": { + "type": "object", + "required": [ + "snapshotId" + ], + "properties": { + "snapshotId": { + "type": "string", + "description": "ID of the snapshot that will be created when the snapshot ends." + }, + "ownerId": { + "type": "string", + "description": "An ID representing the owner of a snapshot." + } + } + }, + "ManagedVolumePatchConfig": { + "type": "object", + "properties": { + "hostPatterns": { + "type": "array", + "description": "List of host patterns. A host pattern describes a set of hosts who can mount the host. It can either be a host name, a network in CIDR notation or hostnames matching wildcards * or ?.", + "items": { + "type": "string" + } + }, + "nodeHint": { + "type": "array", + "description": "List of node-ids to use for mounting this managed volume channels. Caller should specify at least one node per channel in the managed volume. If the nodeHint is not provided, system will randomly select a subset of nodes in cluster to mount the channels.", + "items": { + "type": "string" + } + }, + "smbDomainName": { + "type": "string", + "description": "Valid Active Directory domain name for users accessing this managed volume over SMB." + }, + "smbValidUsers": { + "type": "array", + "description": "List of valid usersnames in the domain that can access the SMB share for this managed volume. This parameter is required when the value of shareType is SMB.", + "items": { + "type": "string" + } + }, + "smbValidIps": { + "type": "array", + "description": "List of valid SMB host IP addresses that can access the SMB share for this managed volume. 
This parameter is required when the value of shareType is SMB.", + "items": { + "type": "string" + } + } + } + }, + "ManagedVolumePatchSlaClientConfig": { + "type": "object", + "required": [ + "shouldDisablePostBackupScriptOnBackupFailure", + "shouldDisablePostBackupScriptOnBackupSuccess", + "shouldDisablePreBackupScript" + ], + "properties": { + "clientHostId": { + "type": "string", + "description": "The ID of the host that mounts the managed volume channels and where the backup scripts run." + }, + "username": { + "type": "string", + "description": "The name of the user that runs the scripts on the host." + }, + "backupScriptCommand": { + "type": "string", + "description": "The full command with arguments to execute the main backup script that backs up data from the host." + }, + "backupScriptTimeout": { + "type": "integer", + "format": "int64", + "description": "An optional timeout for the main backup script in seconds. When this value is 0 or unspecified no timeout is used.", + "minimum": 0 + }, + "shouldDisablePreBackupScript": { + "type": "boolean", + "description": "Specifies whether to disable the execution of the optional pre-backup script.", + "default": false + }, + "preBackupScriptCommand": { + "type": "string", + "description": "The full command with arguments to execute the optional pre-backup script that runs after data backup is complete." + }, + "preBackupScriptTimeout": { + "type": "integer", + "format": "int64", + "description": "An optional timeout for the pre-backup script in seconds. When this value is 0 or unspecified no timeout is used.", + "minimum": 0 + }, + "shouldDisablePostBackupScriptOnBackupSuccess": { + "type": "boolean", + "description": "Specifies whether to disable the execution of the optional post-backup script that runs after data backup is complete.", + "default": false + }, + "postBackupScriptOnBackupSuccessCommand": { + "type": "string", + "description": "The full command with arguments to execute the optional post-backup script that runs after data backup is complete." + }, + "postBackupScriptOnBackupSuccessTimeout": { + "type": "integer", + "format": "int64", + "description": "An optional timeout for the post-backup script that runs after data backup is complete in seconds. When this value is 0 or unspecified no timeout is used.", + "minimum": 0 + }, + "shouldDisablePostBackupScriptOnBackupFailure": { + "type": "boolean", + "description": "Specifies whether to disable the execution of the optional post-backup script that runs after unsuccessful data backup.", + "default": false + }, + "postBackupScriptOnBackupFailureCommand": { + "type": "string", + "description": "The full command with arguments to execute the optional post-backup script that runs after unsuccessful data backup." + }, + "postBackupScriptOnBackupFailureTimeout": { + "type": "integer", + "format": "int64", + "description": "An optional timeout for the post-backup script that runs after unsuccessful data backup in seconds. When this value is 0 or unspecified no timeout is used.", + "minimum": 0 + }, + "shouldCancelBackupOnPreBackupScriptFailure": { + "type": "boolean", + "description": "Specifies whether a failure of the pre-backup script halts the backup process." 
+ }, + "channelHostMountPaths": { + "type": "array", + "description": "A list of mount paths where the host mounts individual channels for managed volumes.", + "items": { + "type": "string" + } + } + } + }, + "ManagedVolumeResize": { + "type": "object", + "properties": { + "newSize": { + "type": "integer", + "format": "int64", + "description": "New size of the managed volume." + } + } + }, + "ManagedVolumeShareType": { + "type": "string", + "description": "Type of exported share.", + "enum": [ + "NFS", + "SMB" + ] + }, + "ManagedVolumeSlaExportConfig": { + "allOf": [ + { + "$ref": "#/definitions/ManagedVolumeExportConfig" + }, + { + "type": "object", + "required": [ + "hostId", + "hostMountPaths" + ], + "properties": { + "hostId": { + "type": "string", + "description": "Managed ID of the host on which this snapshot export is supposed to be mounted." + }, + "hostMountPaths": { + "type": "array", + "description": "Valid paths on the host where the NFS/SMB mount points from this snapshot export are to be mounted.", + "items": { + "type": "string" + } + } + } + } + ] + }, + "ManagedVolumeSlaObjectCount": { + "type": "object", + "properties": { + "numManagedVolumes": { + "type": "integer", + "format": "int32", + "description": "The number of Managed volumes protected under this SLA Domain." + } + } + }, + "ManagedVolumeSnapshotConfig": { + "type": "object", + "properties": { + "retentionConfig": { + "$ref": "#/definitions/BaseOnDemandSnapshotConfig" + } + } + }, + "ManagedVolumeSnapshotDetail": { + "allOf": [ + { + "$ref": "#/definitions/ManagedVolumeSnapshotSummary" + } + ] + }, + "ManagedVolumeSnapshotExportSummary": { + "allOf": [ + { + "$ref": "#/definitions/ManagedVolumeExport" + }, + { + "type": "object", + "required": [ + "exportedDate", + "id", + "mvType", + "snapshotDate", + "snapshotId", + "sourceManagedVolumeId", + "sourceManagedVolumeName" + ], + "properties": { + "id": { + "type": "string", + "description": "ID of managed volume snapshot export." + }, + "snapshotId": { + "type": "string", + "description": "Snapshot this export is based off." + }, + "snapshotDate": { + "type": "string", + "format": "date-time", + "description": "Date of the snapshot this export is based off." + }, + "sourceManagedVolumeId": { + "type": "string", + "description": "ID of the managed volume this export belongs to." + }, + "sourceManagedVolumeName": { + "type": "string", + "description": "Name of the managed volume this export belongs to." + }, + "exportedDate": { + "type": "string", + "format": "date-time", + "description": "Exported date of the managed volume snapshot." + }, + "mvType": { + "$ref": "#/definitions/ManagedVolumeType" + } + } + } + ] + }, + "ManagedVolumeSnapshotExportSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/ManagedVolumeSnapshotExportSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." 
+ } + } + }, + "ManagedVolumeSnapshotLinks": { + "type": "object", + "properties": { + "exportLink": { + "$ref": "#/definitions/Link" + }, + "self": { + "$ref": "#/definitions/Link" + } + } + }, + "ManagedVolumeSnapshotReference": { + "allOf": [ + { + "$ref": "#/definitions/ManagedVolumeSnapshotReferenceDefinition" + }, + { + "type": "object", + "required": [ + "refId" + ], + "properties": { + "refId": { + "type": "string", + "description": "A unique string representing a reference to a snapshot." + } + } + } + ] + }, + "ManagedVolumeSnapshotReferenceDefinition": { + "allOf": [ + { + "$ref": "#/definitions/ManagedVolumeSnapshotReferencePatch" + }, + { + "type": "object", + "required": [ + "ownerId" + ], + "properties": { + "ownerId": { + "type": "string", + "description": "An ID representing the owner of a snapshot. All references to a snapshot must use the same ID." + } + } + } + ] + }, + "ManagedVolumeSnapshotReferencePatch": { + "type": "object", + "properties": { + "expiryDurationInMinutes": { + "type": "integer", + "format": "int32", + "description": "Specifies a time interval in minutes. This reference expires from the snapshot after the specified interval. A value of -1 indicates that the snapshot does not expire." + } + } + }, + "ManagedVolumeSnapshotReferenceSummary": { + "type": "object", + "required": [ + "addedSnapshotReferenceCount", + "references" + ], + "properties": { + "addedSnapshotReferenceCount": { + "type": "integer", + "format": "int32", + "description": "Count of references added to the in-flight snapshot. This also includes references which were added and then removed from the in-flight snapshot." + }, + "references": { + "type": "array", + "description": "List of currently active references in the in-flight snapshot.", + "items": { + "$ref": "#/definitions/ManagedVolumeSnapshotReference" + } + } + } + }, + "ManagedVolumeSnapshotReferenceWrapper": { + "type": "object", + "properties": { + "reference": { + "description": "A wrapper around ManagedVolumeSnapshotReference to be used when an optional argument is needed.", + "$ref": "#/definitions/ManagedVolumeSnapshotReference" + } + } + }, + "ManagedVolumeSnapshotSummary": { + "allOf": [ + { + "$ref": "#/definitions/BaseSnapshotSummary" + }, + { + "type": "object", + "required": [ + "links" + ], + "properties": { + "links": { + "description": "Links to actions available on the snapshot.", + "$ref": "#/definitions/ManagedVolumeSnapshotLinks" + }, + "isQueuedSnapshot": { + "type": "boolean", + "description": "A Boolean that specifies whether the snapshot is queued, to be stored as a patch file. When this value is 'true', the snapshot is in queue and not yet stored as a patch file.", + "default": false + } + } + } + ] + }, + "ManagedVolumeSnapshotSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/ManagedVolumeSnapshotSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." 
+ } + } + }, + "ManagedVolumeState": { + "type": "string", + "description": "State of a managed volume.", + "enum": [ + "ExportRequested", + "Exporting", + "Exported", + "UnexportRequested", + "Unexporting", + "Destroyed", + "ResetRequested", + "Resetting", + "ResizeRequested", + "Resizing", + "SnapshotRequested", + "Snapshotting" + ] + }, + "ManagedVolumeSummary": { + "allOf": [ + { + "$ref": "#/definitions/Snappable" + }, + { + "type": "object", + "required": [ + "hostPatterns", + "isDeleted", + "isRelic", + "isWritable", + "numChannels", + "pendingSnapshotCount", + "shareType", + "snapshotCount", + "state", + "usedSize", + "volumeSize" + ], + "properties": { + "mvType": { + "$ref": "#/definitions/ManagedVolumeType" + }, + "snapshotCount": { + "type": "integer", + "format": "int32", + "description": "Number of snapshots." + }, + "pendingSnapshotCount": { + "type": "integer", + "format": "int32", + "description": "Combined total of in-progress snapshots and pending snapshots." + }, + "isRelic": { + "type": "boolean", + "description": "Is managed volume a relic." + }, + "applicationTag": { + "description": "Application whose data will be stored by this managed volume, like - Oracle, SAP Hana, MS SQL, etc.", + "$ref": "#/definitions/ManagedVolumeApplicationTag" + }, + "numChannels": { + "type": "integer", + "format": "int32", + "description": "Number of channels to divide the volume into. Each channel provides a unique share to write to." + }, + "volumeSize": { + "type": "integer", + "format": "int64", + "description": "Maximum capacity for the volume across all the channels in bytes." + }, + "usedSize": { + "type": "integer", + "format": "int64", + "description": "Used capacity for the volume across all the channels in bytes." + }, + "state": { + "description": "Managed volume state like exported, resetting etc.,.", + "$ref": "#/definitions/ManagedVolumeState" + }, + "hostPatterns": { + "type": "array", + "description": "List of host patterns. A host pattern describes a set of hosts who can mount the host. It can either be a host name, a network in CIDR notation or hostnames matching wildcards * or ?.", + "items": { + "type": "string" + } + }, + "mainExport": { + "$ref": "#/definitions/ManagedVolumeExport" + }, + "isWritable": { + "type": "boolean", + "description": "Indicates whether managed volume is open for writes." + }, + "links": { + "type": "array", + "description": "List of links for the managed volume.", + "items": { + "$ref": "#/definitions/Link" + } + }, + "isDeleted": { + "type": "boolean", + "description": "Indicates whether the managed volume is deleted." + }, + "shareType": { + "description": "Specifies if the managed volume is exported over NFS or SMB.", + "$ref": "#/definitions/ManagedVolumeShareType" + }, + "smbDomainName": { + "type": "string", + "description": "Valid Active Directory domain name for users accessing this managed volume over SMB." + }, + "smbValidUsers": { + "type": "array", + "description": "List of valid usersnames in the domain that can access the SMB share for this managed volume. This parameter is required when the value of shareType is SMB.", + "items": { + "type": "string" + } + }, + "smbValidIps": { + "type": "array", + "description": "List of valid SMB host IP addresses that can access the SMB share for this managed volume. This parameter is required when the value of shareType is SMB.", + "items": { + "type": "string" + } + }, + "subnet": { + "type": "string", + "description": "Specify the subnet associated with the managed volume." 
+ }, + "slaManagedVolumeDetails": { + "description": "The additional details specific to SLA Managed Volumes.", + "$ref": "#/definitions/SlaManagedVolumeDetail" + }, + "pendingSlaDomain": { + "description": "Describes any pending SLA Domain assignment on this object.", + "$ref": "#/definitions/ManagedObjectPendingSlaInfo" + } + } + } + ] + }, + "ManagedVolumeSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/ManagedVolumeSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "ManagedVolumeType": { + "type": "string", + "description": "Type of managed volume.", + "enum": [ + "AlwaysMounted", + "SlaBased" + ] + }, + "ManagedVolumeUpdate": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Change the name of this managed volume." + }, + "subnet": { + "type": "string", + "description": "Change the IP subnet that specifies an outgoing VLAN interface for a Rubrik node. This option is only available for SLA Managed Volumes." + }, + "configuredSlaDomainId": { + "type": "string", + "description": "Assign this managed volume to the given SLA domain. Existing snapshots of the object will be retained with the configuration of specified SLA Domain." + }, + "volumeSize": { + "type": "integer", + "format": "int64", + "description": "Increase capacity for the volume across all the channels.", + "minimum": 0 + }, + "config": { + "$ref": "#/definitions/ManagedVolumePatchConfig" + }, + "slaClientConfig": { + "$ref": "#/definitions/ManagedVolumePatchSlaClientConfig" + } + } + }, + "SlaManagedVolumeClientConfig": { + "type": "object", + "required": [ + "backupScript", + "channelHostMountPaths", + "clientHostId", + "username" + ], + "properties": { + "clientHostId": { + "type": "string", + "description": "The ID of the host that mounts the managed volume channels and where the backup scripts run." + }, + "username": { + "type": "string", + "description": "The name of the user that runs the scripts on the host." + }, + "backupScript": { + "description": "Specifies configuration information for the main backup script that backs up data from the host.", + "$ref": "#/definitions/SlaManagedVolumeScriptConfig" + }, + "preBackupScript": { + "description": "Specifies configuration information for the optional pre-backup script that runs before data backup begins.", + "$ref": "#/definitions/SlaManagedVolumeScriptConfig" + }, + "postBackupScriptOnBackupSuccess": { + "description": "Specifies configuration information for the optional post-backup script that runs after data backup is complete.", + "$ref": "#/definitions/SlaManagedVolumeScriptConfig" + }, + "postBackupScriptOnBackupFailure": { + "description": "Specifies configuration information for the optional post-backup script that runs after data backup failed.", + "$ref": "#/definitions/SlaManagedVolumeScriptConfig" + }, + "shouldCancelBackupOnPreBackupScriptFailure": { + "type": "boolean", + "description": "Specifies whether a failure of the pre-backup script halts the backup process." 
+ }, + "channelHostMountPaths": { + "type": "array", + "description": "A list of mount paths where the host mounts individual channels for managed volumes.", + "items": { + "type": "string" + } + } + } + }, + "SlaManagedVolumeDetail": { + "type": "object", + "required": [ + "backupScriptDetails", + "channelHostMountPaths", + "hostDetails" + ], + "properties": { + "hostDetails": { + "description": "Configuration of the host on which the SLA Managed Volume channels are mounted.", + "$ref": "#/definitions/SlaManagedVolumeHostSummary" + }, + "backupScriptDetails": { + "description": "Backup script specifications.", + "$ref": "#/definitions/SlaManagedVolumeScriptSummary" + }, + "preBackupScriptDetails": { + "description": "Specifications of the script run prior to backup.", + "$ref": "#/definitions/SlaManagedVolumeScriptSummary" + }, + "postBackupScriptOnBackupSuccessDetails": { + "description": "Specifications of the script run after successful backup.", + "$ref": "#/definitions/SlaManagedVolumeScriptSummary" + }, + "postBackupScriptOnBackupFailureDetails": { + "description": "Specifications of the script run after unsuccessful backup.", + "$ref": "#/definitions/SlaManagedVolumeScriptSummary" + }, + "shouldCancelBackupOnPreBackupScriptFailure": { + "type": "boolean", + "description": "Indicates if a failure of the script run prior to backup halts the backup process." + }, + "channelHostMountPaths": { + "type": "array", + "description": "List of paths the host uses to mount individual channels for managed volumes.", + "items": { + "type": "string" + } + } + } + }, + "SlaManagedVolumeHostSummary": { + "type": "object", + "required": [ + "hostId", + "hostName", + "operatingSystemType", + "rubrikBackupServiceStatus" + ], + "properties": { + "hostId": { + "type": "string", + "description": "ID of the host mounting the managed volume channels and running the scripts." + }, + "hostName": { + "type": "string", + "description": "The name of the host on which the SLA Managed Volume channels are mounted." + }, + "rubrikBackupServiceStatus": { + "type": "string", + "description": "The status of the Rubrik Backup Service (RBS) installed on the managed volume host. Possible responses are `Connected`, `Disconnected` and `REPLICATION_TARGET` when the host is being replicated from a separate Rubrik cluster." + }, + "operatingSystemType": { + "type": "string", + "description": "The type of the operating system running on the host. Possible responses are `Linux`, `Windows` and `UnixLike`." + } + } + }, + "SlaManagedVolumeScriptConfig": { + "type": "object", + "required": [ + "scriptCommand" + ], + "properties": { + "scriptCommand": { + "type": "string", + "description": "The full command with arguments to execute the script." + }, + "timeout": { + "type": "integer", + "format": "int64", + "description": "An optional timeout for the script in seconds. When this value is 0 or unspecified no timeout is used.", + "minimum": 0 + } + } + }, + "SlaManagedVolumeScriptSummary": { + "type": "object", + "required": [ + "runAsUser", + "scriptCommand" + ], + "properties": { + "runAsUser": { + "type": "string", + "description": "Name of the user running the script on the host." + }, + "scriptCommand": { + "type": "string", + "description": "The full command with arguments to execute the script." + }, + "timeout": { + "type": "integer", + "format": "int64", + "description": "(Optional) Timeout period, in seconds, for the script. 
Specifying 0, or not including a value, indicates there is no timeout period.", + "minimum": 0 + } + } + }, + "MfaAuthRequest": { + "type": "object", + "required": [ + "attemptId", + "challengeId", + "challengeSetId" + ], + "properties": { + "attemptId": { + "type": "string", + "description": "ID of the current MFA authentication attempt." + }, + "challengeSetId": { + "type": "string", + "description": "The challenge set that was chosen from the sets provided on your previous authentication call.\n" + }, + "challengeId": { + "type": "string", + "description": "The specific challenge chosen from the challengeSet.\n" + }, + "credValue": { + "type": "string", + "description": "The passcode for the chosen challenge.", + "x-secret": true + } + } + }, + "MfaAuthResponse": { + "type": "object", + "required": [ + "attemptId", + "challengeSets" + ], + "properties": { + "attemptId": { + "type": "string", + "description": "ID of the current MFA authentication attempt." + }, + "challengeSets": { + "type": "array", + "description": "Sets of challenges returned by the MFA Server.", + "items": { + "$ref": "#/definitions/MfaChallengeSet" + } + }, + "reason": { + "type": "string" + } + } + }, + "MfaAuthenticationStatus": { + "type": "string", + "description": "Status of the current authentication attempt.\n", + "enum": [ + "Ok", + "Fail", + "Challenge" + ] + }, + "MfaChallenge": { + "type": "object", + "required": [ + "challengeId", + "description", + "isInputRequired", + "isValueBeingDefined", + "params" + ], + "properties": { + "challengeId": { + "type": "string", + "description": "Unique identifier for this challenge." + }, + "description": { + "type": "string", + "description": "Provides information about this challenge.\n" + }, + "isInputRequired": { + "type": "boolean", + "description": "Specifies whether or not this challenge requires input.\n" + }, + "isValueBeingDefined": { + "type": "boolean", + "description": "Specifies whether or not this challenge defines a value.\n" + }, + "params": { + "$ref": "#/definitions/Map_String" + } + } + }, + "MfaChallengeSet": { + "type": "object", + "required": [ + "challengeSetId" + ], + "properties": { + "challengeSetId": { + "type": "string", + "description": "Unique identifier for these set of challenges.\n" + }, + "challenges": { + "type": "array", + "description": "The secondary authentication methods to confirm the user's identity.\n", + "items": { + "$ref": "#/definitions/MfaChallenge" + } + } + } + }, + "AdaptiveThrottlingSettingsMssqlFields": { + "type": "object", + "properties": { + "mssqlThrottlingSettings": { + "$ref": "#/definitions/MssqlAdaptiveThrottlingSettings" + } + } + }, + "ManagedObjectDescendantCountMssqlFields": { + "type": "object", + "properties": { + "mssqlDatabase": { + "type": "integer", + "format": "int32" + } + } + }, + "MssqlAdaptiveThrottlingSettings": { + "type": "object", + "properties": { + "hostIoLatencyThreshold": { + "type": "integer", + "format": "int32", + "description": "Threshold SQL Server host latency value that determines whether to postpone a scheduled backup of a database on the host. Specify the threshold value in milliseconds (ms)." + }, + "cpuUtilizationThreshold": { + "type": "integer", + "format": "int32", + "description": "Threshold SQL Server host CPU utilization value that determines whether to postpone a scheduled backup of a database on the host. Specify the threshold value as a percentage." 
+ } + } + }, + "MssqlAvailabilityGroupDetail": { + "allOf": [ + { + "$ref": "#/definitions/MssqlAvailabilityGroupSummary" + } + ] + }, + "MssqlAvailabilityGroupSummary": { + "allOf": [ + { + "$ref": "#/definitions/Snappable" + }, + { + "$ref": "#/definitions/MssqlSlaRelatedProperties" + } + ] + }, + "MssqlAvailabilityGroupUpdate": { + "allOf": [ + { + "$ref": "#/definitions/MssqlSlaPatchProperties" + } + ] + }, + "MssqlBatchBackupCanceledSummary": { + "type": "object", + "required": [ + "databaseId" + ], + "properties": { + "databaseId": { + "type": "string", + "description": "ID of the Microsoft SQL database." + } + } + }, + "MssqlBatchBackupFailureSummary": { + "type": "object", + "required": [ + "databaseId", + "error" + ], + "properties": { + "databaseId": { + "type": "string", + "description": "ID of the Microsoft SQL database." + }, + "error": { + "type": "string", + "description": "Information specifying why this snapshot failed." + } + } + }, + "MssqlBatchBackupJobConfig": { + "allOf": [ + { + "$ref": "#/definitions/BaseOnDemandSnapshotConfig" + }, + { + "type": "object", + "properties": { + "databaseIds": { + "type": "array", + "description": "IDs of the Microsoft SQL databases. All databases in this list are considered for taking an on demand snapshot.", + "items": { + "type": "string" + } + }, + "instanceIds": { + "type": "array", + "description": "IDs of the Microsoft SQL instances. All non-availability databases on these instances are considered for taking an on demand snapshot.", + "items": { + "type": "string" + } + }, + "hostIds": { + "type": "array", + "description": "IDs of the hosts. All databases with a `rootId` belonging to this list are considered for taking an on demand snapshot.", + "items": { + "type": "string" + } + }, + "windowsClusterIds": { + "type": "array", + "description": "IDs of the Windows clusters. All databases with a `rootId` belonging to this list are considered for taking an on demand snapshot.", + "items": { + "type": "string" + } + }, + "availabilityGroupIds": { + "type": "array", + "description": "IDs of the Microsoft SQL availability groups. All databases with a `rootId` belonging to this list are considered for taking an on demand snapshot.", + "items": { + "type": "string" + } + }, + "forceFullSnapshot": { + "type": "boolean", + "description": "Determines whether to force a full or incremental snapshot." + } + } + } + ] + }, + "MssqlBatchBackupSuccessSummary": { + "type": "object", + "required": [ + "databaseId", + "snapshotId" + ], + "properties": { + "databaseId": { + "type": "string", + "description": "ID of the Mirosoft SQL database." + }, + "snapshotId": { + "type": "string", + "description": "ID of the snapshot." + } + } + }, + "MssqlBatchBackupSummary": { + "type": "object", + "required": [ + "canceledSnapshots", + "failedSnapshots", + "id", + "successfulSnapshots" + ], + "properties": { + "id": { + "type": "string", + "description": "ID of the on-demand backup request." 
+ }, + "successfulSnapshots": { + "type": "array", + "description": "List of summary information for each successful snapshot.", + "items": { + "$ref": "#/definitions/MssqlBatchBackupSuccessSummary" + } + }, + "failedSnapshots": { + "type": "array", + "description": "List of summary information for each failed snapshot.", + "items": { + "$ref": "#/definitions/MssqlBatchBackupFailureSummary" + } + }, + "canceledSnapshots": { + "type": "array", + "description": "List of summary information for each canceled snapshot.", + "items": { + "$ref": "#/definitions/MssqlBatchBackupCanceledSummary" + } + } + } + }, + "MssqlDatabaseFileType": { + "type": "string", + "description": "File type of a database file.", + "enum": [ + "Data", + "Log", + "Filestream" + ] + }, + "MssqlDbDefaults": { + "type": "object", + "required": [ + "cbtStatus", + "logBackupFrequencyInSeconds", + "logRetentionTimeInHours" + ], + "properties": { + "logBackupFrequencyInSeconds": { + "type": "integer", + "format": "int64" + }, + "cbtStatus": { + "type": "boolean", + "description": "True to enable a CBT-based backup, false to disable a CBT-based backup." + }, + "logRetentionTimeInHours": { + "type": "integer", + "format": "int32" + } + } + }, + "MssqlDbDefaultsUpdate": { + "type": "object", + "properties": { + "logBackupFrequencyInSeconds": { + "type": "integer", + "format": "int64" + }, + "cbtStatus": { + "type": "boolean", + "description": "True to enable a CBT-based backup, false to disable a CBT-based backup." + }, + "logRetentionTimeInHours": { + "type": "integer", + "format": "int32" + } + } + }, + "MssqlDbDetail": { + "allOf": [ + { + "$ref": "#/definitions/MssqlDbSummary" + }, + { + "$ref": "#/definitions/BlackoutWindowResponseInfo" + }, + { + "type": "object", + "required": [ + "snapshotCount" + ], + "properties": { + "snapshotCount": { + "type": "integer", + "format": "int32" + }, + "isLocal": { + "type": "boolean" + }, + "isStandby": { + "type": "boolean", + "description": "This field is deprecated. Use the isStandby field on the replicas list instead. This field will continue to work for non-availability databases, but it is meaningless for availability databases." + }, + "latestRecoveryPoint": { + "type": "string", + "format": "date" + }, + "oldestRecoveryPoint": { + "type": "string", + "format": "date" + }, + "protectionDate": { + "type": "string", + "format": "date" + }, + "recoveryForkGuid": { + "type": "string", + "description": "This field is deprecated. Use the recoveryForkGuid field on the replicas list instead. This field will continue to work for non-availability databases, but it is meaningless for availability databases." + }, + "maxDataStreams": { + "type": "integer", + "format": "int32" + }, + "localStorage": { + "type": "integer", + "format": "int64" + }, + "archiveStorage": { + "type": "integer", + "format": "int64" + }, + "preBackupScript": { + "$ref": "#/definitions/MssqlScriptDetail" + }, + "postBackupScript": { + "$ref": "#/definitions/MssqlScriptDetail" + } + } + } + ] + }, + "MssqlDbFileExportPath": { + "type": "object", + "required": [ + "exportPath", + "logicalName" + ], + "properties": { + "logicalName": { + "type": "string", + "description": "Logical name of the database file." + }, + "exportPath": { + "type": "string", + "description": "The target path for the database file." + }, + "newLogicalName": { + "type": "string", + "description": "New logical name for the database file." + }, + "newFilename": { + "type": "string", + "description": "New filename for the database file." 
+ } + } + }, + "MssqlDbReplica": { + "type": "object", + "required": [ + "hasPermissions", + "instanceId", + "instanceName", + "isArchived", + "isDeleted", + "isStandby", + "recoveryModel", + "rootProperties", + "state" + ], + "properties": { + "instanceId": { + "type": "string", + "description": "ID of the SQL Server instance managing the replica." + }, + "instanceName": { + "type": "string", + "description": "Name of the SQL Server instance managing the replica." + }, + "recoveryModel": { + "type": "string", + "description": "The recovery model of the replica.", + "enum": [ + "SIMPLE", + "FULL", + "BULK_LOGGED" + ] + }, + "state": { + "type": "string", + "description": "The state of the replica." + }, + "hasPermissions": { + "type": "boolean", + "description": "`True` when the Rubrik cluster has sufficient permissions to perform all necessary operations." + }, + "isStandby": { + "type": "boolean", + "description": "`True` when the replica is in standby mode." + }, + "recoveryForkGuid": { + "type": "string", + "description": "The recovery fork GUID of the replica." + }, + "isArchived": { + "type": "boolean", + "description": "Deprecated. Please use 'isDeleted' instead." + }, + "isDeleted": { + "type": "boolean", + "description": "`True` when the replica is deleted." + }, + "availabilityInfo": { + "description": "For an availability database, provides additional information about a database replica.", + "$ref": "#/definitions/MssqlDbReplicaAvailabilityInfo" + }, + "rootProperties": { + "$ref": "#/definitions/MssqlRootProperties" + } + } + }, + "MssqlDbReplicaAvailabilityInfo": { + "type": "object", + "required": [ + "role" + ], + "properties": { + "role": { + "type": "string", + "description": "Role of the availability database replica. Possible values are: `PRIMARY`, `SECONDARY`, or `RESOLVING`.", + "enum": [ + "PRIMARY", + "SECONDARY", + "RESOLVING" + ] + } + } + }, + "MssqlDbSlaObjectCount": { + "type": "object", + "properties": { + "numDbs": { + "type": "integer", + "format": "int32", + "description": "The number of actively protected databases under this SLA domain." + } + } + }, + "MssqlDbSummary": { + "allOf": [ + { + "$ref": "#/definitions/Snappable" + }, + { + "type": "object", + "required": [ + "copyOnly", + "hasPermissions", + "id", + "isInAvailabilityGroup", + "isLiveMount", + "isLogShippingSecondary", + "isOnline", + "isRelic", + "logBackupFrequencyInSeconds", + "logBackupRetentionHours", + "name", + "numMissedSnapshot", + "primaryClusterId", + "replicas", + "rootProperties", + "unprotectableReasons" + ], + "properties": { + "rootProperties": { + "$ref": "#/definitions/MssqlRootProperties" + }, + "id": { + "type": "string" + }, + "instanceId": { + "type": "string", + "description": "This field is deprecated. Use the instanceId field on the replicas list instead. This field will continue to work for non-availability databases, but it is meaningless for availability databases." + }, + "instanceName": { + "type": "string", + "description": "This field is deprecated. Use the instanceName field on the replicas list instead. This field will continue to work for non-availability databases, but it is meaningless for availability databases." + }, + "isRelic": { + "type": "boolean" + }, + "primaryClusterId": { + "type": "string" + }, + "copyOnly": { + "type": "boolean", + "description": "Boolean value that specifies whether or not to perform copy-only backups of the database. When true, database backups are copy-only backups. When false, database backups are full backups." 
+ }, + "logBackupFrequencyInSeconds": { + "type": "integer", + "format": "int32" + }, + "logBackupRetentionHours": { + "type": "integer", + "format": "int32", + "description": "Hours to keep a log backup. A value of -1 indicates that a log will only expire when the preceding snapshots have expired." + }, + "name": { + "type": "string" + }, + "isLiveMount": { + "type": "boolean", + "description": "Boolean value that specifies whether a database object is a Live Mount. Value is 'true' when the database object is a Live Mount." + }, + "isLogShippingSecondary": { + "type": "boolean", + "description": "Boolean value that specifies whether a database object represents a secondary database. Value is 'true' when the database object represents a secondary database in a log shipping configuration." + }, + "recoveryModel": { + "type": "string", + "description": "This field is deprecated. Use the recoveryModel field on the replicas list instead. This field will continue to work for non-availability databases, but it is meaningless for availability databases.", + "enum": [ + "SIMPLE", + "FULL", + "BULK_LOGGED" + ] + }, + "state": { + "type": "string", + "description": "This field is deprecated. Use the state field on the replicas list instead. This field will continue to work for non-availability databases, but it is meaningless for availability databases." + }, + "hasPermissions": { + "type": "boolean", + "description": "A Boolean value that specifies whether the cluster has permission to back up the database. When this value is 'true', the cluster has permission to back up the database." + }, + "isInAvailabilityGroup": { + "type": "boolean" + }, + "replicas": { + "type": "array", + "description": "List of replicas of this database. An availability database may have multiple replicas, while other databases will have only one replica.", + "items": { + "$ref": "#/definitions/MssqlDbReplica" + } + }, + "availabilityGroupId": { + "type": "string", + "description": "For an availability database, the ID of the availability group that the database belongs to." + }, + "unprotectableReasons": { + "type": "array", + "description": "A list of reasons that a SQL Server database cannot be protected by the Rubrik CDM.", + "items": { + "type": "string" + } + }, + "numMissedSnapshot": { + "type": "integer", + "format": "int32", + "description": "An integer that specifies the number of missed snapshots. Only available in the /v1/mssql/db endpoint request body. The information will not be available for other endpoints." + }, + "lastSnapshotTime": { + "type": "string", + "format": "date-time", + "description": "The timestamp of the previous snapshot.. Only available in the /v1/mssql/db endpoint request body. The information will not be available for other endpoints." + }, + "includeBackupTaskInfo": { + "type": "boolean", + "description": "True/false value indicating if backup task information is included in the response." + }, + "currentBackupTaskInfo": { + "description": "Information about the current backup task.", + "$ref": "#/definitions/BackupTaskDiagnosticInfo" + }, + "isOnline": { + "type": "boolean", + "description": "A Boolean value that specifies whether the database is in the ONLINE state. When this value is 'true', the database is in the ONLINE state." 
+ }, + "pendingSlaDomain": { + "description": "Describes any pending SLA Domain assignment on this object.", + "$ref": "#/definitions/ManagedObjectPendingSlaInfo" + } + } + } + ] + }, + "MssqlDbUpdate": { + "allOf": [ + { + "$ref": "#/definitions/MssqlSlaPatchProperties" + }, + { + "type": "object", + "properties": { + "maxDataStreams": { + "type": "integer", + "format": "int32", + "description": "Maximum number of parallel data streams that can be used to back up the database." + }, + "isPaused": { + "type": "boolean", + "description": "Whether to pause or resume backups/archival for this database." + }, + "preBackupScript": { + "$ref": "#/definitions/MssqlScriptDetail" + }, + "postBackupScript": { + "$ref": "#/definitions/MssqlScriptDetail" + }, + "shouldForceFull": { + "type": "boolean", + "description": "Determines whether to force a full for the next snapshot of a SQL Server database. When this value is true, the Rubrik cluster takes a full snapshot. This value is false by default and is reset to false after a successful full snapshot." + } + } + } + ] + }, + "MssqlDbUpdateId": { + "type": "object", + "required": [ + "databaseId", + "updateProperties" + ], + "properties": { + "databaseId": { + "type": "string", + "description": "ID of the Microsoft SQL database." + }, + "updateProperties": { + "$ref": "#/definitions/MssqlDbUpdate" + } + } + }, + "MssqlInstanceShortSummary": { + "type": "object", + "required": [ + "id", + "name" + ], + "properties": { + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "activeNode": { + "type": "string", + "description": "Active node of the instance in a Windows server failover cluster. Populated only if the node belongs to a Windows server failover cluster." + }, + "networkName": { + "type": "string", + "description": "Network name of the instance in a Windows server failover cluster. Populated only if the node belongs to a Windows server failover cluster." + } + } + }, + "MssqlInstanceSummary": { + "allOf": [ + { + "$ref": "#/definitions/MssqlSlaRelatedProperties" + }, + { + "type": "object", + "required": [ + "id", + "primaryClusterId", + "rootProperties" + ], + "properties": { + "id": { + "type": "string" + }, + "internalTimestamp": { + "type": "integer", + "format": "int64" + }, + "name": { + "type": "string" + }, + "primaryClusterId": { + "type": "string" + }, + "rootProperties": { + "$ref": "#/definitions/MssqlRootProperties" + }, + "clusterInstanceAddress": { + "type": "string", + "description": "The address of the instance in a Windows server failover cluster, populated only if it belongs to one." + }, + "protectionDate": { + "type": "string", + "format": "date" + }, + "version": { + "type": "string" + }, + "configuredSlaDomainId": { + "type": "string", + "description": "SLA Domain ID assigned to instance." + }, + "configuredSlaDomainType": { + "type": "string", + "description": "Specifies whether the SLA Domain is used for protection or retention." + }, + "configuredSlaDomainName": { + "type": "string", + "description": "SLA Domain name assigned to instance." + }, + "isRetentionLocked": { + "type": "boolean", + "description": "Boolean value that identifies a Retention Lock SLA Domain. Value is true when the SLA Domain assigned to the instance is Retention Locked and false when it is not." 
+ }, + "unprotectableReasons": { + "type": "array", + "description": "A list of reasons that all the SQL Server databases in a SQL Server instance cannot be protected by the Rubrik CDM.", + "items": { + "type": "string" + } + } + } + } + ] + }, + "MssqlLogShippingCreateConfig": { + "allOf": [ + { + "$ref": "#/definitions/MssqlLogShippingTargetStateOptions" + }, + { + "type": "object", + "required": [ + "targetDatabaseName", + "targetInstanceId" + ], + "properties": { + "maxDataStreams": { + "type": "integer", + "format": "int32", + "description": "Maximum number of parallel data streams that can be used to copy data to the target system." + }, + "targetDatabaseName": { + "type": "string", + "description": "The name of the secondary database." + }, + "targetDataFilePath": { + "type": "string", + "description": "The path to the default target location for data file storage." + }, + "targetFilePaths": { + "type": "array", + "description": "Array of database file storage paths. Each path is the target storage location for a database file. Values in this array override the values in targetDataFilePath and targetLogFilePath for the specified database files.", + "items": { + "$ref": "#/definitions/MssqlDbFileExportPath" + } + }, + "targetInstanceId": { + "type": "string", + "description": "The ID of the SQL Server instance that hosts the secondary database." + }, + "targetLogFilePath": { + "type": "string", + "description": "The path to the location of the log files." + } + } + } + ] + }, + "MssqlLogShippingLinks": { + "type": "object", + "required": [ + "primaryDatabase", + "secondaryInstance" + ], + "properties": { + "primaryDatabase": { + "$ref": "#/definitions/Link" + }, + "secondaryInstance": { + "$ref": "#/definitions/Link" + }, + "secondaryDatabase": { + "$ref": "#/definitions/Link" + }, + "seedRequest": { + "$ref": "#/definitions/Link" + } + } + }, + "MssqlLogShippingListSortAttribute": { + "type": "string", + "description": "Field used for sorts when enumerating log shipping entries.", + "enum": [ + "secondaryDatabaseName", + "primaryDatabaseName", + "lastAppliedPoint", + "location" + ] + }, + "MssqlLogShippingOkState": { + "type": "string", + "description": "Secondary database states for log shipping configurations with a status of OK.", + "enum": [ + "RESTORING", + "STANDBY" + ] + }, + "MssqlLogShippingStatus": { + "type": "string", + "description": "Status of the log shipping configuration.", + "enum": [ + "OK", + "Broken", + "Initializing", + "Stale" + ] + }, + "MssqlLogShippingStatusInfo": { + "type": "object", + "required": [ + "message", + "status" + ], + "properties": { + "status": { + "$ref": "#/definitions/MssqlLogShippingStatus" + }, + "message": { + "type": "string", + "description": "Detailed message describing the status of the log shipping configuration." + } + } + }, + "MssqlLogShippingSummary": { + "type": "object", + "required": [ + "id", + "location", + "primaryDatabaseId", + "primaryDatabaseLogBackupFrequency", + "primaryDatabaseName", + "secondaryDatabaseName", + "status" + ], + "properties": { + "id": { + "type": "string", + "description": "ID assigned to the log shipping configuration object." + }, + "lastAppliedPoint": { + "type": "string", + "format": "date-time", + "description": "Timestamp of the last transaction applied using the specified log shipping configuration object." + }, + "location": { + "type": "string", + "description": "Location of a specified secondary database. Location uses this format: \"host/instance\"." 
+ }, + "primaryDatabaseId": { + "type": "string", + "description": "ID of the primary database." + }, + "primaryDatabaseName": { + "type": "string", + "description": "Name of the primary database." + }, + "primaryDatabaseLogBackupFrequency": { + "type": "integer", + "format": "int64", + "description": "Log backup frequency, in seconds, of the primary database." + }, + "secondaryDatabaseName": { + "type": "string", + "description": "Name of the secondary database." + }, + "secondaryDatabaseId": { + "type": "string", + "description": "ID of the secondary database." + }, + "state": { + "type": "string", + "description": "The current state of the secondary database." + }, + "lagTime": { + "type": "integer", + "format": "int64", + "description": "Number of milliseconds elapsed since the latest backup was applied to the secondary database and the time the backup was taken on the primary database." + }, + "status": { + "$ref": "#/definitions/MssqlLogShippingStatusInfo" + } + } + }, + "MssqlLogShippingTargetStateOptions": { + "type": "object", + "required": [ + "state" + ], + "properties": { + "state": { + "$ref": "#/definitions/MssqlLogShippingOkState" + }, + "shouldDisconnectStandbyUsers": { + "type": "boolean", + "description": "Specifies whether to automatically disconnect users from a secondary database in standby mode when a restore operation is performed. If this value is set to false and users remain connected, any scheduled restore operations fail. If the \"state\" field is `RESTORING`, this value can be omitted and is ignored." + } + } + }, + "MssqlLogShippingUpdate": { + "allOf": [ + { + "$ref": "#/definitions/MssqlLogShippingTargetStateOptions" + } + ] + }, + "MssqlRestoreEstimateResult": { + "type": "object", + "required": [ + "bytesFromCloud" + ], + "properties": { + "bytesFromCloud": { + "type": "integer", + "format": "int64", + "description": "Estimate of the number of bytes to be downloaded from the cloud." + } + } + }, + "MssqlRestoreFile": { + "type": "object", + "required": [ + "fileId", + "fileType", + "logicalName", + "originalName", + "originalPath" + ], + "properties": { + "logicalName": { + "type": "string", + "description": "Logical name of the database file to be restored." + }, + "originalPath": { + "type": "string", + "description": "Original path to the database file to be restored." + }, + "originalName": { + "type": "string", + "description": "Original filename of the database file to be restored." + }, + "fileType": { + "$ref": "#/definitions/MssqlDatabaseFileType" + }, + "fileId": { + "type": "integer", + "format": "int64", + "description": "Original file ID of the database file to be restored." + } + } + }, + "MssqlRootProperties": { + "type": "object", + "required": [ + "rootName", + "rootType" + ], + "properties": { + "rootType": { + "type": "string", + "description": "Type of the root object for this object. The root object is the top-level object from which this object is derived. If this object is an availability database, the root object is **_MssqlAvailabilityGroup_**. Otherwise, if this object is part of a cluster, the root object is **_WindowsCluster_**. Otherwise, the root object is **_Host_**.", + "enum": [ + "Host", + "WindowsCluster", + "MssqlAvailabilityGroup" + ] + }, + "rootId": { + "type": "string", + "description": "ID of the root of this object." + }, + "rootName": { + "type": "string", + "description": "Name of the root of this object." 
+ }, + "rootRole": { + "type": "string", + "description": "Role of the root object for this object if the root object is a Host and part of a **_MssqlAvailabilityGroup_**." + } + } + }, + "MssqlScriptDetail": { + "type": "object", + "required": [ + "scriptErrorAction", + "scriptPath", + "timeoutMs" + ], + "properties": { + "scriptPath": { + "type": "string", + "description": "The script to be run." + }, + "timeoutMs": { + "type": "integer", + "format": "int64", + "description": "Time (in ms) after which the script will be terminated if it has not completed." + }, + "scriptErrorAction": { + "description": "Action to take if the script returns an error or times out.", + "$ref": "#/definitions/ScriptErrorAction" + } + }, + "x-rk-nullable-properties": [ + "scriptPath", + "timeoutMs", + "scriptErrorAction" + ] + }, + "MssqlSlaDomainAssignInfo": { + "allOf": [ + { + "$ref": "#/definitions/MssqlSlaPatchProperties" + }, + { + "type": "object", + "required": [ + "ids" + ], + "properties": { + "ids": { + "type": "array", + "description": "List of SQL Server object IDs which should be assigned these properties.\n", + "items": { + "type": "string" + } + }, + "existingSnapshotRetention": { + "$ref": "#/definitions/ExistingSnapshotRetention" + } + } + } + ] + }, + "MssqlSlaPatchProperties": { + "allOf": [ + { + "$ref": "#/definitions/MssqlSlaRelatedProperties" + }, + { + "type": "object", + "properties": { + "configuredSlaDomainId": { + "type": "string", + "description": "SLA Domain ID assigned to instance. Existing snapshots of the instance will be retained with the configuration of specified SLA Domain." + }, + "useConfiguredDefaultLogRetention": { + "type": "boolean", + "description": "Determines whether to use the configured default value of log backup retention." + } + } + } + ] + }, + "MssqlSlaRelatedProperties": { + "type": "object", + "properties": { + "logBackupFrequencyInSeconds": { + "type": "integer", + "format": "int32", + "description": "Seconds between two log backups. A value of 0 disables log backup." + }, + "logRetentionHours": { + "type": "integer", + "format": "int32", + "description": "Number of hours to retain a log backup. When the value is set to -1 the Rubrik cluster retains the log backup until the database snapshots that precede the log backup have expired." + }, + "copyOnly": { + "type": "boolean", + "description": "Boolean value that specifies whether or not to perform copy-only backups of the database. When true, database backups are copy-only backups. When false, database backups are full backups." + } + } + }, + "MssqlSnappableId": { + "type": "object", + "required": [ + "snappableId" + ], + "properties": { + "snappableId": { + "type": "string", + "description": "ID of the protected object." + } + } + }, + "NetworkThrottleResourceId": { + "type": "string", + "description": "Resource types that support network throttling.\n", + "enum": [ + "ArchivalEgress", + "ReplicationEgress" + ] + }, + "NetworkThrottleScheduleSummary": { + "type": "object", + "required": [ + "daysOfWeek", + "endTime", + "startTime", + "throttleLimit" + ], + "properties": { + "throttleLimit": { + "type": "number", + "format": "double", + "description": "Network bandwidth throttle limit for a resource, in Mbps. The throttle limit is precise to two decimal places." + }, + "daysOfWeek": { + "type": "array", + "description": "Array of int32 values that represent the days of the week on which to apply a scheduled network throttle. 
The days of the week are represented from 1-7 with Sunday as 1.", + "items": { + "type": "integer", + "format": "int32" + } + }, + "startTime": { + "type": "integer", + "format": "int64", + "description": "An int64 value that represents the start time for a scheduled network throttle. The start time should be an hour of the day in minutes. For example, 0, 12*60 and 24*60 are valid values." + }, + "endTime": { + "type": "integer", + "format": "int64", + "description": "An int64 value that represents the end time for a scheduled network throttle. The end time should be an hour of the day in minutes. For example, 0, 12*60 and 24*60 are valid values." + } + } + }, + "NetworkThrottleSummary": { + "type": "object", + "required": [ + "isEnabled", + "networkInterface", + "resourceId", + "scheduledThrottles" + ], + "properties": { + "resourceId": { + "description": "ID assigned to a resource that can be managed with a network throttle.", + "$ref": "#/definitions/NetworkThrottleResourceId" + }, + "networkInterface": { + "type": "string", + "description": "The network interface where outgoing traffic is throttled." + }, + "defaultThrottleLimit": { + "type": "number", + "format": "double", + "description": "Default throttle limit for a resource, in Mbps. The throttle limit is precise to two decimal places." + }, + "scheduledThrottles": { + "type": "array", + "description": "An array containing all of the scheduled throttle limits for the specified resource.", + "items": { + "$ref": "#/definitions/NetworkThrottleScheduleSummary" + } + }, + "isEnabled": { + "type": "boolean", + "description": "Boolean value that determines whether a throttle limit is enabled." + } + } + }, + "NetworkThrottleSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/NetworkThrottleSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "NetworkThrottleUpdate": { + "type": "object", + "properties": { + "defaultThrottleLimit": { + "type": "number", + "format": "double", + "description": "Default throttle limit for a resource, in Mbps. The throttle limit is precise to two decimal places." + }, + "networkInterface": { + "type": "string", + "description": "The network interface where outgoing traffic is throttled." + }, + "scheduledThrottles": { + "type": "array", + "description": "An array containing all of the scheduled throttle limits for a specified resource.", + "items": { + "$ref": "#/definitions/NetworkThrottleScheduleSummary" + } + }, + "isEnabled": { + "type": "boolean", + "description": "Boolean value that determines whether a throttle limit is enabled. Set to true to enable the throttle limit, or set to false to disable the throttle limit." + } + } + }, + "ActiveSession": { + "type": "object", + "required": [ + "creationTime", + "id", + "lastUsageTime", + "username" + ], + "properties": { + "id": { + "type": "string", + "description": "Session identifier." + }, + "username": { + "type": "string", + "description": "Username for the corresponding session." + }, + "creationTime": { + "type": "string", + "format": "date-time", + "description": "Time when session was created." 
+ }, + "lastUsageTime": { + "type": "string", + "format": "date-time", + "description": "Time of last REST API request during this session.\n" + }, + "lastUsageSourceIp": { + "type": "string", + "description": "Source IP address of last session." + }, + "lastRequestPath": { + "type": "string", + "description": "Last request made for this session." + }, + "tag": { + "type": "string", + "description": "Name assigned to the token by the user." + }, + "expiration": { + "type": "string", + "format": "date-time", + "description": "Expiration time of the token in UTC." + }, + "tokenType": { + "type": "string", + "description": "The type of session that this token corresponds to.\n" + } + } + }, + "ActiveSessionListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/ActiveSession" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "AutoRemovedNodeStatus": { + "type": "object", + "required": [ + "id", + "removedOn" + ], + "properties": { + "id": { + "type": "string", + "description": "Identifier of the node that was automatically removed." + }, + "removedOn": { + "type": "string", + "format": "date-time", + "description": "Time when the node was removed." + } + } + }, + "AutoRemovedNodeStatusListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/AutoRemovedNodeStatus" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "DiskInfo": { + "allOf": [ + { + "$ref": "#/definitions/DiskStatus" + }, + { + "type": "object", + "required": [ + "capacityBytes", + "path" + ], + "properties": { + "capacityBytes": { + "type": "integer", + "format": "int64" + }, + "path": { + "type": "string" + }, + "unallocatedBytes": { + "type": "integer", + "format": "int64" + }, + "usableBytes": { + "type": "integer", + "format": "int64" + } + } + } + ] + }, + "DiskInfoListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/DiskInfo" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." 
+ } + } + }, + "DiskStatus": { + "type": "object", + "required": [ + "diskType", + "id", + "isDegraded", + "isEncrypted", + "nodeId", + "status" + ], + "properties": { + "id": { + "type": "string" + }, + "status": { + "type": "string" + }, + "isEncrypted": { + "type": "boolean" + }, + "isDegraded": { + "type": "boolean" + }, + "diskType": { + "type": "string" + }, + "nodeId": { + "type": "string" + } + } + }, + "IoStat": { + "type": "object", + "required": [ + "ioThroughput", + "iops" + ], + "properties": { + "iops": { + "$ref": "#/definitions/Iops" + }, + "ioThroughput": { + "$ref": "#/definitions/IoThroughput" + } + } + }, + "IoThroughput": { + "type": "object", + "required": [ + "readBytePerSecond", + "writeBytePerSecond" + ], + "properties": { + "readBytePerSecond": { + "type": "array", + "items": { + "$ref": "#/definitions/TimeStat" + } + }, + "writeBytePerSecond": { + "type": "array", + "items": { + "$ref": "#/definitions/TimeStat" + } + } + } + }, + "Iops": { + "type": "object", + "required": [ + "readsPerSecond", + "writesPerSecond" + ], + "properties": { + "readsPerSecond": { + "type": "array", + "items": { + "$ref": "#/definitions/TimeStat" + } + }, + "writesPerSecond": { + "type": "array", + "items": { + "$ref": "#/definitions/TimeStat" + } + } + } + }, + "NetworkStat": { + "type": "object", + "required": [ + "bytesReceived", + "bytesTransmitted" + ], + "properties": { + "bytesReceived": { + "type": "array", + "items": { + "$ref": "#/definitions/TimeStat" + } + }, + "bytesTransmitted": { + "type": "array", + "items": { + "$ref": "#/definitions/TimeStat" + } + } + } + }, + "NodeInfo": { + "allOf": [ + { + "$ref": "#/definitions/NodeStatus" + }, + { + "type": "object", + "required": [ + "cpuCores", + "hdd", + "networkSpeed", + "ram", + "ssd", + "systemTime" + ], + "properties": { + "cpuCores": { + "type": "integer", + "format": "int64" + }, + "ram": { + "type": "integer", + "format": "int64" + }, + "networkSpeed": { + "type": "string" + }, + "hdd": { + "type": "array", + "items": { + "$ref": "#/definitions/DiskInfo" + } + }, + "ssd": { + "type": "array", + "items": { + "$ref": "#/definitions/DiskInfo" + } + }, + "systemTime": { + "type": "string", + "format": "date-time", + "description": "The system time on a node." + } + } + } + ] + }, + "NodeObjectSortAttribute": { + "type": "string", + "description": "Attribute by which the node objects are sorted.", + "enum": [ + "Status", + "Id", + "Name", + "Ip" + ] + }, + "NodeStats": { + "allOf": [ + { + "$ref": "#/definitions/NodeStatus" + }, + { + "$ref": "#/definitions/IoStat" + }, + { + "type": "object", + "required": [ + "cpuStat", + "networkStat" + ], + "properties": { + "networkStat": { + "$ref": "#/definitions/NetworkStat" + }, + "cpuStat": { + "type": "array", + "items": { + "$ref": "#/definitions/TimeStat" + } + } + } + } + ] + }, + "NodeStatsListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/NodeStats" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." 
+ } + } + }, + "NodeStatus": { + "type": "object", + "required": [ + "brikId", + "hasUnavailableDisks", + "id", + "status" + ], + "properties": { + "id": { + "type": "string" + }, + "brikId": { + "type": "string" + }, + "status": { + "type": "string" + }, + "ipAddress": { + "type": "string" + }, + "supportTunnel": { + "$ref": "#/definitions/SupportTunnelInfo" + }, + "hasUnavailableDisks": { + "type": "boolean" + }, + "hostname": { + "type": "string", + "description": "Hostname of the node." + } + } + }, + "NodeStatusListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/NodeStatus" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "SupportTunnelInfo": { + "type": "object", + "required": [ + "isTunnelEnabled" + ], + "properties": { + "isTunnelEnabled": { + "type": "boolean", + "description": "True if the support tunnel is enabled on this node. False otherwise." + }, + "port": { + "type": "integer", + "format": "int64", + "description": "The port used to tunnel traffic. Port number will be deprecated in the future when we transition to new cloud infrastructure that does not rely on unique port numbers." + }, + "enabledTime": { + "type": "string", + "format": "date-time", + "description": "Time when the tunnel was enabled or omitted when the tunnel is not enabled." + }, + "lastActivityTime": { + "type": "string", + "format": "date-time", + "description": "Time when the tunnel was last used or omitted if the tunnel is not enabled." + }, + "inactivityTimeoutInSeconds": { + "type": "integer", + "format": "int64", + "description": "Inactivity timeout in seconds or omitted if the tunnel is not enabled." + }, + "errorMessage": { + "type": "string", + "description": "Error message when unable to open support tunnel." 
+ } + } + }, + "ObjectType": { + "type": "string", + "description": "Type of object.", + "enum": [ + "AppBlueprint", + "AwsAccount", + "CloudCompute", + "CloudComputeRegion", + "CloudNativeAuthzRoot", + "ComputeCluster", + "DataCenter", + "DataStore", + "Ec2Instance", + "ExclusionPattern", + "ExclusionPatternAuthzRoot", + "Folder", + "Hdfs", + "HostFailoverCluster", + "HostRoot", + "HypervAuthzRoot", + "HypervCluster", + "HypervScvmm", + "HypervServer", + "HypervVirtualMachine", + "FailoverClusterApp", + "KuprHost", + "KuprHostAuthzRoot", + "LinuxFileset", + "LinuxHost", + "LinuxHostAuthzRoot", + "ManagedVolume", + "ManagedVolumeAuthzRoot", + "ManagedVolumeRoot", + "MssqlAuthzRoot", + "MssqlDatabase", + "MssqlAvailabilityGroup", + "MssqlInstance", + "NasHost", + "NasHostAuthzRoot", + "NasSystem", + "NfsHostShare", + "NutanixAuthzRoot", + "NutanixCluster", + "NutanixVirtualMachine", + "OracleAuthzRoot", + "OracleDatabase", + "OracleHost", + "OracleRac", + "OracleRoot", + "SapHanaAuthzRoot", + "SapHanaDatabase", + "SapHanaSystem", + "ShareFileset", + "SlaDomain", + "SmbHostShare", + "StorageArray", + "StorageArrayVolume", + "StorageArrayVolumeGroup", + "Storm", + "User", + "vCenter", + "Vcd", + "VcdAuthzRoot", + "VcdCatalog", + "VcdOrg", + "VcdOrgVdc", + "VcdVapp", + "VcdVimServer", + "VirtualMachine", + "VmwareAuthzRoot", + "VmwareHost", + "VmwareResourcePool", + "VmwareStoragePolicy", + "VmwareTag", + "VmwareTagCategory", + "WindowsCluster", + "WindowsFileset", + "WindowsHost", + "WindowsHostAuthzRoot", + "WindowsVolumeGroup" + ] + }, + "OdsConfigurationSummary": { + "type": "object", + "required": [ + "odsPolicyOnPause" + ], + "properties": { + "odsPolicyOnPause": { + "description": "The policy to be followed for an on-demand snapshot request during a pause.", + "$ref": "#/definitions/OdsPolicyOnPause" + } + } + }, + "OdsPolicyOnPause": { + "type": "object", + "required": [ + "schedulingType" + ], + "properties": { + "schedulingType": { + "description": "An enum which represents the on-demand snapshot policy during a pause.", + "$ref": "#/definitions/SchedulingType" + } + } + }, + "SchedulingType": { + "type": "string", + "description": "Policies that are available to use for taking on-demand snapshot during an effective pause.", + "enum": [ + "TakeImmediately", + "TakeOnResume" + ] + }, + "OracleHierarchyObjectDescendentCount": { + "type": "object", + "properties": { + "rac": { + "type": "integer", + "format": "int32" + }, + "oracleHost": { + "type": "integer", + "format": "int32" + }, + "db": { + "type": "integer", + "format": "int32" + } + } + }, + "OracleHierarchyObjectSummary": { + "allOf": [ + { + "$ref": "#/definitions/ManagedHierarchyObjectSummary" + }, + { + "type": "object", + "required": [ + "descendentCount", + "isDeleted", + "objectType", + "primaryClusterId" + ], + "properties": { + "objectType": { + "description": "Type of Oracle objects.", + "$ref": "#/definitions/ObjectType" + }, + "connectionStatus": { + "description": "Connection status of Oracle RAC or standalone host.", + "$ref": "#/definitions/OracleConnectionTypes" + }, + "descendentCount": { + "$ref": "#/definitions/OracleHierarchyObjectDescendentCount" + }, + "isDeleted": { + "type": "boolean", + "description": "Indicates whether the Oracle hierarchy object is deleted." + }, + "sid": { + "type": "string", + "description": "System identifier (SID) of the Oracle hierarchy object. This valid only when the object type is Oracle database." 
+ }, + "numNodes": { + "type": "integer", + "format": "int32", + "description": "Count of the number of nodes on the Oracle hierarchy object. This is valid only when the object type is Oracle RAC or standalone host." + }, + "nodes": { + "type": "array", + "description": "Details of the nodes of this Oracle hierarchy object. This is valid only when the object type is Oracle RAC or standalone host.", + "items": { + "$ref": "#/definitions/OracleNodeProperties" + } + }, + "numInstances": { + "type": "integer", + "format": "int32", + "description": "Count of the number of instances of the Oracle hierarchy object. This is valid only when the object type is Oracle Database." + }, + "instances": { + "type": "array", + "description": "Details of the instances of the Oracle hierarchy object. This is valid only when the object type is Oracle database.", + "items": { + "$ref": "#/definitions/OracleInstanceProperties" + } + }, + "status": { + "type": "string", + "description": "Connectivity status of the Oracle hierarchy object." + }, + "isArchiveLogModeEnabled": { + "type": "boolean", + "description": "Boolean value that indicates whether the ARCHIVELOGMODE is enabled on the Oracle database. This is valid only when the object type is Oracle database." + }, + "numDbs": { + "type": "integer", + "format": "int32", + "description": "Count of the number of databases on the Oracle hierarchy object. This is valid only when the object type is Oracle Rac or Host." + }, + "numTablespaces": { + "type": "integer", + "format": "int32", + "description": "Count of the number of table spaces in Oracle hierarchy object. This is valid only when the object type is Oracle database." + }, + "standaloneHostName": { + "type": "string", + "description": "Hostname of the Oracle hierarchy object. This is valid only when the object type is Oracle database." + }, + "racName": { + "type": "string", + "description": "RAC name of Oracle hierarchy object. This is valid only when the object type is Oracle database." + }, + "primaryClusterId": { + "type": "string" + }, + "nodeOrder": { + "type": "array", + "description": "Specifies an order for the RAC nodes. Automated Oracle backups use the RAC nodes in the specified order.", + "items": { + "$ref": "#/definitions/OracleNodeOrder" + } + }, + "logBackupFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in minutes. This interval is the period between successive log backups." + }, + "pendingSlaDomain": { + "description": "Describes any pending SLA Domain assignment on this object.", + "$ref": "#/definitions/ManagedObjectPendingSlaInfo" + }, + "hostsInfo": { + "type": "array", + "description": "An array that contains the hosts info for the Oracle hierarchy object. This is valid only when the object type is Oracle Rac or Host.", + "items": { + "$ref": "#/definitions/HostInfo" + } + }, + "dbUniqueName": { + "type": "string", + "description": "Unique name for the Oracle database (DB_UNIQUE_NAME)." + }, + "databaseRole": { + "type": "string", + "description": "Current role of the database." + }, + "dataGuardGroupId": { + "type": "string", + "description": "Rubrik ID of the Data Guard group to which this database belongs." + }, + "dataGuardGroupName": { + "type": "string", + "description": "Name of the Data Guard group to which this database belongs." + } + } + } + ] + }, + "OracleHierarchyObjectSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." 
+ }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/OracleHierarchyObjectSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "DataGuardGroupMember": { + "type": "object", + "required": [ + "dbUniqueName", + "role" + ], + "properties": { + "dbUniqueName": { + "type": "string", + "description": "Unique name of the member Oracle database." + }, + "role": { + "type": "string", + "description": "Current role of the member Oracle database." + }, + "racId": { + "type": "string", + "description": "Rubrik ID of the RAC on which this database is hosted. This field is empty when the database is not hosted on a RAC environment." + }, + "standaloneHostId": { + "type": "string", + "description": "Rubrik ID of the standalone Oracle host on which this database is hosted. This field is empty when the database is not hosted on a standalone system." + }, + "standaloneHostName": { + "type": "string", + "description": "Name of the standalone Oracle database host." + }, + "racName": { + "type": "string", + "description": "Cluster name assigned to the Oracle RAC." + } + } + }, + "DataGuardType": { + "type": "string", + "description": "Specifies whether this database is a Data Guard member, Data Guard group, or a non-Data Guard database.", + "enum": [ + "DataGuardGroup", + "DataGuardMember", + "NonDataGuard" + ] + }, + "ExportOracleDbConfig": { + "type": "object", + "required": [ + "recoveryPoint", + "targetOracleHostOrRacId" + ], + "properties": { + "recoveryPoint": { + "description": "Snapshot ID or timestamp for which the clone is done.", + "$ref": "#/definitions/OracleRecoveryPoint" + }, + "targetOracleHostOrRacId": { + "type": "string", + "description": "ID of the Oracle Host or Oracle RAC object that is the target for the clone of the specified database snapshot. The referenced Oracle host or RAC must have the Rubrik Backup Service installed and connected. Standalone source databases can be cloned to OracleHost and clustered source databases can be cloned to OracleRac only." + }, + "targetMountPath": { + "type": "string", + "description": "The full path for the directory on the target host where the NFS share will be mounted." + }, + "shouldRestoreFilesOnly": { + "type": "boolean", + "description": "A Boolean value that determines whether the database files are copied to the target host without recreating the database. When 'true,' the database is not recreated. When 'false,' the database is recreated. The default value is 'false.'", + "default": false + }, + "restoreFilesPath": { + "type": "string", + "description": "The full path for the directory on the target host to use to store the restored database files." + }, + "archiveLogPath": { + "type": "string", + "description": "The full path for the directory containing the archive log files on the Oracle host." + }, + "cloneDbName": { + "type": "string", + "description": "The new value of the db_name parameter for a clone operation. This is used to specify the new name during rman duplicate." + }, + "customPfilePath": { + "type": "string", + "description": "The full path of the pfile on the target Oracle Host or RAC to use for the database recovery." + }, + "advancedRecoveryConfigBase64": { + "type": "string", + "description": "The configuration file for Oracle advanced recovery in base64 encoded format. This field cannot be specified if `advancedRecoveryConfigMap` is specified." 
+ }, + "advancedRecoveryConfigMap": { + "description": "A key-value map that specifies the configuration parameters for Oracle advanced recovery. This field cannot be specified if `advanceRecoveryConfigBase64` is specified.", + "$ref": "#/definitions/Map_String" + }, + "preScriptPath": { + "type": "string", + "description": "Path to the pre-script to run before the recovery task." + }, + "postScriptPath": { + "type": "string", + "description": "Path to the post-script to run after the recovery task." + }, + "shouldStopRecoveryOnPreScriptFailure": { + "type": "boolean", + "description": "Boolean value that determines whether to stop the recovery task if the pre-script exits with a non-zero value. Set to True to stop the recovery task on pre-script failure. The default setting is False, which allows the task to continue.", + "default": false + }, + "numChannels": { + "type": "integer", + "format": "int32", + "description": "Number of channels used during clone or same-host recovery." + } + } + }, + "ExportOracleTablespaceConfig": { + "type": "object", + "required": [ + "auxiliaryDestinationPath", + "recoveryPoint", + "tablespaceName" + ], + "properties": { + "recoveryPoint": { + "description": "Snapshot ID or timestamp for which the export is done.", + "$ref": "#/definitions/OracleRecoveryPoint" + }, + "tablespaceName": { + "type": "string", + "description": "Name of the tablespace to be exported from the existing database snapshot." + }, + "auxiliaryDestinationPath": { + "type": "string", + "description": "The full path to the directory on the source host where the auxiliary database files will be created." + }, + "exposeAllLogs": { + "type": "boolean", + "description": "Expose all logs that were backed up between the selected recovery point and the latest log backup.", + "default": false + } + } + }, + "HostInfo": { + "type": "object", + "required": [ + "hostname", + "id", + "oracleQueryUser", + "oracleSysDbaUser" + ], + "properties": { + "id": { + "type": "string", + "description": "The managed id of the host." + }, + "hostname": { + "type": "string", + "description": "Name of the host." + }, + "oracleSysDbaUser": { + "type": "string", + "description": "Oracle sysdba user to use on the host." + }, + "oracleQueryUser": { + "type": "string", + "description": "Oracle discovery user." + } + } + }, + "ManagedObjectDescendantCountOracleFields": { + "type": "object", + "properties": { + "oracleDatabase": { + "type": "integer", + "format": "int32", + "description": "Number of Oracle databases." + } + } + }, + "MountOracleDbConfig": { + "type": "object", + "required": [ + "recoveryPoint", + "targetOracleHostOrRacId" + ], + "properties": { + "recoveryPoint": { + "description": "Snapshot ID or timestamp for which the mount is done.", + "$ref": "#/definitions/OracleRecoveryPoint" + }, + "targetOracleHostOrRacId": { + "type": "string", + "description": "ID of the Oracle Host or Oracle RAC object for the created database. The referenced Oracle host or RAC must have the Rubrik Backup Service installed and connected. Standalone source databases can be live mounted to OracleHost and clustered source databases can be live mounted to OracleRac only." + }, + "targetMountPath": { + "type": "string", + "description": "The full path on the target host where the NFS share with the snapshot files will be mounted." + }, + "shouldMountFilesOnly": { + "type": "boolean", + "description": "A Boolean value that determines whether the database files are mounted to the target host without recreating the database. 
When 'true', the database is not recreated. When 'false', the database is recreated. The default value is 'false'.", + "default": false + }, + "customPfilePath": { + "type": "string", + "description": "The full path of the pfile on the target Oracle Host or RAC to use for the database recovery." + }, + "advancedRecoveryConfigBase64": { + "type": "string", + "description": "The configuration file for Oracle advanced recovery in base64 encoded format. This field cannot be specified if `advancedRecoveryConfigMap` is specified." + }, + "numChannels": { + "type": "integer", + "format": "int32", + "description": "Number of channels used during live mount." + }, + "advancedRecoveryConfigMap": { + "description": "A key-value map that specifies the configuration parameters for Oracle advanced recovery. This field cannot be specified if `advanceRecoveryConfigBase64` is specified.", + "$ref": "#/definitions/Map_String" + }, + "preScriptPath": { + "type": "string", + "description": "Path to the pre-script to run before the recovery task." + }, + "postScriptPath": { + "type": "string", + "description": "Path to the post-script to run after the recovery task." + }, + "shouldStopRecoveryOnPreScriptFailure": { + "type": "boolean", + "description": "Boolean value that determines whether to stop the recovery task if the pre-script exits with a non-zero value. Set to True to stop the recovery task on pre-script failure. The default setting is False, which allows the task to continue.", + "default": false + } + } + }, + "MountedDbFilter": { + "type": "string", + "description": "Type of filter to use when retrieving a list of Oracle database Live Mount objects.", + "enum": [ + "SourceDatabaseName", + "MountedDatabaseName", + "CreationDate" + ] + }, + "OracleBackupJobConfig": { + "allOf": [ + { + "$ref": "#/definitions/BaseOnDemandSnapshotConfig" + }, + { + "type": "object", + "properties": { + "forceFullSnapshot": { + "type": "boolean", + "description": "Boolean value that indicates whether to force a full snapshot for the specified Oracle database object. Set to true to force a full snapshot. Set to false to allow the Rubrik cluster to determine the type of snapshot required." + } + } + } + ] + }, + "OracleConnectionTypes": { + "type": "string", + "description": "Connection type options for Oracle hosts.", + "enum": [ + "Connected", + "Disconnected", + "PartiallyConnected" + ] + }, + "OracleDataGuardGroupUpdate": { + "allOf": [ + { + "$ref": "#/definitions/OracleUpdateCommon" + }, + { + "type": "object", + "properties": { + "shouldBackupFromPrimaryOnly": { + "type": "boolean", + "description": "Value that indicates whether to backup from the PRIMARY member only, or from any available member." + }, + "preferredDGMemberUniqueNames": { + "type": "array", + "description": "Ordered list of database unique names to use for backup.", + "items": { + "type": "string" + } + } + } + } + ] + }, + "OracleDbDetail": { + "allOf": [ + { + "$ref": "#/definitions/OracleDbSummary" + }, + { + "$ref": "#/definitions/OracleNonSlaProperties" + }, + { + "type": "object", + "required": [ + "snapshotCount", + "tablespaces" + ], + "properties": { + "tablespaces": { + "type": "array", + "description": "An array that contains tablespace names of the specified Oracle database.", + "items": { + "type": "string" + } + }, + "snapshotCount": { + "type": "integer", + "format": "int32" + }, + "latestRecoveryPoint": { + "type": "string", + "format": "date", + "description": "The time stamp of the most recent recovery point for this database." 
+ }, + "oldestRecoveryPoint": { + "type": "string", + "format": "date", + "description": "The time stamp of the earliest recovery point for this database." + }, + "pendingSlaDomain": { + "description": "Describes any pending SLA Domain assignment on this object.", + "$ref": "#/definitions/ManagedObjectPendingSlaInfo" + }, + "lastValidationResult": { + "description": "General information about last validation job.", + "$ref": "#/definitions/OracleLastValidationResult" + }, + "oracleHome": { + "type": "string", + "description": "Oracle Home of the Oracle database." + }, + "isLiveMount": { + "type": "boolean", + "description": "Value that indicates whether an Oracle database object is a Live Mount or not. A true value indicates that the object is a Live Mount." + }, + "hostsInfo": { + "type": "array", + "description": "An array that contains the host info for each instance.", + "items": { + "$ref": "#/definitions/HostInfo" + } + }, + "shouldBackupFromPrimaryDGGroupMemberOnly": { + "type": "boolean", + "description": "Indicates whether to backup only from the PRIMARY Data Guard member or from any available member." + }, + "preferredDGMemberUniqueNames": { + "type": "array", + "description": "Ordered list of database unique names to use for backup in a Data Guard group.", + "items": { + "type": "string" + } + } + } + } + ] + }, + "OracleDbSnapshotDetail": { + "allOf": [ + { + "$ref": "#/definitions/BaseSnapshotSummary" + }, + { + "$ref": "#/definitions/BaseSnapshotDetail" + }, + { + "type": "object", + "properties": { + "database": { + "type": "string" + } + } + } + ] + }, + "OracleDbSnapshotSummary": { + "allOf": [ + { + "$ref": "#/definitions/BaseSnapshotSummary" + }, + { + "type": "object", + "required": [ + "databaseName", + "tablespaces" + ], + "properties": { + "databaseName": { + "type": "string" + }, + "tablespaces": { + "type": "array", + "description": "Array containing descriptions of the tablespaces that were captured in the specified snapshot.", + "items": { + "type": "string" + } + }, + "isValid": { + "type": "boolean", + "description": "A Boolean that specifies whether the snapshot is valid." + } + } + } + ] + }, + "OracleDbSnapshotSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/OracleDbSnapshotSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "OracleDbSummary": { + "allOf": [ + { + "$ref": "#/definitions/Snappable" + }, + { + "type": "object", + "required": [ + "hostLogRetentionHours", + "id", + "infraPath", + "isDbLocalToTheCluster", + "isRelic", + "name", + "numMissedSnapshot", + "numTablespaces", + "primaryClusterId" + ], + "properties": { + "id": { + "type": "string", + "description": "ID assigned to the Oracle database." + }, + "name": { + "type": "string", + "description": "Service name of the Oracle database." + }, + "primaryClusterId": { + "type": "string" + }, + "infraPath": { + "type": "array", + "description": "An array that contains information about the objects in the infrastructure path of a specified Oracle database.", + "items": { + "$ref": "#/definitions/ManagedHierarchyObjectAncestor" + } + }, + "isRelic": { + "type": "boolean", + "description": "Boolean value that indicates whether a Oracle database object is in an archived state and has retained snapshots. 
Value is true when the object is archived with retained snapshots." + }, + "numTablespaces": { + "type": "integer", + "format": "int32", + "description": "Count of the number of table spaces in Oracle database." + }, + "logBackupFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in minutes. This interval is the period between successive log backups." + }, + "numMissedSnapshot": { + "type": "integer", + "format": "int32", + "description": "An integer that specifies the number of missed snapshots." + }, + "lastSnapshotTime": { + "type": "string", + "format": "date-time", + "description": "The timestamp of the previous snapshot." + }, + "includeBackupTaskInfo": { + "type": "boolean", + "description": "True/false value indicating if backup task information is included in the response." + }, + "currentBackupTaskInfo": { + "description": "Information about the current backup task.", + "$ref": "#/definitions/BackupTaskDiagnosticInfo" + }, + "isDbLocalToTheCluster": { + "type": "boolean", + "description": "A Boolean value that specifies whether the Oracle database is local to the cluster. When this value is 'true', the Oracle database is local to the cluster." + }, + "hostLogRetentionHours": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in hours. The next log snapshot job deletes archived Oracle redo log files whose 'nextTime' field specifies a time more than the specified number of hours ago. To immediately delete archived redo log files regardless of age, specify an interval of -1. To preserve all archived redo log files, specify an interval of -2." + }, + "sid": { + "type": "string", + "description": "System identifier (SID) of the Oracle database." + }, + "racId": { + "type": "string", + "description": "Rubrik ID of the RAC on which this database is hosted. This field will be empty if the database is not hosted on a RAC environment." + }, + "standaloneHostId": { + "type": "string", + "description": "Rubrik ID of the standalone Oracle host on which this database is hosted. This field will be empty if the database is not hosted on a standalone system." + }, + "numInstances": { + "type": "integer", + "format": "int32", + "description": "Count of the number of instances of the Oracle database." + }, + "instances": { + "type": "array", + "description": "Details of the instances of the Oracle database.", + "items": { + "$ref": "#/definitions/OracleInstanceProperties" + } + }, + "isArchiveLogModeEnabled": { + "type": "boolean", + "description": "Boolean value that indicates whether the ARCHIVELOG mode is enabled on the Oracle database or not." + }, + "standaloneHostName": { + "type": "string", + "description": "Hostname of the standalone Oracle database host." + }, + "racName": { + "type": "string", + "description": "RAC name of the cluster database." + }, + "archiveLogDestinations": { + "type": "array", + "description": "An array that contains the archive log destinations for the specified Oracle database.", + "items": { + "type": "string" + } + }, + "isPrimary": { + "type": "boolean", + "description": "Indicates whether the current DATABASE_ROLE is PRIMARY which specifies the database is accepting read/write transactions as the primary database in a Data Guard configuration." + }, + "dbUniqueName": { + "type": "string", + "description": "Unique name for the Oracle database (DB_UNIQUE_NAME)." + }, + "databaseRole": { + "type": "string", + "description": "Current role of the database." 
+ }, + "dataGuardType": { + "description": "Indicates whether this object is a non-Data Guard database, Data Guard member database, or Data Guard group.", + "$ref": "#/definitions/DataGuardType" + }, + "dataGuardGroupId": { + "type": "string", + "description": "Rubrik ID of the Data Guard group to which this database belongs." + }, + "dataGuardGroupName": { + "type": "string", + "description": "Name of the Data Guard group to which this database belongs." + }, + "dataGuardGroupMembers": { + "type": "array", + "description": "List of Data Guard group members.", + "items": { + "$ref": "#/definitions/DataGuardGroupMember" + } + } + } + } + ] + }, + "OracleDbSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/OracleDbSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "OracleHostDetail": { + "allOf": [ + { + "$ref": "#/definitions/OracleHostSummary" + }, + { + "$ref": "#/definitions/OracleNonSlaProperties" + } + ] + }, + "OracleHostSummary": { + "allOf": [ + { + "$ref": "#/definitions/SlaAssignable" + }, + { + "type": "object", + "required": [ + "id", + "infraPath", + "name", + "numDbs", + "primaryClusterId", + "status" + ], + "properties": { + "id": { + "type": "string", + "description": "ID assigned to the standalone Oracle host." + }, + "name": { + "type": "string", + "description": "Hostname of the standalone Oracle host." + }, + "status": { + "type": "string", + "description": "Connectivity status of the Oracle RAC." + }, + "numDbs": { + "type": "integer", + "format": "int32", + "description": "Count of the number of databases on the Oracle RAC." + }, + "primaryClusterId": { + "type": "string" + }, + "infraPath": { + "type": "array", + "description": "An array that contains information about the objects in the infrastructure path of a specified Oracle database.", + "items": { + "$ref": "#/definitions/ManagedHierarchyObjectAncestor" + } + } + } + } + ] + }, + "OracleHostSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/OracleHostSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "OracleInstanceProperties": { + "type": "object", + "required": [ + "hostName", + "instanceSid" + ], + "properties": { + "hostName": { + "type": "string", + "description": "Name of the Oracle host." + }, + "instanceSid": { + "type": "string", + "description": "System identifier (SID) of the Oracle database instance." + } + } + }, + "OracleLastValidationResult": { + "type": "object", + "required": [ + "eventSeriesId", + "isSuccess", + "validationTime" + ], + "properties": { + "eventSeriesId": { + "type": "string", + "description": "The eventseries ID for the last validation job." + }, + "isSuccess": { + "type": "boolean", + "description": "A Boolean that specifies whether the last validation successfully completed." + }, + "validationTime": { + "type": "string", + "format": "date-time", + "description": "The timestamp of the recovery point to validate." 
+ } + } + }, + "OracleMissedRecoverableRange": { + "type": "object", + "required": [ + "beginTime", + "description", + "endTime", + "errorType" + ], + "properties": { + "beginTime": { + "type": "string", + "format": "date-time" + }, + "endTime": { + "type": "string", + "format": "date-time" + }, + "errorType": { + "type": "string" + }, + "description": { + "type": "string" + } + } + }, + "OracleMissedRecoverableRangeListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/OracleMissedRecoverableRange" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "OracleMountDetail": { + "allOf": [ + { + "$ref": "#/definitions/OracleMountSummary" + }, + { + "type": "object", + "required": [ + "targetHostMount" + ], + "properties": { + "mountRequestId": { + "type": "string", + "description": "ID of the async request object for the mount task." + }, + "unmountRequestId": { + "type": "string", + "description": "ID of the async request object for the delete task." + }, + "targetHostMount": { + "type": "string", + "description": "The full path for the directory on the target host where the NFS share is mounted." + } + } + } + ] + }, + "OracleMountSummary": { + "type": "object", + "required": [ + "creationDate", + "id", + "isFilesOnlyMount", + "mountedDatabaseName", + "sourceDatabaseId", + "sourceDatabaseName", + "status", + "targetHostId", + "targetHostname" + ], + "properties": { + "id": { + "type": "string" + }, + "sourceDatabaseId": { + "type": "string" + }, + "sourceDatabaseName": { + "type": "string" + }, + "targetHostId": { + "type": "string", + "description": "ID assigned to the Oracle host or RAC object where the Oracle database was created." + }, + "targetHostname": { + "type": "string", + "description": "Name of the Oracle host. or RAC where the Oracle database was created." + }, + "creationDate": { + "type": "string", + "format": "date-time", + "description": "The time what the mount was created." + }, + "ownerId": { + "type": "string", + "description": "ID of the user account that created the mount." + }, + "ownerName": { + "type": "string", + "description": "Name of the user account that created the mount." + }, + "status": { + "description": "The current status of the mount. When the status is **Available**, the mount is ready to use.", + "$ref": "#/definitions/MountStatus" + }, + "mountedDatabaseId": { + "type": "string", + "description": "ID assigned to the mount object for the Oracle database." + }, + "mountedDatabaseName": { + "type": "string", + "description": "Oracle service name of the mounted Oracle database." + }, + "isInstantRecovered": { + "type": "boolean", + "description": "Indicates whether this mount was created during an Instant Recovery or Live Mount." + }, + "isFilesOnlyMount": { + "type": "boolean", + "description": "Indicates whether this mount is a files only mount or not." + } + } + }, + "OracleMountSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/OracleMountSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." 
+ } + } + }, + "OracleNodeOrder": { + "type": "object", + "required": [ + "nodeName", + "order" + ], + "properties": { + "nodeName": { + "type": "string", + "description": "Nodename of the Oracle RAC node." + }, + "order": { + "type": "integer", + "format": "int32", + "description": "Order in which Rubrik uses this node for automated Oracle backup." + } + } + }, + "OracleNodeProperties": { + "type": "object", + "required": [ + "nodeName", + "status" + ], + "properties": { + "nodeName": { + "type": "string", + "description": "Node name of the Oracle RAC node." + }, + "status": { + "type": "string", + "description": "Connectivity status of the Oracle RAC node." + } + } + }, + "OracleNonSlaProperties": { + "type": "object", + "required": [ + "hostLogRetentionHours", + "hostMount", + "logBackupFrequencyInMinutes", + "logRetentionHours", + "numChannels" + ], + "properties": { + "logBackupFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in minutes. This interval is the period between successive log backups." + }, + "logRetentionHours": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in hours. Log backups are retained for the duration of the interval." + }, + "hostLogRetentionHours": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in hours. The next log snapshot job deletes archived Oracle redo log files whose 'nextTime' field specifies a time more than the specified number of hours ago. To immediately delete archived redo log files regardless of age, specify an interval of -1. To preserve all archived redo log files, specify an interval of -2." + }, + "numChannels": { + "type": "integer", + "format": "int32", + "description": "Number of channels used to backup the Oracle database." + }, + "hostMount": { + "type": "string", + "description": "Path where the NFS share is mounted on the host." + } + } + }, + "OracleRacDetail": { + "allOf": [ + { + "$ref": "#/definitions/OracleRacSummary" + }, + { + "$ref": "#/definitions/OracleNonSlaProperties" + }, + { + "type": "object", + "required": [ + "scan" + ], + "properties": { + "scan": { + "type": "string", + "description": "Single Client Access Name (SCAN) of the Oracle RAC cluster." + } + } + } + ] + }, + "OracleRacSummary": { + "allOf": [ + { + "$ref": "#/definitions/SlaAssignable" + }, + { + "type": "object", + "required": [ + "id", + "name", + "nodeOrder", + "nodes", + "numDbs", + "numNodes", + "primaryClusterId", + "status" + ], + "properties": { + "id": { + "type": "string", + "description": "ID assigned to the Oracle RAC." + }, + "name": { + "type": "string", + "description": "Cluster name assigned to the Oracle RAC." + }, + "numNodes": { + "type": "integer", + "format": "int32", + "description": "Count of the number of nodes on the Oracle RAC." + }, + "nodes": { + "type": "array", + "description": "Details of the nodes of this Oracle RAC.", + "items": { + "$ref": "#/definitions/OracleNodeProperties" + } + }, + "nodeOrder": { + "type": "array", + "description": "Specifies an order for the RAC nodes. Automated Oracle backups use the RAC nodes in the specified order.", + "items": { + "$ref": "#/definitions/OracleNodeOrder" + } + }, + "status": { + "type": "string", + "description": "Connectivity status of the Oracle RAC." + }, + "numDbs": { + "type": "integer", + "format": "int32", + "description": "Count of the number of databases on the Oracle RAC." 
+ }, + "primaryClusterId": { + "type": "string" + } + } + } + ] + }, + "OracleRacSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/OracleRacSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "OracleRecoverableRange": { + "type": "object", + "required": [ + "beginTime", + "dbSnapshotSummaries", + "endTime", + "status" + ], + "properties": { + "beginTime": { + "type": "string", + "format": "date-time" + }, + "endTime": { + "type": "string", + "format": "date-time" + }, + "status": { + "type": "string" + }, + "dbSnapshotSummaries": { + "type": "array", + "description": "Database snapshots that fall within the recoverable range.", + "items": { + "$ref": "#/definitions/OracleDbSnapshotSummary" + } + } + } + }, + "OracleRecoverableRangeListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/OracleRecoverableRange" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "OracleRecoveryPoint": { + "type": "object", + "properties": { + "snapshotId": { + "type": "string", + "description": "Snapshot ID of the Oracle database." + }, + "timestampMs": { + "type": "integer", + "format": "int64", + "description": "A timestamp in milliseconds that specifies a recovery point." + } + } + }, + "OracleUpdate": { + "allOf": [ + { + "$ref": "#/definitions/OracleUpdateCommon" + }, + { + "type": "object", + "properties": { + "configuredSlaDomainIdDeprecated": { + "type": "string", + "description": "ID of the SLA domain protecting the specified Oracle object. Log backup jobs are no longer scheduled if the SLA domain indicates the Oracle object is unprotected. The specified SLA domain is not used to configure the protection or retention for this Oracle object. This is a DEPRECATED field, and will be removed in later releases." + }, + "nodeOrder": { + "type": "array", + "description": "Specifies an order for the RAC nodes. Automated Oracle backups use the RAC nodes in the specified order.", + "items": { + "$ref": "#/definitions/OracleNodeOrder" + } + } + } + } + ] + }, + "OracleUpdateCommon": { + "type": "object", + "properties": { + "logBackupFrequencyInMinutes": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in minutes. This interval is the period between successive log backups." + }, + "logRetentionHours": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in hours. Log backups are retained for the duration of the interval." + }, + "hostLogRetentionHours": { + "type": "integer", + "format": "int32", + "description": "Specifies an interval in hours. For Oracle archived redo log files whose nextTime is before (now - interval), the next log snapshot job will delete them from the host. Set to 0 for inheriting the value from its parent; -1 for immediate deletion; and -2 to skip log deletion." + }, + "numChannels": { + "type": "integer", + "format": "int32", + "description": "Number of channels used to backup the Oracle database." 
+ }, + "hostMount": { + "type": "string", + "description": "Path where the NFS share is mounted on the host." + } + } + }, + "RecoverOracleDbConfig": { + "type": "object", + "required": [ + "recoveryPoint" + ], + "properties": { + "recoveryPoint": { + "description": "Snapshot ID or timestamp for which the export is done.", + "$ref": "#/definitions/OracleRecoveryPoint" + }, + "numChannels": { + "type": "integer", + "format": "int32", + "description": "Number of channels used during instant recovery." + } + } + }, + "OrgExclusivenessLevel": { + "type": "string", + "description": "The default exclusiveness level for authorizations on resources granted to the organization.", + "enum": [ + "NotExclusive", + "OrgAndDescendants" + ] + }, + "OrganizationCreate": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string", + "description": "The name of the Organization." + }, + "exclusivenessLevel": { + "description": "The default exclusiveness level for authorizations on resources granted to the organization.", + "$ref": "#/definitions/OrgExclusivenessLevel" + } + } + }, + "OrganizationDetail": { + "allOf": [ + { + "$ref": "#/definitions/OrganizationSummary" + }, + { + "type": "object", + "required": [ + "admins" + ], + "properties": { + "admins": { + "type": "array", + "description": "All of the Org Admins for this Organization.", + "items": { + "$ref": "#/definitions/OrganizationPrincipalSummary" + } + } + } + } + ] + }, + "OrganizationDetailListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/OrganizationDetail" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "OrganizationPrincipalDef": { + "type": "object", + "required": [ + "accessLevel", + "id" + ], + "properties": { + "id": { + "type": "string", + "description": "The id of the Principal." + }, + "accessLevel": { + "type": "array", + "description": "The level of access this principal has on the Organization. NOTE 1) An empty array specifies no access 2) The ManageSla and ManageUser access levels require OrgAdmin access.", + "items": { + "type": "string", + "enum": [ + "OrgAdmin", + "ManageSla", + "ManageUser" + ] + } + } + } + }, + "OrganizationPrincipalSummary": { + "type": "object", + "required": [ + "id", + "name" + ], + "properties": { + "id": { + "type": "string", + "description": "The id of this principal." + }, + "name": { + "type": "string", + "description": "The name of this principal." + } + } + }, + "OrganizationResourceMetric": { + "type": "object", + "required": [ + "numNoSla", + "numProtected", + "numTotal" + ], + "properties": { + "numProtected": { + "type": "integer", + "format": "int64", + "description": "Protected object count." + }, + "numTotal": { + "type": "integer", + "format": "int64", + "description": "Total object count." + }, + "numNoSla": { + "type": "integer", + "format": "int64", + "description": "Object count with no sla." + } + } + }, + "OrganizationStat": { + "type": "object", + "required": [ + "logical", + "physical" + ], + "properties": { + "logical": { + "$ref": "#/definitions/OfflineStatSummary" + }, + "physical": { + "$ref": "#/definitions/OfflineStatSummary" + } + } + }, + "OrganizationState": { + "type": "string", + "description": "Global represents the global organization. 
The other organizations can be either Archived or Active.", + "enum": [ + "Global", + "Active", + "Archived" + ] + }, + "OrganizationSummary": { + "type": "object", + "required": [ + "id", + "isGlobal", + "name", + "roleId" + ], + "properties": { + "id": { + "type": "string", + "description": "The unique id of the Organization." + }, + "name": { + "type": "string", + "description": "The name of the Organization." + }, + "isGlobal": { + "type": "boolean", + "description": "Whether the organization is the Global Organization." + }, + "envoyStatus": { + "type": "string", + "description": "Connection state of the specified Rubrik Envoy object." + }, + "exclusivenessLevel": { + "description": "The default exclusiveness level for authorizations on resources granted to the organization.", + "$ref": "#/definitions/OrgExclusivenessLevel" + }, + "roleId": { + "type": "string", + "description": "The ID for the organization's role." + } + } + }, + "OrganizationSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/OrganizationSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "OrganizationUpdate": { + "allOf": [ + { + "$ref": "#/definitions/OrganizationCreate" + } + ] + }, + "PlatformEnum": { + "type": "string", + "description": "The computing platform that is running the Rubrik CDM software.", + "enum": [ + "dev", + "va", + "ce", + "vc", + "cloud", + "leancloud", + "r300", + "r500", + "fattwin", + "c220m4", + "c240m4", + "c220m5", + "c240m5", + "thinkserversd350", + "dl360", + "dl380", + "r6000S", + "r6000SE", + "r6000F", + "r6408", + "r6408M", + "pe6420", + "pe740hd", + "pe740", + "e1000", + "f10000", + "rkdev" + ] + }, + "RegistrationRequirement": { + "type": "string", + "description": "Indicates the registration requirement of the cluster 'RegistrationRequired' - Registration is mandatory and valid support community credentials are required after cluster bootstrap. 'RegistrationNotRequired' - Registration is not mandatory and user will not be prompted to enter support community credentials. 'SuggestRegistration' - User will be prompted for support community credentials during cluster bootstrap. User can choose to skip this step. However, bootstrap will fail if the given credentials are invalid. 'SuggestRegistrationAllowFailure'- Special case of SuggestRegistration. User will be prompted for support community credentials during cluster bootstrap.User can choose to skip this step. Bootstrap will continue even if provided credentials are invalid.", + "enum": [ + "RegistrationRequired", + "RegistrationNotRequired", + "SuggestRegistration", + "SuggestRegistrationAllowFailure" + ] + }, + "AwsComputeSettingDefinition": { + "type": "object", + "required": [ + "awsCustomerAccountPolarisManagedId", + "polarisManagedId", + "region", + "securityGroupId", + "subnetId", + "versionNumber", + "vpcId" + ], + "properties": { + "polarisManagedId": { + "type": "string", + "description": "Polaris managed ID of the AWS compute setting object." + }, + "subnetId": { + "type": "string" + }, + "vpcId": { + "type": "string" + }, + "securityGroupId": { + "type": "string" + }, + "region": { + "type": "string" + }, + "computeProxyPolarisManagedId": { + "type": "string", + "description": "Polaris managed ID of the proxy setting used." 
+ }, + "awsCustomerAccountPolarisManagedId": { + "type": "string", + "description": "The Polaris Managed ID of the customer account that owns the compute settings.\n" + }, + "versionNumber": { + "type": "integer", + "format": "int32", + "description": "Polaris version of the AWS compute setting object." + } + } + }, + "AwsComputeSettingDetail": { + "allOf": [ + { + "$ref": "#/definitions/AwsComputeSettingDefinition" + }, + { + "type": "object", + "required": [ + "customerAccountId", + "id" + ], + "properties": { + "id": { + "type": "string", + "description": "The CDM AwsComputeSettings table ID associated with the entry.\n" + }, + "customerAccountId": { + "type": "string", + "description": "The CDM AwsCustomerAccount table ID associated with the entry.\n" + }, + "computeProxySettingId": { + "type": "string", + "description": "The CDM ProxySetting table ID associated with the entry." + } + } + } + ] + }, + "AwsCustomerAccountDefinition": { + "allOf": [ + { + "$ref": "#/definitions/AwsCustomerAccountSummary" + }, + { + "type": "object", + "required": [ + "awsIamUserDefinition", + "externalId" + ], + "properties": { + "externalId": { + "type": "string" + }, + "awsIamUserDefinition": { + "description": "Summary for the Rubrik AWS IAM user with secret key.\n", + "$ref": "#/definitions/AwsIamUserDefinition" + } + } + } + ] + }, + "AwsCustomerAccountDetail": { + "allOf": [ + { + "$ref": "#/definitions/AwsCustomerAccountSummary" + }, + { + "type": "object", + "required": [ + "awsIamUserDetail", + "id" + ], + "properties": { + "id": { + "type": "string", + "description": "The CDM AwsCustomerAccount table ID associated with the entry.\n" + }, + "awsIamUserDetail": { + "description": "Summary for the Rubrik AWS IAM user.\n", + "$ref": "#/definitions/AwsIamUserDetail" + } + } + } + ] + }, + "AwsCustomerAccountSummary": { + "allOf": [ + { + "$ref": "#/definitions/AwsCustomerAccountRoleSummary" + }, + { + "type": "object", + "required": [ + "cloudFormationStackDetailMap", + "polarisManagedId", + "policyVersionDetailMap", + "versionNumber" + ], + "properties": { + "polarisManagedId": { + "type": "string", + "description": "The Polaris managed ID of the AWS customer account object." + }, + "versionNumber": { + "type": "integer", + "format": "int32", + "description": "Polaris version of the AWS Customer Account object." 
+ }, + "cloudFormationStackDetailMap": { + "type": "array", + "description": "Map of feature ID to cloud formation stack ARN associated with each IAM based role created in the customer account.\n", + "items": { + "$ref": "#/definitions/CloudFormationStackDetail" + } + }, + "policyVersionDetailMap": { + "type": "array", + "items": { + "$ref": "#/definitions/PolicyVersionDetail" + } + } + } + } + ] + }, + "AwsIamUserDefinition": { + "allOf": [ + { + "$ref": "#/definitions/AwsIamUserSummary" + }, + { + "type": "object", + "required": [ + "secretKey" + ], + "properties": { + "secretKey": { + "type": "string", + "description": "Secret key for the IAM user.", + "x-secret": true + } + } + } + ] + }, + "AwsIamUserDetail": { + "allOf": [ + { + "$ref": "#/definitions/AwsIamUserSummary" + }, + { + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "type": "string", + "description": "The ID associated with the entry in AzureArchivalLocation table for this archival location.\n" + } + } + } + ] + }, + "AwsIamUserSummary": { + "type": "object", + "required": [ + "accessKey", + "awsAccountId", + "iamUserArn", + "iamUserId", + "iamUserName", + "polarisManagedId" + ], + "properties": { + "polarisManagedId": { + "type": "string", + "description": "Polaris managed ID associated with the IAM user." + }, + "iamUserId": { + "type": "string", + "description": "User ID of the IAM user." + }, + "iamUserArn": { + "type": "string", + "description": "Amazon resource name of the IAM user." + }, + "iamUserName": { + "type": "string" + }, + "awsAccountId": { + "type": "string", + "description": "ID of Rubrik AWS account which owns the IAM user." + }, + "accessKey": { + "type": "string", + "description": "Access key for the IAM user." + } + }, + "description": "Summary of Rubrik IAM user created for every customer account for cross-account role based authentication.\n" + }, + "AzureComputeSettingDefinition": { + "type": "object", + "required": [ + "azureCustomerAccountPolarisManagedId", + "containerName", + "environment", + "generalPurposeStorageAccountName", + "networkSecurityGroupId", + "polarisManagedId", + "region", + "resourceGroupName", + "subnetId", + "versionNumber", + "vnetId" + ], + "properties": { + "polarisManagedId": { + "type": "string", + "description": "Polaris managed ID of the Azure compute setting detail object.\n" + }, + "subnetId": { + "type": "string" + }, + "vnetId": { + "type": "string" + }, + "networkSecurityGroupId": { + "type": "string" + }, + "region": { + "type": "string" + }, + "computeProxyPolarisManagedId": { + "type": "string", + "description": "The Polaris managed ID of the proxy setting in use." + }, + "generalPurposeStorageAccountName": { + "type": "string" + }, + "containerName": { + "type": "string" + }, + "environment": { + "type": "string" + }, + "azureCustomerAccountPolarisManagedId": { + "type": "string", + "description": "The customer account ID that owns the compute settings." + }, + "resourceGroupName": { + "type": "string" + }, + "versionNumber": { + "type": "integer", + "format": "int32", + "description": "Polaris version of the Azure compute setting object." 
+ } + } + }, + "AzureComputeSettingDetail": { + "allOf": [ + { + "$ref": "#/definitions/AzureComputeSettingDefinition" + }, + { + "type": "object", + "required": [ + "customerAccountId", + "id" + ], + "properties": { + "id": { + "type": "string", + "description": "The CDM AzureComputeSettings table ID associated with the entry.\n" + }, + "customerAccountId": { + "type": "string", + "description": "The CDM AzureCustomerAccount table ID associated with the entry.\n" + }, + "computeProxySettingId": { + "type": "string", + "description": "The CDM ProxySetting table ID associated with the entry." + } + } + } + ] + }, + "AzureCustomerAccountDefinition": { + "allOf": [ + { + "$ref": "#/definitions/AzureCustomerAccountSummary" + }, + { + "type": "object", + "required": [ + "azureRubrikAccountAppDefinition" + ], + "properties": { + "azureRubrikAccountAppDefinition": { + "description": "Details of the Azure Rubrik app that uses clientSecret to associate with the customer account for OAuth roles.\n", + "$ref": "#/definitions/AzureRubrikAccountAppDefinition" + } + } + } + ] + }, + "AzureCustomerAccountDetail": { + "allOf": [ + { + "$ref": "#/definitions/AzureCustomerAccountSummary" + }, + { + "type": "object", + "required": [ + "azureRubrikAccountAppDetail", + "id" + ], + "properties": { + "id": { + "type": "string", + "description": "The CDM AzureCustomerAccount table ID associated with the entry.\n" + }, + "azureRubrikAccountAppDetail": { + "description": "Details of the Azure Rubrik app associated with the customer account for OAuth roles with clientSecret.\n", + "$ref": "#/definitions/AzureRubrikAccountAppDetail" + } + } + } + ] + }, + "AzureCustomerAccountSummary": { + "allOf": [ + { + "$ref": "#/definitions/AzureCustomerAccountRoleSummary" + }, + { + "type": "object", + "required": [ + "polarisManagedId", + "policyVersionDetailMap", + "versionNumber" + ], + "properties": { + "polarisManagedId": { + "type": "string", + "description": "The Polaris Managed ID of the Azure customer account object.\n" + }, + "versionNumber": { + "type": "integer", + "format": "int32", + "description": "Polaris version of the Azure Customer Account object." + }, + "policyVersionDetailMap": { + "type": "array", + "items": { + "$ref": "#/definitions/PolicyVersionDetail" + } + } + } + } + ] + }, + "AzureRubrikAccountAppDefinition": { + "allOf": [ + { + "$ref": "#/definitions/AzureRubrikAccountAppSummary" + }, + { + "type": "object", + "required": [ + "appClientSecret" + ], + "properties": { + "appClientSecret": { + "type": "string", + "x-secret": true + } + } + } + ] + }, + "AzureRubrikAccountAppDetail": { + "allOf": [ + { + "$ref": "#/definitions/AzureRubrikAccountAppSummary" + }, + { + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "type": "string", + "description": "The CDM Azure Rubrik Account App table ID associated with the entry.\n" + } + } + } + ] + }, + "AzureRubrikAccountAppSummary": { + "type": "object", + "required": [ + "appClientId", + "azureRubrikAccountId", + "polarisManagedId", + "redirectUri" + ], + "properties": { + "polarisManagedId": { + "type": "string", + "description": "Polaris managed ID of the object." 
+ }, + "appClientId": { + "type": "string" + }, + "redirectUri": { + "type": "string" + }, + "azureRubrikAccountId": { + "type": "string", + "description": "The account ID of the Azure Rubrik account associated with the Azure account app.\n" + } + }, + "description": "Summary of the Rubrik Azure account app created for OAuth roles.\n" + }, + "CloudFormationStackDetail": { + "type": "object", + "required": [ + "cloudFormationStackArn", + "featureId" + ], + "properties": { + "featureId": { + "type": "string" + }, + "cloudFormationStackArn": { + "type": "string" + } + } + }, + "PolarisAwsArchivalDefinition": { + "allOf": [ + { + "$ref": "#/definitions/PolarisAwsArchivalSummary" + }, + { + "type": "object", + "properties": { + "pemFileContent": { + "type": "string", + "x-secret": true + }, + "kmsMasterKeyId": { + "type": "string", + "x-secret": true + } + } + } + ] + }, + "PolarisAwsArchivalDetail": { + "allOf": [ + { + "$ref": "#/definitions/PolarisAwsArchivalSummary" + }, + { + "type": "object", + "required": [ + "customerAccountId", + "id" + ], + "properties": { + "id": { + "type": "string", + "description": "The ID associated with the entry in AwsArchivalLocation table for this archival location.\n" + }, + "customerAccountId": { + "type": "string", + "description": "The CDM AwsCustomerAccount table ID associated with the entry.\n" + }, + "computeSettingId": { + "type": "string", + "description": "The CDM AwsComputeSettings table ID associated with the entry.\n" + }, + "proxySettingId": { + "type": "string", + "description": "The CDM ProxySettings table ID associated with the entry.\n" + } + } + } + ] + }, + "PolarisAwsArchivalLocationSpec": { + "type": "object", + "required": [ + "awsCustomerAccountPolarisManagedId", + "bucket", + "name", + "polarisManagedId", + "storageClass", + "versionNumber" + ], + "properties": { + "name": { + "type": "string", + "description": "The archival location name." + }, + "polarisManagedId": { + "type": "string", + "description": "The Polaris managed ID of the AWS archival detail object." + }, + "bucket": { + "type": "string" + }, + "region": { + "type": "string" + }, + "endpoint": { + "type": "string" + }, + "versionNumber": { + "type": "integer", + "format": "int32" + }, + "storageClass": { + "type": "string" + }, + "archivalProxyPolarisManagedId": { + "type": "string", + "description": "The Polaris managed ID of the proxy setting for the archival location.\n" + }, + "awsComputeSettingPolarisManagedId": { + "type": "string", + "description": "The Polaris managed ID of the compute setting of the archival location.\n" + }, + "awsCustomerAccountPolarisManagedId": { + "type": "string", + "description": "The Polaris managed ID of the AWS customer account associated with this archival location.\n" + }, + "cloudRehydrationSpeed": { + "description": "Specifies the retrieval speed option when retrieving data from the cold storage tier to the hot storage tier for restore purposes. Rubrik cannot directly restore data from the cold storage tier and the data must be first retrieved into the hot storage tier. For AWS, three speed options are supported -- AwsExpedited, AwsStandard, and AwsBulk. AwsBulk is the cheapest and slowest. AwsExpedited is the fastest and most expensive. AwsStandard is the recommended default value.\n", + "$ref": "#/definitions/CloudStorageRehydrationSpeed" + } + }, + "description": "Specifications of the IAM based AWS archival location." 
+ }, + "PolarisAwsArchivalSummary": { + "allOf": [ + { + "$ref": "#/definitions/PolarisAwsArchivalLocationSpec" + }, + { + "type": "object", + "properties": { + "isConsolidationEnabled": { + "type": "boolean" + } + }, + "description": "Details of the AWS archival location created from Polaris." + } + ] + }, + "PolarisAwsReaderConnectDefinition": { + "allOf": [ + { + "$ref": "#/definitions/PolarisAwsArchivalLocationSpec" + }, + { + "type": "object", + "properties": { + "pemFileContent": { + "type": "string", + "x-secret": true + }, + "kmsMasterKeyId": { + "type": "string", + "x-secret": true + }, + "shouldSkipScheduleRecoverArchivedMetadataJob": { + "type": "boolean", + "description": "A Boolean value that determines whether to schedule the archival recovery job. When the value is 'false,' the recovery job is scheduled normally. When the value is 'true,' the recovery job is not scheduled. The default behavior is to schedule the recovery job.\n" + } + } + } + ] + }, + "PolarisAzureArchivalDefinition": { + "allOf": [ + { + "$ref": "#/definitions/PolarisAzureArchivalSummary" + }, + { + "type": "object", + "required": [ + "pemFileContent" + ], + "properties": { + "pemFileContent": { + "type": "string", + "x-secret": true + } + } + } + ] + }, + "PolarisAzureArchivalDetail": { + "allOf": [ + { + "$ref": "#/definitions/PolarisAzureArchivalSummary" + }, + { + "type": "object", + "required": [ + "customerAccountId", + "id" + ], + "properties": { + "id": { + "type": "string", + "description": "The ID associated with the entry in AzureArchivalLocation table for this archival location.\n" + }, + "customerAccountId": { + "type": "string", + "description": "The CDM AzureCustomerAccount table ID associated with the entry.\n" + }, + "computeSettingId": { + "type": "string", + "description": "The CDM AzureComputeSettings table ID associated with the entry.\n" + }, + "proxySettingId": { + "type": "string", + "description": "The CDM ProxySettings table ID associated with the entry.\n" + } + } + } + ] + }, + "PolarisAzureArchivalLocationSpec": { + "type": "object", + "required": [ + "azureCustomerAccountPolarisManagedId", + "container", + "name", + "polarisManagedId", + "storageAccountName", + "versionNumber" + ], + "properties": { + "name": { + "type": "string", + "description": "The archival location name." + }, + "polarisManagedId": { + "type": "string", + "description": "The polaris managed ID for the Azure archival location object.\n" + }, + "container": { + "type": "string" + }, + "storageAccountName": { + "type": "string" + }, + "versionNumber": { + "type": "integer", + "format": "int32" + }, + "endpoint": { + "type": "string" + }, + "archivalProxyPolarisManagedId": { + "type": "string", + "description": "The Polaris managed ID of the proxy setting of the archival location.\n" + }, + "azureComputeSettingPolarisManagedId": { + "type": "string", + "description": "The Polaris managed ID of the compute setting of the archival location.\n" + }, + "azureCustomerAccountPolarisManagedId": { + "type": "string", + "description": "The Polaris managed ID of the Azure customer account that created the archival location.\n" + }, + "cloudRehydrationSpeed": { + "description": "Specifies the retrieval speed option when retrieving data from the cold storage tier to the hot storage tier for restore purposes. Rubrik cannot directly restore data from the cold storage tier and the data must be first retrieved into the hot storage tier. 
The only option is AzureStandard.\n", + "$ref": "#/definitions/CloudStorageRehydrationSpeed" + } + }, + "description": "Specifications of the OAuth based Azure archival location." + }, + "PolarisAzureArchivalSummary": { + "allOf": [ + { + "$ref": "#/definitions/PolarisAzureArchivalLocationSpec" + }, + { + "type": "object", + "properties": { + "isConsolidationEnabled": { + "type": "boolean" + } + }, + "description": "Details of the Azure archival location created from Polaris." + } + ] + }, + "PolarisAzureReaderConnectDefinition": { + "allOf": [ + { + "$ref": "#/definitions/PolarisAzureArchivalLocationSpec" + }, + { + "type": "object", + "required": [ + "pemFileContent" + ], + "properties": { + "pemFileContent": { + "type": "string", + "x-secret": true + } + } + } + ] + }, + "PolarisManagedNfsLocationCreationDefinition": { + "allOf": [ + { + "$ref": "#/definitions/NfsLocationCreationDefinition" + }, + { + "type": "object", + "required": [ + "polarisManagedId" + ], + "properties": { + "polarisManagedId": { + "type": "string", + "description": "Polaris Managed ID of the archival location." + } + } + } + ] + }, + "PolarisManagedObjectStoreLocationDefinition": { + "allOf": [ + { + "$ref": "#/definitions/ObjectStoreLocationDefinition" + }, + { + "type": "object", + "required": [ + "polarisManagedId" + ], + "properties": { + "polarisManagedId": { + "type": "string", + "description": "Polaris Managed ID of the archival location." + } + } + } + ] + }, + "PolicyVersionDetail": { + "type": "object", + "required": [ + "featureId", + "versionNumber" + ], + "properties": { + "featureId": { + "type": "string" + }, + "versionNumber": { + "type": "integer", + "format": "int64" + } + }, + "description": "Details for the feature and version number in the cross-account role created in the customer account.\n" + }, + "ProxyDefinition": { + "allOf": [ + { + "$ref": "#/definitions/LocationProxyConfig" + }, + { + "type": "object", + "required": [ + "polarisManagedId", + "versionNumber" + ], + "properties": { + "polarisManagedId": { + "type": "string", + "description": "The Polaris managed ID of the proxy object." + }, + "versionNumber": { + "type": "integer", + "format": "int32", + "description": "Polaris version of the Proxy Setting object." + } + } + } + ] + }, + "ProxyDetail": { + "allOf": [ + { + "$ref": "#/definitions/LocationProxySummary" + }, + { + "type": "object", + "required": [ + "id", + "polarisManagedId", + "versionNumber" + ], + "properties": { + "polarisManagedId": { + "type": "string", + "description": "The Polaris managed ID of the proxy object." + }, + "id": { + "type": "string", + "description": "The CDM ProxySettings table ID associated with the entry." + }, + "versionNumber": { + "type": "integer", + "format": "int32", + "description": "Polaris version of the Proxy Setting object." + } + } + } + ] + }, + "AuditType": { + "type": "string", + "description": "Audit source types.", + "enum": [ + "WindowsEventsAudit", + "NetAppCifsEventAudit" + ] + }, + "ExportInfoConfig": { + "type": "object", + "required": [ + "exportSpecs", + "objectStorage" + ], + "properties": { + "exportSpecs": { + "type": "array", + "description": "The objects that are undergoing metadata export.", + "items": { + "$ref": "#/definitions/ExportSpec" + } + }, + "objectStorage": { + "description": "Object storage config for uploading metadata information.", + "$ref": "#/definitions/ObjectStorageConfig" + }, + "earliestTimestamp": { + "type": "integer", + "format": "int64", + "description": "Optional. 
When this parameter has a value, the Rubrik cluster only synchronizes objects with a timestamp later than the specified value." + } + } + }, + "ExportSpec": { + "type": "object", + "required": [ + "filename", + "tableName" + ], + "properties": { + "filename": { + "type": "string", + "description": "The filename for uploading metadata information." + }, + "tableName": { + "type": "string", + "description": "The name of table the job is going to export." + }, + "columnNames": { + "type": "array", + "description": "Names of the columns that will be exported. If unspecified, all columns will be exported.", + "items": { + "type": "string" + } + } + } + }, + "ExportThriftInfoConfig": { + "type": "object", + "required": [ + "exportThriftSpecs", + "objectStorage" + ], + "properties": { + "exportThriftSpecs": { + "type": "array", + "description": "The objects whose metadata is being exported.", + "items": { + "$ref": "#/definitions/ExportThriftSpec" + } + }, + "objectStorage": { + "description": "Object storage configuration for uploading metadata information.", + "$ref": "#/definitions/ObjectStorageConfig" + } + } + }, + "ExportThriftSpec": { + "type": "object", + "required": [ + "exportType", + "filename" + ], + "properties": { + "filename": { + "type": "string", + "description": "The filename where the metadata of the requested snappables is written to." + }, + "exportType": { + "description": "The general type of metadata object.", + "$ref": "#/definitions/GeneralThriftExportType" + }, + "concreteType": { + "type": "string", + "description": "The concrete type whose metadata is to be exported. For example, VmwareVirtualMachine, Fileset." + } + } + }, + "GeneralThriftExportType": { + "type": "string", + "description": "The general category which a Thrift metadata object belongs to.", + "enum": [ + "Snappable", + "Snapshot" + ] + }, + "GetSnapshotsConfig": { + "type": "object", + "required": [ + "afterTime", + "beforeTime", + "objectStorage" + ], + "properties": { + "afterTime": { + "type": "integer", + "format": "int64", + "description": "Time that snapshots should have been updated after." + }, + "beforeTime": { + "type": "integer", + "format": "int64", + "description": "Time that snapshots should have been updated before." + }, + "objectStorage": { + "description": "Object storage config for uploading snapshot information.", + "$ref": "#/definitions/ObjectStorageConfig" + }, + "maxSnapshotsPerFile": { + "type": "integer", + "format": "int32", + "description": "Maximum number of snapshots to place in one file when uploading to object storage." + } + } + }, + "LastSyncedPendingActionSequenceNumber": { + "type": "object", + "required": [ + "sequenceNumber" + ], + "properties": { + "sequenceNumber": { + "type": "integer", + "format": "int64", + "description": "Sequence number of last synced Pending Action." + } + } + }, + "MetadataClusterInfo": { + "type": "object", + "required": [ + "clusterUuid", + "rubrikVersion", + "schemaVersion" + ], + "properties": { + "clusterUuid": { + "type": "string", + "description": "The UUID of the Rubrik cluster." + }, + "rubrikVersion": { + "type": "string", + "description": "The Rubrik CDM software version." + }, + "schemaVersion": { + "type": "integer", + "format": "int64", + "description": "The schema version of the metadata store on the Rubrik cluster." 
+ } + } + }, + "MetadataQueryByIds": { + "type": "object", + "required": [ + "compositeIds", + "tableName" + ], + "properties": { + "tableName": { + "type": "string", + "description": "The name of the metadata store table." + }, + "compositeIds": { + "type": "array", + "description": "An array of composite row IDs for the metadata store table.", + "items": { + "type": "string" + } + }, + "columnNames": { + "type": "array", + "description": "An array of selected columns for the table. If columnNames is None, it means that all columns are selected.", + "items": { + "type": "string" + } + } + } + }, + "MetadataQueryByIdsResponse": { + "type": "object", + "required": [ + "clusterInfo", + "queryResults" + ], + "properties": { + "clusterInfo": { + "description": "Metadata store information from a Rubrik cluster.", + "$ref": "#/definitions/MetadataClusterInfo" + }, + "queryResults": { + "type": "array", + "description": "Results from metadata queries of a Rubrik cluster.", + "items": { + "$ref": "#/definitions/MetadataQueryResult" + } + } + } + }, + "MetadataQueryResult": { + "type": "object", + "required": [ + "rows", + "tableName" + ], + "properties": { + "tableName": { + "type": "string", + "description": "The name of the metadata store table." + }, + "rows": { + "type": "array", + "description": "An array of table rows which have composite IDs that match the specified query.", + "items": { + "$ref": "#/definitions/MetadataRow" + } + }, + "columnNames": { + "type": "array", + "description": "An array of columns rows have. It should be None only in older release for backward compatibility.", + "items": { + "type": "string" + } + } + } + }, + "MetadataRow": { + "type": "object", + "required": [ + "compositeId", + "serializedData" + ], + "properties": { + "compositeId": { + "type": "string", + "description": "A string value created from the row ID array by listing row IDs separated by :::.\n" + }, + "serializedData": { + "type": "string", + "description": "A JSON serialized row of data." + } + } + }, + "ObjectStorageConfig": { + "allOf": [ + { + "$ref": "#/definitions/ObjectStorageDetail" + }, + { + "type": "object", + "required": [ + "secretKey" + ], + "properties": { + "accessKey": { + "type": "string", + "description": "The access key to connect to object storage. Currently only used by S3 compatible storage.", + "x-secret": true + }, + "secretKey": { + "type": "string", + "description": "The secret key associated with the user/service account to access storage.", + "x-secret": true + } + } + } + ] + }, + "ObjectStorageDetail": { + "type": "object", + "required": [ + "bucketName", + "objectNamePrefix", + "storageProvider" + ], + "properties": { + "bucketName": { + "type": "string", + "description": "Bucket name cannot contain whitespace or _\\\\/*?%.:|<> For AWS, bucket name also cannot contain capital letters or underscore.\n" + }, + "storageProvider": { + "type": "string", + "description": "Provider for this object storage.", + "enum": [ + "GoogleCloudPlatform", + "AmazonWebServices" + ] + }, + "objectNamePrefix": { + "type": "string", + "description": "The path prefix to prepend to objects uploaded to this object storage location." + }, + "endpoint": { + "type": "string", + "description": "Endpoint to connect to object storage. Only used by S3 compatible storage." 
+ } + } + }, + "StorageProvider": { + "type": "string", + "description": "Object store type.", + "enum": [ + "GoogleCloudPlatform", + "AmazonWebServices", + "S3Compatible" + ] + }, + "ThriftAuthToken": { + "type": "object", + "required": [ + "tokenValue" + ], + "properties": { + "tokenValue": { + "type": "string", + "description": "The Thrift authentication token." + } + } + }, + "ThriftMetadataQueryByIds": { + "type": "object", + "required": [ + "exportType", + "ids" + ], + "properties": { + "exportType": { + "description": "The general type of metadata object.", + "$ref": "#/definitions/GeneralThriftExportType" + }, + "ids": { + "type": "array", + "description": "An array of ids of the metadata objects of the type matching the exporType parameter.", + "items": { + "type": "string" + } + } + } + }, + "ThriftMetadataQueryByIdsResponse": { + "type": "object", + "required": [ + "queryResults" + ], + "properties": { + "queryResults": { + "type": "array", + "description": "Result from metadata queries of a Rubrik cluster in serialized Thrift form.", + "items": { + "$ref": "#/definitions/ThriftMetadataQueryResult" + } + } + } + }, + "ThriftMetadataQueryResult": { + "type": "object", + "required": [ + "serializedMetadatas", + "serializedThriftHeader" + ], + "properties": { + "serializedThriftHeader": { + "type": "string", + "description": "A header in serialized Thrift containing the cluster UUID, Rubrik software version, and other information necessary for decoding serialized metadatas. It is encoded to a byte array using Thrift and from byte array to string using Base64." + }, + "serializedMetadatas": { + "type": "array", + "description": "The metadata query results in serialized Thrift form. Similar to the header, each metadata object is encoded to byte array using Thrift and from byte array to string using Base64.", + "items": { + "type": "string" + } + } + } + }, + "WindowsAuditConfiguration": { + "type": "object", + "required": [ + "blacklistStrings", + "powershellScripts", + "xpathSubscription" + ], + "properties": { + "powershellScripts": { + "type": "array", + "description": "A sequence of powershell scripts to enable auditing.", + "items": { + "type": "string" + } + }, + "xpathSubscription": { + "type": "string", + "description": "XPath query to subscribe to Windows security events." + }, + "blacklistStrings": { + "type": "array", + "description": "Strings that black-list captured security audit logs.", + "items": { + "type": "string" + } + } + } + }, + "WindowsAuditHost": { + "type": "object", + "required": [ + "id", + "isEnabled", + "name" + ], + "properties": { + "id": { + "type": "string", + "description": "Identifier for this Windows host." + }, + "name": { + "type": "string", + "description": "Windows host name." + }, + "isEnabled": { + "type": "boolean", + "description": "Whether this host is enabled for auditing." + } + } + }, + "SecurityDescriptor": { + "type": "object", + "required": [ + "path" + ], + "properties": { + "path": { + "type": "string", + "description": "File or folder path." + }, + "securityDescriptor": { + "type": "string", + "description": "DACL security descriptor, in SDDL format." + } + } + }, + "DisabledReplicationLocationDefinition": { + "type": "object", + "required": [ + "peerClusterName" + ], + "properties": { + "peerClusterName": { + "type": "string", + "description": "Name of the peer cluster." 
+ } + } + }, + "EnableAsReplicationTargetDefinition": { + "type": "object", + "required": [ + "replicationSetup" + ], + "properties": { + "replicationSetup": { + "type": "string", + "description": "Network setup must be either 'NAT' or 'Private Network'." + }, + "targetClusterName": { + "type": "string", + "description": "Name of the target cluster." + }, + "targetClusterAddress": { + "type": "string", + "description": "Address of the target cluster." + }, + "targetGateway": { + "description": "Gateway information about the target cluster.", + "$ref": "#/definitions/GatewayInfo" + }, + "sourceGateway": { + "description": "Gateway information about the source cluster.", + "$ref": "#/definitions/GatewayInfo" + }, + "shouldEnableForZeroSlaAssignments": { + "type": "boolean", + "description": "Whether or not to enable the replication target if all the corresponding Polaris managed SLAs have zero assignments on the cluster. Default value will be true i.e always enable." + } + } + }, + "PolarisPullReplicateDefinition": { + "type": "object", + "required": [ + "accessKey", + "isOnDemand", + "polarisId", + "secretKey", + "snapshotInfo" + ], + "properties": { + "polarisId": { + "type": "string", + "description": "Managed ID of the Polaris source cluster." + }, + "snapshotInfo": { + "description": "Info of the snapshot which this cluster is replicating from Polaris.", + "$ref": "#/definitions/ReplicationSnapshotInfo" + }, + "accessKey": { + "type": "string", + "description": "The access key used for accessing customer's volumes to pull replicate snapshots." + }, + "secretKey": { + "type": "string", + "description": "The secret key used for accessing customer's volumes to pull replicate snapshots.", + "x-secret": true + }, + "isOnDemand": { + "type": "boolean", + "description": "Indicates if snapshot is on-demand." + } + } + }, + "PolarisReplicationSourceDefinition": { + "type": "object", + "required": [ + "accountId", + "accountName", + "polarisId" + ], + "properties": { + "polarisId": { + "type": "string", + "description": "The global unique ID that representing a Polaris account. This is the account database name in Polaris." + }, + "accountId": { + "type": "string" + }, + "accountName": { + "type": "string" + } + } + }, + "PolarisReplicationSourceRefreshDefinition": { + "type": "object", + "required": [ + "metadataPackageObjectName", + "metadataPackageSha1Digest", + "objectStorageConfig", + "packageCryptoInfo" + ], + "properties": { + "objectStorageConfig": { + "description": "Object storage config for Polaris source cluster metadata package.", + "$ref": "#/definitions/ObjectStorageConfig" + }, + "metadataPackageObjectName": { + "type": "string", + "description": "Key-value store key for finding Polaris source cluster metadata package." + }, + "metadataPackageSha1Digest": { + "type": "string", + "description": "SHA-1 digest of the content of this metadata package for verification." 
+ }, + "packageCryptoInfo": { + "description": "Encryption details passed in from Polaris for decrypting the metadata packages.", + "$ref": "#/definitions/PolarisReplicationSourceRefreshPackageCryptoInfo" + } + } + }, + "PolarisReplicationSourceRefreshPackageCryptoInfo": { + "type": "object", + "required": [ + "decryptionKey", + "plainTextChunkSize" + ], + "properties": { + "decryptionKey": { + "type": "string", + "description": "Hex encoded one time key used to decrypt the package.", + "x-secret": true + }, + "plainTextChunkSize": { + "type": "integer", + "format": "int64", + "description": "Indicates the plain text chunk size used for encrypting the package." + } + } + }, + "PolarisReplicationSourceReplicatedSnappableSummary": { + "type": "object", + "required": [ + "id", + "snappableType" + ], + "properties": { + "id": { + "type": "string", + "description": "The ID of the snappable stored on this cluster which has Polaris as the primary cluster." + }, + "snappableType": { + "type": "string", + "description": "The snappable type of this snappable." + } + } + }, + "PolarisReplicationSourceSummary": { + "type": "object", + "required": [ + "accountId", + "accountName", + "id" + ], + "properties": { + "id": { + "type": "string" + }, + "accountId": { + "type": "string" + }, + "accountName": { + "type": "string" + } + } + }, + "ReplicationLocationSummary": { + "allOf": [ + { + "$ref": "#/definitions/DisabledReplicationLocationDefinition" + }, + { + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "type": "string", + "description": "ID assigned to the peer cluster." + }, + "replicationTargetSummary": { + "description": "Summary of the replication location as a target.", + "$ref": "#/definitions/ReplicationTargetSummary" + } + } + } + ] + }, + "ReportJobInstanceResponse": { + "type": "object", + "required": [ + "data", + "nextAfterId", + "total" + ], + "properties": { + "data": { + "type": "array", + "description": "Report. job instance entry array.", + "items": { + "$ref": "#/definitions/ReportTaskEntry" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total number of report task entries in the response." + }, + "nextAfterId": { + "type": "string", + "description": "Use the after ID as the query parameter in the next query. in the next query." + } + } + }, + "ReportTaskCategory": { + "type": "string", + "description": "The report task category.", + "enum": [ + "Protection", + "Recovery" + ] + }, + "ReportTaskEntry": { + "type": "object", + "required": [ + "endTime", + "id", + "jobType", + "objectInfo", + "startTime", + "status" + ], + "properties": { + "id": { + "type": "string", + "description": "The partition key and clustering key composite ID of the report job instance table." + }, + "taskCategory": { + "description": "Task category.", + "$ref": "#/definitions/ReportTaskCategory" + }, + "jobType": { + "type": "string", + "description": "Job type of the job instance." + }, + "startTime": { + "type": "string", + "format": "date-time", + "description": "Start time of the job instance." + }, + "endTime": { + "type": "string", + "format": "date-time", + "description": "End time of the job instance." + }, + "status": { + "type": "string", + "description": "Terminated status of the job instance." + }, + "failureReason": { + "type": "string", + "description": "Failure reason of the job instance." + }, + "objectInfo": { + "type": "string", + "description": "Associated object information of the job instance." 
+ }, + "startedRunningTime": { + "type": "string", + "format": "date-time", + "description": "Started running time of the job instance." + }, + "stats": { + "type": "string", + "description": "Associated stats information of the job instance." + }, + "snappableId": { + "type": "string", + "description": "Snappable ID." + }, + "managedId": { + "type": "string", + "description": "Managed ID." + }, + "objectName": { + "type": "string" + }, + "objectType": { + "type": "string" + }, + "objectLocation": { + "type": "string" + }, + "clusterLocation": { + "type": "string" + }, + "slaId": { + "type": "string" + }, + "slaName": { + "type": "string" + }, + "replicationSource": { + "type": "string" + }, + "replicationTarget": { + "type": "string" + }, + "archivalTarget": { + "type": "string" + }, + "directArchive": { + "type": "string" + }, + "recoveryPoint": { + "type": "string" + }, + "recoveryPointType": { + "$ref": "#/definitions/RecoveryPointType" + }, + "userName": { + "type": "string" + } + } + }, + "CascadedArchivalSpec": { + "allOf": [ + { + "$ref": "#/definitions/ArchivalSpecV2" + }, + { + "type": "object", + "required": [ + "retentionLimit" + ], + "properties": { + "retentionLimit": { + "type": "integer", + "format": "int64" + } + } + } + ] + }, + "CascadedArchivalSpecDefinition": { + "type": "object", + "required": [ + "archivalSpecs", + "localRetentionLimit" + ], + "properties": { + "localRetentionLimit": { + "type": "integer", + "format": "int64", + "description": "The retention limit for snapshots on the local Rubrik system which will be replication target." + }, + "archivalSpecs": { + "type": "array", + "items": { + "$ref": "#/definitions/CascadedArchivalSpec" + } + } + } + }, + "PolarisManagedSlaDomainDefinition": { + "type": "object", + "required": [ + "firstFullAllowedBackupWindows", + "frequencies", + "name", + "polarisManagedId" + ], + "properties": { + "polarisManagedId": { + "type": "string", + "description": "Polaris Managed ID of the SLA domain." 
+ }, + "name": { + "type": "string" + }, + "frequencies": { + "$ref": "#/definitions/SlaFrequencyV2" + }, + "allowedBackupWindows": { + "type": "array", + "items": { + "$ref": "#/definitions/BackupWindow" + } + }, + "firstFullAllowedBackupWindows": { + "type": "array", + "items": { + "$ref": "#/definitions/BackupWindow" + } + }, + "localRetentionLimit": { + "type": "integer", + "format": "int64" + }, + "archivalSpecs": { + "type": "array", + "items": { + "$ref": "#/definitions/ArchivalSpecV2" + } + }, + "replicationSpecs": { + "type": "array", + "items": { + "$ref": "#/definitions/ReplicationSpecV2" + } + }, + "showAdvancedUi": { + "type": "boolean" + }, + "advancedUiConfig": { + "type": "array", + "items": { + "$ref": "#/definitions/AdvancedUiConfigAttributes" + } + }, + "incrementalFrequency": { + "description": "Incremental backup frequency for SAP HANA databases.", + "$ref": "#/definitions/IncrementalFrequencyMap" + }, + "differentialFrequency": { + "description": "Differential backup frequency for SAP HANA databases.", + "$ref": "#/definitions/DifferentialFrequencyMap" + }, + "logConfigs": { + "description": "Log backup configuration for VMware virtual machines and SAP HANA databases.", + "$ref": "#/definitions/LogConfigMap" + } + } + }, + "DataGovAuditTargetType": { + "type": "string", + "description": "The type of audit target.", + "enum": [ + "Netapp", + "Windows", + "Isilon", + "WindowsMiniFilter" + ] + }, + "UserAuditLogCaptureCfg": { + "type": "object", + "required": [ + "audit_target_type", + "enable", + "host_id", + "share_name", + "upload_prefix" + ], + "properties": { + "host_id": { + "type": "string", + "description": "Host id of the resource (NAS filer, windows server)." + }, + "share_name": { + "type": "string", + "description": "Share name of the resource." + }, + "audit_target_type": { + "description": "Type of audit target (Netapp, Windows, Isilon).", + "$ref": "#/definitions/DataGovAuditTargetType" + }, + "enable": { + "type": "boolean", + "description": "Enable or Disable file share auditing." + }, + "upload_prefix": { + "type": "string", + "description": "path prefix to use when uploading audit files." + } + } + }, + "PrincipalQuery": { + "type": "object", + "properties": { + "principalType": { + "type": "string", + "description": "Type of principal (user|group)." + }, + "authDomainId": { + "type": "string", + "description": "ID of auth domain containing principal." + }, + "hasAuthorizations": { + "type": "boolean", + "description": "Whether the principal has any explicit authorizations." + }, + "isDeleted": { + "type": "boolean", + "description": "Whether the principal is deleted." + }, + "searchAttr": { + "type": "array", + "description": "Comma-separated list of attributes by which to search (name|firstName|lastName|emailAddress|description).", + "items": { + "type": "string" + } + }, + "searchValue": { + "type": "array", + "description": "Comma-separated list of values on which to search in the corresponding search attribute.", + "items": { + "type": "string" + } + }, + "searchType": { + "type": "array", + "description": "Comma-separated list of search types for each attribute: prefix|infix|exact (default is \"infix\").", + "items": { + "type": "string" + } + }, + "joinOperator": { + "type": "string", + "description": "Whether results must match any or all of the search attributes: all|any (default is \"all\")." 
+ } + } + }, + "PrincipalSearchRequest": { + "type": "object", + "required": [ + "queries" + ], + "properties": { + "limit": { + "type": "integer", + "format": "int32", + "description": "Maximum number of results to return.", + "minimum": 0 + }, + "offset": { + "type": "integer", + "format": "int32", + "description": "Starting offset of the results to return.", + "minimum": 0 + }, + "queries": { + "type": "array", + "description": "List of search queries to perform.", + "items": { + "$ref": "#/definitions/PrincipalQuery" + } + }, + "sort": { + "type": "array", + "description": "List of fields by which to sort the result set.", + "items": { + "$ref": "#/definitions/PrincipalSort" + } + } + } + }, + "PrincipalSort": { + "type": "object", + "required": [ + "attr" + ], + "properties": { + "attr": { + "type": "string", + "description": "Attribute by which to sort: name|principalType|firstName|lastName|emailAddress|description (default is \"name\")." + }, + "order": { + "type": "string", + "description": "Sort order: asc|desc (default is \"asc\")." + } + } + }, + "PrincipalSummary": { + "type": "object", + "required": [ + "authDomainId", + "authorizations", + "id", + "isDeleted", + "isLocked", + "name", + "principalType" + ], + "properties": { + "id": { + "type": "string", + "description": "ID of a principal in an authentication domain." + }, + "principalType": { + "type": "string", + "description": "Type of a principal in an authentication domain. Type can be: user, group, or organization.", + "enum": [ + "user", + "group", + "organization" + ] + }, + "isDeleted": { + "type": "boolean", + "description": "Specifies whether a principal in an authentication domain was deleted. The value is true when the principal was deleted. Otherwise, the value is false." + }, + "authDomainId": { + "type": "string", + "description": "ID of the authentication domain for a specified principal." + }, + "name": { + "type": "string", + "description": "The name of a principal in an authentication domain." + }, + "firstName": { + "type": "string", + "description": "First name of a principal of type user. For all other types the value is null." + }, + "lastName": { + "type": "string", + "description": "Last name of a principal of type user. For all other types the value is null." + }, + "emailAddress": { + "type": "string", + "description": "Email address associated with a principal." + }, + "description": { + "type": "string", + "description": "Short description for a principal of type group. For all other types the value is null." + }, + "authorizations": { + "description": "Explicit authorizations for this principal.", + "$ref": "#/definitions/AuthorizationSummary" + }, + "isLocked": { + "type": "boolean", + "description": "Boolean value that shows the lock state of a user account. Value is true when the account is locked and false when the account is not locked." + } + } + }, + "PrincipalSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/PrincipalSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "AdvancedUiConfigAttributes": { + "type": "object", + "required": [ + "retentionType", + "timeUnit" + ], + "properties": { + "timeUnit": { + "description": "Units of frequency. 
Accepted values are Minute, Hourly, Daily, Weekly, Monthly, Quarterly, and Yearly.", + "$ref": "#/definitions/SlaTimeUnit" + }, + "retentionType": { + "description": "Units of retention. Accepted values are Minute, Hourly, Daily, Weekly, Monthly, Quarterly, and Yearly.", + "$ref": "#/definitions/SlaTimeUnit" + } + } + }, + "ArchivalSpec": { + "type": "object", + "required": [ + "archivalThreshold", + "locationId" + ], + "properties": { + "locationId": { + "type": "string" + }, + "locationName": { + "type": "string" + }, + "archivalThreshold": { + "type": "integer", + "format": "int64" + }, + "archivalTieringSpec": { + "description": "Used to enable either Instant Tiering or Smart Tiering for snapshots that have been uploaded to the given archival location.\n", + "$ref": "#/definitions/ArchivalTieringSpec" + }, + "isPassthroughSupported": { + "type": "boolean", + "description": "A Boolean value that identifies whether the archival location type supports direct archive backups.\n" + } + } + }, + "ArchivalSpecV2": { + "type": "object", + "required": [ + "archivalThreshold" + ], + "properties": { + "locationId": { + "type": "string" + }, + "locationName": { + "type": "string" + }, + "polarisManagedId": { + "type": "string", + "description": "The Polaris managed ID of an archival location. At least one of the parameters locationId and polarisManagedId must be defined to correctly refer to an archival location.\n" + }, + "archivalThreshold": { + "type": "integer", + "format": "int64", + "description": "Amount of time, in seconds, after which the snapshot must be uploaded." + }, + "archivalTieringSpec": { + "description": "Enables Instant Tiering or Smart Tiering for snapshots that were uploaded to the specified archival location.\n", + "$ref": "#/definitions/ArchivalTieringSpec" + }, + "isPassthroughSupported": { + "type": "boolean", + "description": "Boolean value that indicates if the archival location type supports direct archive backups.\n" + } + } + }, + "ArchivalTieringSpec": { + "type": "object", + "required": [ + "isInstantTieringEnabled" + ], + "properties": { + "isInstantTieringEnabled": { + "type": "boolean", + "description": "A Boolean value that determines whether to immediately tier uploaded snapshots to cold storage. When this value is 'true,' uploaded snapshots are immediately tiered to cold storage. When this value is 'false,' snapshots are marked as eligible for tiering to cold storage after their time on the archival location exceeds the configured minimum accessible duration.\n" + }, + "minAccessibleDurationInSeconds": { + "type": "integer", + "format": "int64", + "description": "Specifies an interval in seconds. Uploaded snapshots are accessible for instant recovery for the duration of the specified interval. This value is ignored when Instant Tiering is enabled.\n" + }, + "coldStorageClass": { + "description": "The specific class of cloud storage used by Cold tiering. For Azure storage, the only supported class is Azure Archive. For AWS, specify Glacier or Glacier Deep Archive. By default, the classes are Azure Archive and Glacier for Azure and AWS respectively.\n", + "$ref": "#/definitions/CloudStorageColdTier" + }, + "shouldTierExistingSnapshots": { + "type": "boolean", + "description": "Indicates if existing snapshots for all objects protected by the SLA should be tiered. If not specified, this defaults to false. 
Only the snapshots that exist in the archival location associated with the SLA will be tiered.\n" + } + } + }, + "BackupWindow": { + "type": "object", + "required": [ + "durationInHours", + "startTimeAttributes" + ], + "properties": { + "startTimeAttributes": { + "$ref": "#/definitions/SlaStartTimeAttributes" + }, + "durationInHours": { + "type": "integer", + "format": "int32" + } + } + }, + "BaseOnDemandSnapshotConfig": { + "type": "object", + "properties": { + "slaId": { + "type": "string" + } + } + }, + "BaseSnapshotDetail": { + "type": "object", + "properties": { + "config": { + "type": "string" + }, + "isCorrupt": { + "type": "boolean" + } + } + }, + "BaseSnapshotSummary": { + "type": "object", + "required": [ + "date", + "id", + "isCustomRetentionApplied", + "isOnDemandSnapshot", + "replicationLocationIds", + "slaId", + "slaName" + ], + "properties": { + "id": { + "type": "string" + }, + "date": { + "type": "string", + "format": "date-time" + }, + "expirationDate": { + "type": "string", + "format": "date-time" + }, + "sourceObjectType": { + "type": "string" + }, + "isOnDemandSnapshot": { + "type": "boolean" + }, + "isCustomRetentionApplied": { + "type": "boolean", + "description": "A Boolean that indicates whether or not custom retention is applied to the snapshot.\n" + }, + "cloudState": { + "type": "integer", + "format": "int64", + "description": "Integer value that represents the archival state of a snapshot. 0 means the snapshot is not archived. 2 means the snapshot is archived. 3 means the snapshot is downloaded from the archival location. 4 means the snapshot is in the process of being downloaded from the archival location. 6 means the snapshot is stored locally and at the archival location.\n" + }, + "consistencyLevel": { + "type": "string" + }, + "indexState": { + "type": "integer", + "format": "int64", + "description": "Integer value representing the state of the indexing job for a snapshot. 0 means that the indexing has not begun or is in progress. 1 means indexing completed successfully. 2 means that the indexer failed to process this snapshot.\n" + }, + "replicationLocationIds": { + "type": "array", + "items": { + "type": "string" + } + }, + "archivalLocationIds": { + "type": "array", + "items": { + "type": "string" + } + }, + "slaId": { + "type": "string", + "description": "(Deprecated) For a policy based snapshot this parameter contains the ID of the SLA Domain currently assigned to the data source of that snapshot. For an on demand snapshot this field corresponds to the SLA Domain that was assigned when the snapshot was taken. A data source, and individual snapshots, can be reassigned to a different SLA Domain, or the SLA Domain can be modified. In any of these cases this parameter can contain a stale and incorrect value. To view retention information for this snapshot, use snapshotRetentionInfo instead." + }, + "slaName": { + "type": "string", + "description": "(Deprecated) For a policy based snapshot this parameter contains the name of the SLA Domain currently assigned to the data source of that snapshot. For an on demand snapshot this field corresponds to the SLA Domain that was assigned when the snapshot was taken. A data source, and individual snapshots, can be reassigned to a different SLA Domain, or the SLA Domain can be modified. In any of these cases this parameter can contain a stale and incorrect value. To view retention information for this snapshot, use snapshotRetentionInfo instead." 
+ }, + "isRetainedByRetentionLockSla": { + "type": "boolean", + "description": "A Boolean that indicates whether the snapshot is being retained under a Retention Lock SLA Domain. When this value is 'true', the snapshot is being retained under a Retention Lock SLA Domain." + }, + "cloudStorageTier": { + "$ref": "#/definitions/SnapshotCloudStorageTier" + }, + "isPlacedOnLegalHold": { + "type": "boolean", + "description": "A Boolean that indicates whether the snapshot is placed on Legal Hold. When this value is 'true', the snapshot is placed on Legal Hold." + }, + "snapshotRetentionInfo": { + "description": "Snapshot retention related information for local, archival and replication locations.", + "$ref": "#/definitions/SnapshotRetentionInfo" + }, + "parentSnapshotId": { + "type": "string", + "description": "ID of the parent snapshot if the current snapshot is a child snapshot. Child snapshots are snapshots of objects that are part of an app, either a vCloud Director vApp or an AppBlueprint. Snapshots of the app are parent snapshots.\n" + } + } + }, + "BulkSlaConflictsSummary": { + "type": "object", + "required": [ + "responses" + ], + "properties": { + "responses": { + "type": "array", + "description": "List of SLA domain conflict summaries for the specified managed IDs.", + "items": { + "$ref": "#/definitions/SlaConflictsSummary" + } + } + } + }, + "ConfiguredSlaType": { + "type": "string", + "description": "Specifies whether the SLA Domain is used for protection or retention.", + "enum": [ + "ProtectionSla", + "RetentionSla" + ] + }, + "DifferentialFrequencyMap": { + "type": "object", + "properties": { + "SapHanaDatabase": { + "description": "Differential backup frequency configuration for SAP HANA databases.", + "$ref": "#/definitions/SapHanaDifferentialFrequency" + } + } + }, + "EffectiveSlaHolder": { + "type": "object", + "required": [ + "effectiveSlaDomainId", + "effectiveSlaDomainName" + ], + "properties": { + "effectiveSlaDomainId": { + "type": "string", + "description": "The ID of the SLA Domain that controls the protection of the Rubrik object." + }, + "effectiveSlaDomainName": { + "type": "string", + "description": "The name of the SLA Domain that controls the protection of the Rubrik object." + }, + "isEffectiveSlaDomainRetentionLocked": { + "type": "boolean", + "description": "Indicates whether the effective SLA Domain is Retention Locked. When this value is 'true', the effective SLA domain is a Retention Lock SLA Domain." + }, + "effectiveSlaDomainPolarisManagedId": { + "type": "string", + "description": "Optional. This field contains the managed ID of of the Polaris-managed effective SLA Domain." + }, + "effectiveSlaSourceObjectId": { + "type": "string", + "description": "The ID of the parent of the Rubrik object from which the SLA Domain that controls the protection of Rubrik object is inherited." + }, + "effectiveSlaSourceObjectName": { + "type": "string", + "description": "The name of the parent of the Rubrik object from which the SLA Domain that controls the protection of Rubrik object is inherited." 
+ } + } + }, + "ExistingSnapshotRetention": { + "type": "string", + "description": "Specifies the retention policy to apply to existing snapshots when unprotecting an object.", + "enum": [ + "RetainSnapshots", + "KeepForever", + "ExpireImmediately" + ] + }, + "FrequencyConfig": { + "type": "object", + "required": [ + "frequency", + "retention" + ], + "properties": { + "frequency": { + "type": "integer", + "format": "int32" + }, + "retention": { + "type": "integer", + "format": "int32" + } + } + }, + "HierarchyObjectIds": { + "type": "object", + "required": [ + "ids" + ], + "properties": { + "ids": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "IncrementalFrequencyMap": { + "type": "object", + "properties": { + "SapHanaDatabase": { + "description": "Incremental backup frequency configuration for SAP HANA databases.", + "$ref": "#/definitions/SapHanaIncrementalFrequency" + } + } + }, + "LogConfigMap": { + "type": "object", + "properties": { + "SapHanaDatabase": { + "description": "Log backup configuration for SAP HANA databases. This defines the retention period for logs of SAP HANA databases.", + "$ref": "#/definitions/SlaLogConfiguration" + }, + "VmwareVirtualMachine": { + "description": "Log backup configuration for continuous data protection of VMware virtual machines.", + "$ref": "#/definitions/SlaLogConfiguration" + } + } + }, + "LogFrequencyType": { + "type": "string", + "description": "Defines the frequency of taking log backups.", + "enum": [ + "Continuous", + "Minute" + ] + }, + "ManagedHierarchyObjectAncestor": { + "type": "object", + "required": [ + "id", + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "id": { + "type": "string" + } + } + }, + "ManagedHierarchyObjectSummary": { + "allOf": [ + { + "$ref": "#/definitions/Snappable" + }, + { + "type": "object", + "required": [ + "isDeleted", + "isRelic" + ], + "properties": { + "infraPath": { + "type": "array", + "items": { + "$ref": "#/definitions/ManagedHierarchyObjectAncestor" + } + }, + "slaPath": { + "type": "array", + "items": { + "$ref": "#/definitions/ManagedHierarchyObjectAncestor" + } + }, + "isRelic": { + "type": "boolean", + "description": "Whether this managed object is a relic (an archived snappable with unexpired snapshots)." + }, + "isDeleted": { + "type": "boolean", + "description": "Indicates whether the managed hierarchy object is deleted." + } + } + } + ] + }, + "ManagedHierarchyObjectSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/ManagedHierarchyObjectSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "ManagedHierarchySearchObject": { + "type": "object", + "required": [ + "limit", + "offset", + "searchProperties", + "searchText" + ], + "properties": { + "searchText": { + "type": "string", + "description": "Text to infix search hierarchy objects by name and also optionally by location." 
+ }, + "searchProperties": { + "type": "array", + "items": { + "description": "Array of properties to search on.", + "$ref": "#/definitions/SearchProperty" + } + }, + "objectTypes": { + "type": "array", + "items": { + "description": "Array of allowed resource types to search over.", + "$ref": "#/definitions/ObjectType" + } + }, + "primaryClusterId": { + "type": "string", + "description": "Filter by primary Cluster Id or local." + }, + "offset": { + "type": "integer", + "format": "int32", + "description": "Starting offset of the results to return.", + "minimum": 0 + }, + "limit": { + "type": "integer", + "format": "int32", + "description": "Maximum number of results to return.", + "minimum": 0 + } + } + }, + "ManagedObject": { + "type": "object", + "required": [ + "ancestors", + "id", + "managedId", + "objectType" + ], + "properties": { + "managedId": { + "type": "string" + }, + "id": { + "type": "string" + }, + "objectType": { + "type": "string" + }, + "ancestors": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "ManagedObjectPendingSlaInfo": { + "type": "object", + "required": [ + "objectId", + "pendingSlaDomainId", + "pendingSlaDomainName" + ], + "properties": { + "objectId": { + "type": "string", + "description": "Managed ID of the object." + }, + "pendingSlaDomainId": { + "type": "string" + }, + "pendingSlaDomainName": { + "type": "string" + }, + "isPendingSlaDomainRetentionLocked": { + "type": "boolean" + } + } + }, + "MissedSnapshot": { + "type": "object", + "required": [ + "archivalLocationType", + "missedSnapshotTime", + "missedSnapshotTimeUnits" + ], + "properties": { + "archivalLocationType": { + "type": "array", + "items": { + "type": "string" + } + }, + "missedSnapshotTime": { + "type": "string", + "format": "date-time" + }, + "missedSnapshotTimeUnits": { + "type": "array", + "items": { + "$ref": "#/definitions/MissedSnapshotTimeUnitConfig" + } + } + } + }, + "MissedSnapshotDayOfTimeUnit": { + "type": "string", + "description": "Units for missed snapshot dayOfTime.", + "enum": [ + "Monday", + "Tuesday", + "Wednesday", + "Thursday", + "Friday", + "Saturday", + "Sunday", + "FirstDay", + "Fifteenth", + "LastDay" + ] + }, + "MissedSnapshotListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/MissedSnapshot" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "MissedSnapshotTimeUnitConfig": { + "type": "object", + "required": [ + "frequency", + "retention", + "timeUnit" + ], + "properties": { + "timeUnit": { + "description": "Units for frequency and retention. Accepted values are Minute, Hourly, Daily, Weekly, Monthly, Quarterly, and Yearly.", + "$ref": "#/definitions/SlaTimeUnit" + }, + "frequency": { + "type": "integer", + "format": "int32" + }, + "retention": { + "type": "integer", + "format": "int32" + }, + "dayOfTime": { + "description": "Trigger day for Weekly, Monthly, Quarterly, and Yearly units. 
Accepted values are enum of MissedSnapshotDayOfTimeUnit.", + "$ref": "#/definitions/MissedSnapshotDayOfTimeUnit" + } + } + }, + "MonthlyConfig": { + "allOf": [ + { + "$ref": "#/definitions/FrequencyConfig" + }, + { + "type": "object", + "required": [ + "dayOfMonth" + ], + "properties": { + "dayOfMonth": { + "$ref": "#/definitions/SlaDayOfMonth" + } + } + } + ] + }, + "ProtectedEntity": { + "type": "object", + "required": [ + "managedId" + ], + "properties": { + "managedId": { + "type": "string", + "description": "This field contains the managed ID of an entity explicitly protected by a specified SLA Domain through a direct assignment." + } + } + }, + "ProtectedEntityListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/ProtectedEntity" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "ProtectionDetail": { + "type": "object", + "required": [ + "configuredSlaDomainId", + "id", + "managedId", + "name", + "objectType" + ], + "properties": { + "managedId": { + "type": "string" + }, + "id": { + "type": "string" + }, + "objectType": { + "type": "string" + }, + "name": { + "type": "string" + }, + "configuredSlaDomainId": { + "type": "string" + }, + "effectiveSlaDomainId": { + "type": "string" + }, + "effectiveSlaDomainName": { + "type": "string" + }, + "effectiveSlaSource": { + "$ref": "#/definitions/ManagedObject" + } + } + }, + "ProtectionDetailListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/ProtectionDetail" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "QuarterlyConfig": { + "allOf": [ + { + "$ref": "#/definitions/FrequencyConfig" + }, + { + "type": "object", + "required": [ + "dayOfQuarter", + "firstQuarterStartMonth" + ], + "properties": { + "firstQuarterStartMonth": { + "$ref": "#/definitions/SlaMonth" + }, + "dayOfQuarter": { + "$ref": "#/definitions/SlaDayOfQuarter" + } + } + } + ] + }, + "ReplicationSpec": { + "type": "object", + "required": [ + "locationId", + "retentionLimit" + ], + "properties": { + "locationId": { + "type": "string" + }, + "locationName": { + "type": "string" + }, + "retentionLimit": { + "type": "integer", + "format": "int64" + }, + "logRetentionLimit": { + "type": "integer", + "format": "int64", + "description": "Specifies an interval in seconds. Logs are retained at the replication location until the specified interval expires." + }, + "replicationType": { + "description": "This refers to the type of replication that will be used for the SLA Domain. Default value is \"REPLICATION_TO_CLUSTER\".", + "$ref": "#/definitions/ReplicationType" + } + } + }, + "ReplicationSpecV2": { + "type": "object", + "required": [ + "retentionLimit" + ], + "properties": { + "locationId": { + "type": "string" + }, + "locationName": { + "type": "string" + }, + "polarisManagedId": { + "type": "string", + "description": "This refers to the Polaris managed ID of an archival location. At least one of the params locationId and polarisManagedId should be defined to correctly refer to an archival location. 
If locationId is provided, then polarisManagedId will be ignored.\n" + }, + "retentionLimit": { + "type": "integer", + "format": "int64", + "description": "Specifies a time duration in seconds. Snapshots will be retained on the replication target location until the duration has passed.\n" + }, + "logRetentionLimit": { + "type": "integer", + "format": "int64", + "description": "Specifies an interval in seconds. Logs are retained at the replication location until the specified interval expires." + }, + "replicationType": { + "description": "This refers to the type of replication that will be used for the SLA Domain. Default value is \"REPLICATION_TO_CLUSTER\".", + "$ref": "#/definitions/ReplicationType" + } + } + }, + "ReplicationType": { + "type": "string", + "description": "The type of replication for an SLA Domain.", + "enum": [ + "REPLICATION_TO_CLUSTER", + "REPLICATION_TO_CLOUD_LOCATION" + ] + }, + "SapHanaDifferentialFrequency": { + "type": "object", + "required": [ + "frequency", + "timeUnit" + ], + "properties": { + "frequency": { + "type": "integer", + "format": "int32", + "description": "Frequency value for differential backup of SAP HANA databases." + }, + "timeUnit": { + "description": "Units for differential backup frequency. Accepted values are Hourly and Daily.", + "$ref": "#/definitions/SapHanaDifferentialFrequencyType" + } + } + }, + "SapHanaDifferentialFrequencyType": { + "type": "string", + "description": "Units for differential backup frequency.", + "enum": [ + "Minutely", + "Hourly", + "Daily" + ] + }, + "SapHanaIncrementalFrequency": { + "type": "object", + "required": [ + "frequency", + "timeUnit" + ], + "properties": { + "frequency": { + "type": "integer", + "format": "int32", + "description": "Frequency value for incremental backup of SAP HANA databases." + }, + "timeUnit": { + "description": "Units for incremental backup frequency. Accepted values are Hourly and Daily.", + "$ref": "#/definitions/SapHanaIncrementalFrequencyType" + } + } + }, + "SapHanaIncrementalFrequencyType": { + "type": "string", + "description": "Units for incremental backup frequency.", + "enum": [ + "Minutely", + "Hourly", + "Daily" + ] + }, + "SearchItemSummary": { + "type": "object", + "required": [ + "id", + "isRelic", + "location", + "name", + "objectType", + "primaryClusterId" + ], + "properties": { + "id": { + "type": "string", + "description": "Id." + }, + "name": { + "type": "string", + "description": "Name." + }, + "primaryClusterId": { + "type": "string", + "description": "Primary Cluster Id." + }, + "location": { + "type": "array", + "description": "Sorted list of objects in relevant path.", + "items": { + "$ref": "#/definitions/ManagedHierarchyObjectAncestor" + } + }, + "effectiveSlaDomainId": { + "type": "string", + "description": "ID of the effective SLA domain." + }, + "effectiveSlaDomainName": { + "type": "string", + "description": "Name of the effective SLA domain." + }, + "objectType": { + "$ref": "#/definitions/ObjectType" + }, + "isRelic": { + "type": "boolean", + "description": "Whether this managed object is a relic (an archived snappable with unexpired snapshots)." + }, + "isEffectiveSlaDomainRetentionLocked": { + "type": "boolean", + "description": "A Boolean that indicates whether the effective SLA Domain is Retention Locked. When this value is 'true', the effective SLA Domain is a Retention Lock SLA Domain." + } + } + }, + "SearchItemSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." 
+ }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/SearchItemSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "SearchProperty": { + "type": "string", + "description": "Properties to search on.", + "enum": [ + "name", + "location", + "slaDomain" + ] + }, + "SlaAssignable": { + "type": "object", + "required": [ + "configuredSlaDomainId", + "configuredSlaDomainName", + "id", + "name", + "primaryClusterId" + ], + "properties": { + "id": { + "type": "string", + "description": "The ID of the Rubrik object." + }, + "name": { + "type": "string", + "description": "The name of the Rubrik object." + }, + "configuredSlaDomainId": { + "type": "string", + "description": "The ID of the SLA Domain configured directly on the Rubrik object." + }, + "configuredSlaDomainName": { + "type": "string", + "description": "The name of the SLA Domain configured directly on the Rubrik object." + }, + "configuredSlaDomainType": { + "description": "Specifies whether the SLA Domain is used for protection or retention.", + "$ref": "#/definitions/ConfiguredSlaType" + }, + "primaryClusterId": { + "type": "string", + "description": "The ID of the cluster that manages the Rubrik object." + }, + "isConfiguredSlaDomainRetentionLocked": { + "type": "boolean", + "description": "Indicates whether the configured SLA Domain is Retention Locked. When this value is 'true', the configured SLA Domain is a Retention Lock SLA Domain." + }, + "slaLastUpdateTime": { + "type": "string", + "format": "date-time", + "description": "The UTC time when the SLA Domain was last updated." + } + } + }, + "SlaAssignment": { + "type": "string", + "description": "Specifies the method used to apply an SLA Domain to an object. Possible values are Derived, Direct, and Unassigned.", + "enum": [ + "Derived", + "Direct", + "Unassigned" + ] + }, + "SlaConflictsSummary": { + "type": "object", + "required": [ + "conflicts", + "id", + "isPossiblyInconsistent" + ], + "properties": { + "id": { + "type": "string", + "description": "managedId." + }, + "conflicts": { + "type": "array", + "items": { + "$ref": "#/definitions/ManagedHierarchyObjectSummary" + } + }, + "isPossiblyInconsistent": { + "type": "boolean", + "description": "Indicates if the results returned are inconsistent due to an ongoing SLA assignment operation within this object's hierarchy. This endpoint does not consider the results of in flight SLA operations since they have not yet completed. Because the results may change once the operation completes, Rubrik advises waiting until all SLA assignments have completed on this hierarchy before reassigning, unless changing the childrens' SLAs directly with this assignment is unacceptable.\n" + } + } + }, + "SlaConflictsSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/SlaConflictsSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." 
+ } + } + }, + "SlaDayOfMonth": { + "type": "string", + "description": "The day of the month when snapshot will be taken.", + "enum": [ + "FirstDay", + "Fifteenth", + "LastDay" + ] + }, + "SlaDayOfQuarter": { + "type": "string", + "description": "The day of the quarter when snapshot will be taken.", + "enum": [ + "FirstDay", + "LastDay" + ] + }, + "SlaDayOfWeek": { + "type": "string", + "description": "The day of the week when snapshot will be taken.", + "enum": [ + "Monday", + "Tuesday", + "Wednesday", + "Thursday", + "Friday", + "Saturday", + "Sunday" + ] + }, + "SlaDayOfYear": { + "type": "string", + "description": "The day of the year when snapshot will be taken.", + "enum": [ + "FirstDay", + "LastDay" + ] + }, + "SlaDomainAssignmentInfo": { + "type": "object", + "required": [ + "managedIds" + ], + "properties": { + "managedIds": { + "type": "array", + "items": { + "type": "string" + } + }, + "existingSnapshotRetention": { + "description": "The retention strategy used for existing snapshots of objects that become unprotected. When an SLA Domain assignment is cleared from an object, the retention strategy described in this field is used only when the object cannot inherit a protection SLA Domain from an ancestor object and becomes an unprotected object.", + "$ref": "#/definitions/ExistingSnapshotRetention" + }, + "shouldApplyToExistingSnapshots": { + "type": "boolean", + "description": "A Boolean that specifies whether to retain existing snapshots of assigned objects with the configuration of a specified SLA Domain. The default value is 'true'. If objects are unprotected, the retention of existing snapshots will be determined by the value of parameter 'existingSnapshotRetention'. This field should be kept empty in such cases. When an SLA Domain assignment is cleared from an object, the retention strategy described in this field is used only when the object can inherit a protection SLA Domain from an ancestor object." + }, + "shouldApplyToNonPolicySnapshots": { + "type": "boolean", + "description": "A Boolean which if set to true specifies that the retention changes corresponding to the new SLA should be applied to non-policy snapshots in addition to existing policy based snapshots." + } + } + }, + "SlaDomainDefinition": { + "type": "object", + "required": [ + "firstFullAllowedBackupWindows", + "frequencies", + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "frequencies": { + "type": "array", + "items": { + "$ref": "#/definitions/SlaFrequency" + } + }, + "allowedBackupWindows": { + "type": "array", + "items": { + "$ref": "#/definitions/BackupWindow" + } + }, + "firstFullAllowedBackupWindows": { + "type": "array", + "items": { + "$ref": "#/definitions/BackupWindow" + } + }, + "localRetentionLimit": { + "type": "integer", + "format": "int64" + }, + "archivalSpecs": { + "type": "array", + "items": { + "$ref": "#/definitions/ArchivalSpec" + } + }, + "replicationSpecs": { + "type": "array", + "items": { + "$ref": "#/definitions/ReplicationSpec" + } + }, + "isRetentionLocked": { + "type": "boolean", + "description": "Boolean value that identifies a Retention Lock SLA Domain. Value is true when an SLA Domain is Retention Locked and false when it is not." 
+ } + } + }, + "SlaDomainPatchDefinitionV2": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "frequencies": { + "$ref": "#/definitions/SlaFrequencyV2" + }, + "logConfig": { + "$ref": "#/definitions/SlaLogConfig" + }, + "allowedBackupWindows": { + "type": "array", + "items": { + "$ref": "#/definitions/BackupWindow" + } + }, + "firstFullAllowedBackupWindows": { + "type": "array", + "items": { + "$ref": "#/definitions/BackupWindow" + } + }, + "localRetentionLimit": { + "type": "integer", + "format": "int64" + }, + "archivalSpecs": { + "type": "array", + "items": { + "$ref": "#/definitions/ArchivalSpecV2" + } + }, + "replicationSpecs": { + "type": "array", + "items": { + "$ref": "#/definitions/ReplicationSpecV2" + } + }, + "showAdvancedUi": { + "type": "boolean" + }, + "advancedUiConfig": { + "type": "array", + "items": { + "$ref": "#/definitions/AdvancedUiConfigAttributes" + } + }, + "isRetentionLocked": { + "type": "boolean", + "description": "Boolean value that identifies a Retention Lock SLA Domain. Value is true when an SLA Domain is Retention Locked and false when it is not." + }, + "incrementalFrequency": { + "description": "Incremental backup frequency for SAP HANA databases.", + "$ref": "#/definitions/IncrementalFrequencyMap" + }, + "differentialFrequency": { + "description": "Differential backup frequency for SAP HANA databases.", + "$ref": "#/definitions/DifferentialFrequencyMap" + }, + "logConfigs": { + "description": "Log backup configuration for VMware virtual machines and SAP HANA databases.", + "$ref": "#/definitions/LogConfigMap" + } + } + }, + "SlaDomainSummary": { + "allOf": [ + { + "$ref": "#/definitions/SlaObjectCounts" + }, + { + "type": "object", + "required": [ + "allowedBackupWindows", + "firstFullAllowedBackupWindows", + "frequencies", + "id", + "isDefault", + "maxLocalRetentionLimit", + "name", + "primaryClusterId" + ], + "properties": { + "id": { + "type": "string" + }, + "primaryClusterId": { + "type": "string" + }, + "name": { + "type": "string" + }, + "frequencies": { + "type": "array", + "items": { + "$ref": "#/definitions/SlaFrequency" + } + }, + "allowedBackupWindows": { + "type": "array", + "items": { + "$ref": "#/definitions/BackupWindow" + } + }, + "firstFullAllowedBackupWindows": { + "type": "array", + "items": { + "$ref": "#/definitions/BackupWindow" + } + }, + "localRetentionLimit": { + "type": "integer", + "format": "int64", + "description": "Retention limit for snapshots on the local Rubrik system. If none, they will remain as long as SLA requires." + }, + "maxLocalRetentionLimit": { + "type": "integer", + "format": "int64", + "description": "Maximum limit for snapshots to be retained on the local Rubrik system. For local sla, it would be max of frequencies but for remote sla, it would be the retentionLimit set on the replication target location. (Local location is the replication target location for remote sla)." + }, + "archivalSpecs": { + "type": "array", + "description": "Specification for archival locations on this SLA.", + "items": { + "$ref": "#/definitions/ArchivalSpec" + } + }, + "replicationSpecs": { + "type": "array", + "description": "Specification for replication locations on this SLA.", + "items": { + "$ref": "#/definitions/ReplicationSpec" + } + }, + "isDefault": { + "type": "boolean" + }, + "uiColor": { + "type": "string" + }, + "isRetentionLocked": { + "type": "boolean", + "description": "Boolean value that identifies a Retention Lock SLA Domain. 
Value is true when an SLA Domain is Retention Locked and false when it is not." + }, + "isPaused": { + "type": "boolean", + "description": "A Boolean value that specifies whether protection for all the snappables that are protected by the specified SLA Domain is paused. When the value is 'true' protection is paused." + } + } + } + ] + }, + "SlaDomainSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/SlaDomainSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "SlaDomainSummaryV2": { + "allOf": [ + { + "$ref": "#/definitions/SlaObjectCounts" + }, + { + "type": "object", + "required": [ + "advancedUiConfig", + "allowedBackupWindows", + "firstFullAllowedBackupWindows", + "frequencies", + "id", + "isDefault", + "maxLocalRetentionLimit", + "name", + "primaryClusterId", + "primaryClusterSlaVersion", + "showAdvancedUi" + ], + "properties": { + "id": { + "type": "string" + }, + "primaryClusterId": { + "type": "string" + }, + "name": { + "type": "string" + }, + "primaryClusterSlaVersion": { + "type": "integer", + "format": "int32", + "description": "Specifies the version of the SLA that is incremented for every user SLA Domain edit. For remote SLA Domains, this specifies the version of SLA Domain used by the source cluster." + }, + "replicationTargetSlaVersion": { + "type": "integer", + "format": "int32", + "description": "Specifies the version of the remote SLA Domain that is incremented for every user SLA Domain on the replication target." + }, + "polarisManagedId": { + "type": "string", + "description": "Optional field containing Polaris managed ids of the Polaris managed SLAs. This field will be set only if the SLA is Polaris managed." + }, + "frequencies": { + "$ref": "#/definitions/SlaFrequencyV2" + }, + "logConfig": { + "$ref": "#/definitions/SlaLogConfig" + }, + "allowedBackupWindows": { + "type": "array", + "items": { + "$ref": "#/definitions/BackupWindow" + } + }, + "firstFullAllowedBackupWindows": { + "type": "array", + "items": { + "$ref": "#/definitions/BackupWindow" + } + }, + "localRetentionLimit": { + "type": "integer", + "format": "int64", + "description": "The retention limit for snapshots on the local Rubrik system. When no limit is specified, snapshots are retained up to the limit specified by the SLA." + }, + "maxLocalRetentionLimit": { + "type": "integer", + "format": "int64", + "description": "The maximum retention limit for snapshots on the local Rubrik system. For snapshots operating under a local SLA, this limit is the longest period specified in the SlaFrequencyV2 object. For snapshots under a remote SLA, this limit is the value of the retentionLimit variable set at the replication target location." 
+ }, + "archivalSpecs": { + "type": "array", + "description": "Specification for archival locations on this SLA.", + "items": { + "$ref": "#/definitions/ArchivalSpecV2" + } + }, + "replicationSpecs": { + "type": "array", + "description": "Specification for the replication locations on this SLA.", + "items": { + "$ref": "#/definitions/ReplicationSpecV2" + } + }, + "isDefault": { + "type": "boolean" + }, + "uiColor": { + "type": "string" + }, + "showAdvancedUi": { + "type": "boolean" + }, + "advancedUiConfig": { + "type": "array", + "items": { + "$ref": "#/definitions/AdvancedUiConfigAttributes" + } + }, + "isRetentionLocked": { + "type": "boolean", + "description": "Boolean value that identifies a Retention Lock SLA Domain. Value is true when an SLA Domain is Retention Locked and false when it is not." + }, + "isPaused": { + "type": "boolean", + "description": "A Boolean value that specifies whether protection for all the snappables that are protected by the specified SLA Domain is paused. When the value is 'true' protection is paused." + }, + "incrementalFrequency": { + "description": "Incremental backup frequency for SAP HANA databases.", + "$ref": "#/definitions/IncrementalFrequencyMap" + }, + "differentialFrequency": { + "description": "Differential backup frequency for SAP HANA databases.", + "$ref": "#/definitions/DifferentialFrequencyMap" + }, + "logConfigs": { + "description": "Log backup configuration for VMware virtual machines and SAP HANA databases.", + "$ref": "#/definitions/LogConfigMap" + } + } + } + ] + }, + "SlaDomainUpdateSummary": { + "type": "object", + "required": [ + "patchedSlaDomainSummary" + ], + "properties": { + "patchedSlaDomainSummary": { + "description": "Object containing the patched SLA Domain.", + "$ref": "#/definitions/SlaDomainSummaryV2" + }, + "batchAsyncRequestStatus": { + "description": "Queued asynchronous requests for assigning SLA Domains to snapshots.", + "$ref": "#/definitions/BatchAsyncRequestStatus" + } + } + }, + "SlaFrequency": { + "type": "object", + "required": [ + "frequency", + "retention", + "timeUnit" + ], + "properties": { + "timeUnit": { + "type": "string", + "description": "Units for frequency and retention. Accepted values are Hourly, Daily, Weekly, Monthly, and Yearly." + }, + "frequency": { + "type": "integer", + "format": "int32" + }, + "retention": { + "type": "integer", + "format": "int32" + } + } + }, + "SlaFrequencyV2": { + "type": "object", + "properties": { + "minute": { + "$ref": "#/definitions/FrequencyConfig" + }, + "hourly": { + "$ref": "#/definitions/FrequencyConfig" + }, + "daily": { + "$ref": "#/definitions/FrequencyConfig" + }, + "weekly": { + "$ref": "#/definitions/WeeklyConfig" + }, + "monthly": { + "$ref": "#/definitions/MonthlyConfig" + }, + "quarterly": { + "$ref": "#/definitions/QuarterlyConfig" + }, + "yearly": { + "$ref": "#/definitions/YearlyConfig" + } + } + }, + "SlaLogConfig": { + "type": "object", + "properties": { + "slaLogFrequencyConfig": { + "$ref": "#/definitions/SlaLogFrequencyConfig" + } + }, + "description": "(Deprecated) Log backup configuration for VMware virtual machines only. To track log backup configuration by object type, use **logConfigs** instead." 
+ }, + "SlaLogConfiguration": { + "type": "object", + "properties": { + "slaLogFrequencyConfig": { + "$ref": "#/definitions/SlaLogFrequencyConfig" + } + } + }, + "SlaLogFrequencyConfig": { + "type": "object", + "required": [ + "logFrequencyType", + "retention" + ], + "properties": { + "retention": { + "type": "integer", + "format": "int32" + }, + "logFrequencyType": { + "$ref": "#/definitions/LogFrequencyType" + }, + "frequency": { + "type": "integer", + "format": "int32" + } + } + }, + "SlaMonth": { + "type": "string", + "description": "The month of the year when snapshot will be taken.", + "enum": [ + "January", + "February", + "March", + "April", + "May", + "June", + "July", + "August", + "September", + "October", + "November", + "December" + ] + }, + "SlaObjectCounts": { + "allOf": [ + { + "$ref": "#/definitions/NutanixVmSlaObjectCount" + }, + { + "$ref": "#/definitions/AwsEc2InstanceSlaObjectCount" + }, + { + "$ref": "#/definitions/HypervVmSlaObjectCount" + }, + { + "$ref": "#/definitions/ManagedVolumeSlaObjectCount" + }, + { + "$ref": "#/definitions/MssqlDbSlaObjectCount" + }, + { + "$ref": "#/definitions/VcdVappSlaObjectCount" + }, + { + "type": "object", + "properties": { + "numOracleDbs": { + "type": "integer", + "format": "int32", + "description": "The number of actively protected oracle databases under this SLA Domain." + }, + "numFilesets": { + "type": "integer", + "format": "int32", + "description": "The number of filesets protected under this SLA Domain." + }, + "numStorageArrayVolumeGroups": { + "type": "integer", + "format": "int32", + "description": "The number of storage array volume groups protected under this SLA Domain." + }, + "numWindowsVolumeGroups": { + "type": "integer", + "format": "int32", + "description": "The number of Windows volume groups protected under this SLA Domain." + }, + "numLinuxHosts": { + "type": "integer", + "format": "int32", + "description": "The number of Linux servers with filesets protected under this SLA Domain." + }, + "numShares": { + "type": "integer", + "format": "int32", + "description": "The number of shares protected under this SLA Domain." + }, + "numWindowsHosts": { + "type": "integer", + "format": "int32", + "description": "The number of Windows servers with filesets protected under this SLA Domain." + }, + "numVms": { + "type": "integer", + "format": "int32" + }, + "numProtectedObjects": { + "type": "integer", + "format": "int32", + "description": "The total number of protected ojects under this SLA Domain." + } + } + } + ] + }, + "SlaStartTimeAttributes": { + "type": "object", + "required": [ + "hour", + "minutes" + ], + "properties": { + "minutes": { + "type": "integer", + "format": "int32" + }, + "hour": { + "type": "integer", + "format": "int32" + }, + "dayOfWeek": { + "type": "integer", + "format": "int32" + } + } + }, + "SlaTimeUnit": { + "type": "string", + "description": "Units for frequency and retention. Accepted values are Minute, Hourly, Daily, Weekly, Monthly, Quarterly, and Yearly.", + "enum": [ + "Minute", + "Hourly", + "Daily", + "Weekly", + "Monthly", + "Quarterly", + "Yearly" + ] + }, + "Snappable": { + "allOf": [ + { + "$ref": "#/definitions/SlaAssignable" + }, + { + "$ref": "#/definitions/EffectiveSlaHolder" + }, + { + "type": "object", + "required": [ + "slaAssignment" + ], + "properties": { + "slaAssignment": { + "type": "string", + "description": "The SLA assignment type. Direct SLA assignment means that a SLA Domain was configured directly on the Rubrik object by the user. 
Derived SLA assignment means that the Rubrik object inherits an SLA Domain from its parent Rubrik object.", + "enum": [ + "Derived", + "Direct", + "Unassigned" + ] + }, + "retentionSlaDomainId": { + "type": "string", + "description": "The ID of the SLA Domain whose retention policy is in use." + } + } + } + ] + }, + "SnapshotCloudStorageTier": { + "type": "string", + "description": "The current cloud storage tier of a snapshot. A snapshot's cloud storage tier determines how the cloud provider will determine storage and retrieval costs, as well as retrieval latency. Accepted values are Hot, Cool, AzureArchive (with Azure locations), Glacier, and GlacierDeepArchive (for AWS S3 locations). The value Cold has been deprecated in favor of AzureArchive, which is the recommended replacement value.\n", + "enum": [ + "Hot", + "Cool", + "Cold", + "AzureArchive", + "Glacier", + "GlacierDeepArchive" + ] + }, + "SnapshotLocationRetentionInfo": { + "type": "object", + "required": [ + "id", + "isSnapshotPresent", + "name" + ], + "properties": { + "id": { + "type": "string", + "description": "ID of the location." + }, + "name": { + "type": "string", + "description": "Name of the location." + }, + "isSnapshotPresent": { + "type": "boolean", + "description": "Boolean that specifies whether the snapshot is present at this location. When this value is 'false,' the snapshot is expired at this location. Because retention information is unreliable for locations where the snapshots are not present, confirming that this value is 'true' is the best practice." + }, + "isExpirationDateCalculated": { + "type": "boolean", + "description": "A Boolean that indicates whether expiration date for snapshot has been calculated. This field will be absent if the snapshot has never existed at this location." + }, + "expirationTime": { + "type": "string", + "format": "date-time", + "description": "Time when the snapshot expired or is expected to expire at this location. This field will only be set if the snapshot has ever existed at the location. If the snapshot is present at the location, but the expiration time calculation is pending, this field will be absent. If the expiration time calculation is complete and the field is still absent, the snapshot will be retained forever at this location." + }, + "snapshotFrequency": { + "type": "string", + "description": "The tag to determine what frequency the snapshot corresponds to at this location. The snapshot tag can be hourly, daily, weekly, monthly, quarterly, or yearly depending on the SLA frequency which is used to determine the retention of the snapshot. A value of \"Ready for Deletion\" means that the snapshot will be deleted soon. A value of \"Forever\" means that the snapshot will never be deleted. This field is absent when the tag computation is incomplete.\n" + }, + "isExpirationInformationUnavailable": { + "type": "boolean", + "description": "Indicates whether expiration information of the snapshot is unavailable at this location. This field is always and only present for replication locations. 
Its value is true if and only if the replicated snapshots are from pre-5.2 cluster.\n" + } + } + }, + "SnapshotRetentionInfo": { + "type": "object", + "required": [ + "archivalInfos", + "cloudNativeLocationInfo", + "replicationInfos" + ], + "properties": { + "localInfo": { + "description": "Snapshot retention information on the local cluster.", + "$ref": "#/definitions/SnapshotLocationRetentionInfo" + }, + "archivalInfos": { + "type": "array", + "description": "List of snapshot retention information on the archival locations.", + "items": { + "$ref": "#/definitions/SnapshotLocationRetentionInfo" + } + }, + "replicationInfos": { + "type": "array", + "description": "List of snapshot retention information on the replicated locations.", + "items": { + "$ref": "#/definitions/SnapshotLocationRetentionInfo" + } + }, + "cloudNativeLocationInfo": { + "type": "array", + "description": "Snapshot retention information such as frequency tag and expected expiration time on the cloud native locations.\n", + "items": { + "$ref": "#/definitions/SnapshotLocationRetentionInfo" + } + } + } + }, + "StartTimeAttributes": { + "type": "object", + "required": [ + "hour", + "minutes" + ], + "properties": { + "minutes": { + "type": "integer", + "format": "int32" + }, + "hour": { + "type": "integer", + "format": "int32" + }, + "dayOfWeek": { + "type": "integer", + "format": "int32" + }, + "dayOfMonth": { + "type": "integer", + "format": "int32" + }, + "weekOfMonth": { + "type": "integer", + "format": "int32" + }, + "month": { + "type": "integer", + "format": "int32" + }, + "year": { + "type": "integer", + "format": "int32" + } + } + }, + "Status": { + "type": "object", + "required": [ + "status" + ], + "properties": { + "status": { + "type": "string" + }, + "description": { + "type": "string" + } + } + }, + "WeeklyConfig": { + "allOf": [ + { + "$ref": "#/definitions/FrequencyConfig" + }, + { + "type": "object", + "required": [ + "dayOfWeek" + ], + "properties": { + "dayOfWeek": { + "$ref": "#/definitions/SlaDayOfWeek" + } + } + } + ] + }, + "YearlyConfig": { + "allOf": [ + { + "$ref": "#/definitions/FrequencyConfig" + }, + { + "type": "object", + "required": [ + "dayOfYear", + "yearStartMonth" + ], + "properties": { + "yearStartMonth": { + "$ref": "#/definitions/SlaMonth" + }, + "dayOfYear": { + "$ref": "#/definitions/SlaDayOfYear" + } + } + } + ] + }, + "RefreshableObjectConnectionStatus": { + "type": "object", + "required": [ + "status" + ], + "properties": { + "status": { + "description": "Status of the refreshable object.", + "$ref": "#/definitions/RefreshableObjectConnectionStatusType" + }, + "message": { + "type": "string", + "description": "Details about the object status. Will be populated if the status is \"BadlyConfigured\"." + } + } + }, + "RefreshableObjectConnectionStatusType": { + "type": "string", + "description": "Status of the refreshable object. 
Possible values are \"Disconnected\" (no communication possible with object), \"Refreshing\" (able to communicate with object but has not refreshed yet), \"Connected\" (refreshed the metadata for the object), \"BadlyConfigured\" (object not configured correctly), \"Deleting\" (in process of removing the object), and \"Remote\" (replicated object that we should not connect to).", + "enum": [ + "Disconnected", + "Refreshing", + "Connected", + "BadlyConfigured", + "Deleting", + "Remote" + ] + }, + "DeleteReplicationSourceJob": { + "type": "object", + "required": [ + "jobId", + "locationId" + ], + "properties": { + "jobId": { + "type": "string", + "description": "Job ID of the job that was scheduled. This job is responsible for archiving metadata associated with with deleted source Rubrik cluster.\n" + }, + "locationId": { + "type": "string", + "description": "The UUID of the source Rubrik cluster to delete on the target Rubrik cluster.\n" + } + } + }, + "DeleteReplicationSourceJobConfig": { + "type": "object", + "required": [ + "locationId" + ], + "properties": { + "locationId": { + "type": "string", + "description": "The UUID of the source Rubrik cluster to delete on the target Rubrik cluster.\n" + } + } + }, + "DeleteReplicationSourceJobListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/DeleteReplicationSourceJob" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "DeleteReplicationSourceSummary": { + "type": "object", + "required": [ + "jobInstanceId", + "message", + "status" + ], + "properties": { + "status": { + "type": "integer", + "format": "int32", + "description": "Indicates the status of the request. When this value is '0' the request was valid and succeeded in scheduling the removal of the replication source Rubrik cluster. Otherwise the value is '-1'.\n" + }, + "message": { + "type": "string", + "description": "Additional information regarding status of request. This can provide more information on why the status was not successful.\n" + }, + "jobInstanceId": { + "type": "string", + "description": "Job ID of the job that was scheduled. This job is responsible for archiving metadata associated with with deleted source Rubrik cluster.\n" + } + } + }, + "ReplicationSourceSummary": { + "type": "object", + "required": [ + "id", + "replicationSetup", + "sourceClusterAddress", + "sourceClusterName", + "sourceClusterUuid" + ], + "properties": { + "id": { + "type": "string", + "description": "The Managed ID of the source Rubrik cluster. A Managed ID represents the UUID and type of the object.\n" + }, + "sourceClusterUuid": { + "type": "string", + "description": "The UUID of the source Rubrik cluster." + }, + "sourceClusterName": { + "type": "string", + "description": "The cluster name of the source Rubrik cluster." + }, + "sourceClusterAddress": { + "type": "string", + "description": "The IP address of a node on the source Rubrik cluster." + }, + "sourceGateway": { + "description": "The network gateway on the source Rubrik cluster for NAT network configurations.\n", + "$ref": "#/definitions/GatewayInfo" + }, + "replicationSetup": { + "type": "string", + "description": "Replication network configuration type. 
Currently 'NAT', 'Private Network', and 'Polaris' are available.\n" + }, + "isReplicationTargetPauseEnabled": { + "type": "boolean", + "description": "Indicates whether the local target Rubrik cluster has paused replication from the source Rubrik cluster using Source Specific Replication Pause.\n" + }, + "isRemoteGlobalBlackoutActive": { + "type": "boolean", + "description": "Indicates whether the remote source Rubrik cluster has paused replication to local target Rubrik cluster using Global Protection Pause.\n" + } + } + }, + "ReplicationSourceSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/ReplicationSourceSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "ReplicationTargetDefinition": { + "type": "object", + "required": [ + "password", + "replicationSetup", + "username" + ], + "properties": { + "targetClusterAddress": { + "type": "string", + "description": "The IP address of a node on the target Rubrik cluster." + }, + "targetGateway": { + "description": "The network gateway on the target Rubrik cluster for NAT network configurations.\n", + "$ref": "#/definitions/GatewayInfo" + }, + "sourceGateway": { + "description": "The network gateway on the source Rubrik cluster for NAT network configurations.\n", + "$ref": "#/definitions/GatewayInfo" + }, + "replicationSetup": { + "type": "string", + "description": "Replication network configuration type. Currently 'NAT', 'Private Network', and 'Polaris' are available.\n" + }, + "caCerts": { + "type": "string", + "description": "The certificate used by replication to make remote procedure calls to remote replication Rubrik cluster.\n" + }, + "username": { + "type": "string", + "description": "The username of a user on the target Rubrik cluster that is used to authorize the replication pairing.\n" + }, + "password": { + "type": "string", + "description": "The password of a user on the target Rubrik cluster that is used to authorize the replication pairing.\n", + "x-secret": true + }, + "realm": { + "type": "string", + "description": "Realm of provided credentials. This is the security policy domain defined for the target Rubrik cluster.\n" + } + } + }, + "ReplicationTargetSummary": { + "type": "object", + "required": [ + "id", + "replicationSetup" + ], + "properties": { + "id": { + "type": "string", + "description": "The Managed ID of the target Rubrik cluster. A Managed ID represents the UUID and type of the object.\n" + }, + "targetClusterUuid": { + "type": "string", + "description": "The UUID of the target Rubrik cluster." + }, + "targetClusterName": { + "type": "string", + "description": "The cluster name of the target Rubrik cluster." + }, + "targetClusterAddress": { + "type": "string", + "description": "The IP address of a node on the replication target Rubrik cluster.\n" + }, + "targetGateway": { + "description": "The network gateway on the target Rubrik cluster for NAT network configurations.\n", + "$ref": "#/definitions/GatewayInfo" + }, + "replicationSetup": { + "type": "string", + "description": "Replication network configuration type. Currently 'NAT', 'Private Network', and 'Polaris' are available.\n" + }, + "isRetentionLockEnabledLocation": { + "type": "boolean", + "description": "Indicates location is WORM SLA Domain enabled. 
When this value is 'true', a WORM SLA Domain exists that replicates to the target Rubrik cluster. A WORM SLA Domain is a SLA Domain that guarantees snapshots derived from the SLA Domain do not expire before the mandatory retention period.\n" + }, + "isReplicationTargetPauseEnabled": { + "type": "boolean", + "description": "Indicates whether the local target Rubrik cluster has paused replication from the source Rubrik cluster using Source Specific Replication Pause.\n" + } + } + }, + "ReplicationTargetSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/ReplicationTargetSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "ReplicationTargetUpdate": { + "type": "object", + "properties": { + "targetClusterAddress": { + "type": "string", + "description": "The IP address of a node on the target Rubrik cluster." + }, + "targetGateway": { + "description": "The network gateway on the target Rubrik cluster for NAT network configurations.\n", + "$ref": "#/definitions/GatewayInfo" + }, + "sourceGateway": { + "description": "The network gateway on the source Rubrik cluster for NAT network configurations.\n", + "$ref": "#/definitions/GatewayInfo" + }, + "replicationSetup": { + "type": "string", + "description": "Replication network configuration type. Currently 'NAT', 'Private Network', and 'Polaris' are available.\n" + }, + "caCerts": { + "type": "string", + "description": "The certificate used by replication to make remote procedure calls to remote replication Rubrik cluster.\n" + } + } + }, + "AttachmentType": { + "type": "string", + "description": "File type for a report subscription attachment.", + "enum": [ + "Csv" + ] + }, + "CdpLocalStatus": { + "type": "string", + "description": "Current Local CDP Status of virtual machine.", + "enum": [ + "NotEnabled", + "Pending", + "TakingSnapshot", + "Failed", + "Active", + "Resyncing" + ] + }, + "CdpReplicationStatus": { + "type": "string", + "description": "Current CDP Replication Status of virtual machine.", + "enum": [ + "NotEnabled", + "Failed", + "Healthy", + "Initializing" + ] + }, + "ChartData": { + "allOf": [ + { + "$ref": "#/definitions/ChartSummary" + }, + { + "type": "object", + "required": [ + "dataColumns", + "reportTemplate" + ], + "properties": { + "reportTemplate": { + "description": "The template this report is based on.", + "$ref": "#/definitions/ReportTemplateName" + }, + "dataColumns": { + "type": "array", + "description": "Data columns for the chart.", + "items": { + "$ref": "#/definitions/ChartDataColumn" + } + }, + "remainderDataColumn": { + "description": "Aggregated values for any remaining data.", + "$ref": "#/definitions/ChartDataColumn" + } + } + } + ] + }, + "ChartDataColumn": { + "type": "object", + "required": [ + "dataPoints", + "label" + ], + "properties": { + "label": { + "type": "string", + "description": "Value label for the data." + }, + "dataPoints": { + "type": "array", + "description": "The number values.", + "items": { + "$ref": "#/definitions/ChartDataPoint" + } + } + } + }, + "ChartDataPoint": { + "type": "object", + "required": [ + "measure", + "value" + ], + "properties": { + "measure": { + "type": "string", + "description": "Measure label for the data." 
+ }, + "value": { + "type": "number", + "format": "double", + "description": "The number value." + } + } + }, + "ChartSummary": { + "type": "object", + "required": [ + "attribute", + "chartType", + "id", + "measure", + "name" + ], + "properties": { + "id": { + "type": "string", + "description": "ID of the chart." + }, + "name": { + "type": "string", + "description": "Name of the chart." + }, + "chartType": { + "type": "string", + "description": "Type of the chart.", + "enum": [ + "Donut", + "VerticalBar", + "HorizontalBar", + "Line", + "StackedVerticalBar", + "StackedHorizontalBar" + ] + }, + "attribute": { + "type": "string", + "description": "Attribute for the chart.", + "enum": [ + "Hour", + "Day", + "Month", + "Quarter", + "Year", + "SlaDomain", + "TaskStatus", + "TaskType", + "Location", + "ObjectName", + "ObjectType", + "ClusterLocation", + "ComplianceStatus", + "ArchivalComplianceStatus", + "LocalCdpStatus", + "StartMethod" + ] + }, + "measure": { + "type": "string", + "description": "Measure for the chart.", + "enum": [ + "Duration", + "DataTransferred", + "LogicalDataProtected", + "LogicalObjectSize", + "DataStored", + "NumFilesTransferred", + "EffectiveThroughput", + "DedupRatio", + "LogicalDedupRatio", + "DataReductionPercent", + "LogicalDataReductionPercent", + "TaskCount", + "SuccessfulTaskCount", + "CanceledTaskCount", + "FailedTaskCount", + "AverageDuration", + "ObjectCount", + "TotalLocalStorage", + "TotalReplicaStorage", + "TotalArchiveStorage", + "LocalStorageGrowth", + "ArchiveStorageGrowth", + "ReplicaStorageGrowth", + "InComplianceCount", + "NonComplianceCount", + "ArchivalInComplianceCount", + "ArchivalNonComplianceCount", + "TotalSnapshots", + "MissedLocalSnapshots", + "MissedArchivalSnapshots", + "LocalSnapshots", + "ReplicaSnapshots", + "ArchiveSnapshots", + "StackedTaskCountByStatus", + "StackedTotalData", + "StackedTotalStorage", + "StackedStorageGrowth", + "StackedSnapshotCount", + "StackedComplianceCountByStatus", + "StackedArchivalComplianceCountByStatus", + "LocalCdpLogStorage", + "LocalCdpThroughput", + "StackedReplicationComplianceCountByStatus", + "ReplicationInComplianceCount", + "ReplicationNonComplianceCount" + ] + } + } + }, + "ComplianceRangeFilter": { + "type": "string", + "description": "Specifies a number of snapshots. Compliance for each object is calculated for the most recent snapshots, up to the specified number.", + "enum": [ + "LastSnapshot", + "Last2Snapshots", + "Last3Snapshots", + "AllSnapshots" + ] + }, + "ComplianceReportTimeRangeFilter": { + "type": "string", + "description": "All possible spans to calculate Compliance report over (either time-based or SLA-based).", + "enum": [ + "Past24Hours", + "Past7Days", + "Past30Days", + "Past90Days", + "Past365Days", + "LastSnapshot", + "Last2Snapshots", + "Last3Snapshots", + "AllSnapshots" + ] + }, + "ComplianceStatus": { + "type": "string", + "description": "Compliance status of a objects.", + "enum": [ + "InCompliance", + "NonCompliance", + "Unprotected" + ] + }, + "ComplianceSummary": { + "type": "object", + "required": [ + "numberAwaitingFirstFull", + "numberOfInComplianceSnapshots", + "numberOfOutOfComplianceSnapshots", + "percentInCompliance", + "percentOutOfCompliance", + "totalProtected" + ], + "properties": { + "totalProtected": { + "type": "integer", + "format": "int64", + "description": "Total number of protected objects." + }, + "numberOfInComplianceSnapshots": { + "type": "integer", + "format": "int64", + "description": "Number of objects in compliance in the past 24 hours." 
+ }, + "numberOfOutOfComplianceSnapshots": { + "type": "integer", + "format": "int64", + "description": "Number of objects out of compliance in the past 24 hours." + }, + "numberAwaitingFirstFull": { + "type": "integer", + "format": "int64", + "description": "Number of objects awaiting first full." + }, + "percentInCompliance": { + "type": "number", + "format": "double", + "description": "Percent of objects in compliance in the past 24 hours." + }, + "percentOutOfCompliance": { + "type": "number", + "format": "double", + "description": "Percent of objects out of compliance in the past 24 hours." + } + } + }, + "DataSourceDownloadConfig": { + "type": "object", + "required": [ + "accessKey", + "bucket", + "dataSourceType", + "objectName", + "secretKey", + "storageProvider" + ], + "properties": { + "dataSourceType": { + "type": "string", + "description": "Type of the report data source.", + "enum": [ + "ProtectionTasks", + "GlobalObject" + ] + }, + "since": { + "type": "string", + "format": "date-time", + "description": "Start date of the report data source (queued_time column)." + }, + "storageProvider": { + "description": "Object store where the storage object needs to be uploaded.", + "$ref": "#/definitions/StorageProvider" + }, + "objectName": { + "type": "string", + "description": "Name of the storage object to upload to." + }, + "accessKey": { + "type": "string", + "description": "Project ID under Google Storage. Access key under AWS.", + "x-secret": true + }, + "secretKey": { + "type": "string", + "description": "The secret key associated with the user/service account to access storage.", + "x-secret": true + }, + "bucket": { + "type": "string", + "description": "Bucket name cannot contain whitespace or _\\\\/*?%.:|<> For AWS, bucket name also cannot contain capital letters or underscore.\n" + }, + "endpoint": { + "type": "string", + "description": "Endpoint to access S3Compatible server. This is not needed for Google Cloud uploads." + } + } + }, + "DataSourceTableData": { + "allOf": [ + { + "$ref": "#/definitions/TableSummary" + }, + { + "type": "object", + "required": [ + "dataGrid", + "hasMore" + ], + "properties": { + "dataGrid": { + "type": "array", + "description": "Table rows.", + "items": { + "type": "array", + "description": "A row of the table.", + "items": { + "type": "string", + "description": "A single data point for the table." + } + } + }, + "hasMore": { + "type": "boolean", + "description": "A Boolean value that specifies whether or not the list has more elements. This value is 'true' when the list has more elements. This value is 'false' when the list has no more elements." + }, + "cursor": { + "type": "string", + "description": "Cursor of the last table row sent in the response. Used for setting the cursor when getting the next page of the table." + }, + "updatedTime": { + "type": "string", + "format": "date-time", + "description": "Last updated time." 
+ } + } + } + ] + }, + "DataSourceTableRequest": { + "type": "object", + "required": [ + "dataSource" + ], + "properties": { + "reportTableRequest": { + "description": "Definition of items to get from the data source table.", + "$ref": "#/definitions/ReportTableRequest" + }, + "columns": { + "description": "Columns to include in the table.", + "$ref": "#/definitions/TableSummary" + }, + "dataSource": { + "type": "string", + "description": "Name of the report data source.", + "enum": [ + "FrequentDataSource", + "ReportableTasks", + "GlobalObject", + "HistoricalStats", + "TaskDiagnostics" + ] + }, + "snapshotRange": { + "description": "Snapshot range on which to apply compliance status filter. Valid only for report data source FrequentDataSource.", + "$ref": "#/definitions/ComplianceRangeFilter" + } + } + }, + "DateFilter": { + "type": "object", + "required": [ + "period" + ], + "properties": { + "beforeDate": { + "type": "string", + "format": "date-time", + "description": "End date of the range." + }, + "afterDate": { + "type": "string", + "format": "date-time", + "description": "Start date of the range." + }, + "period": { + "type": "string", + "description": "Period of time.", + "enum": [ + "PastDay", + "PastWeek", + "Past30Days", + "PastYear", + "CustomDate" + ] + } + } + }, + "EmailSubscriptionCreate": { + "allOf": [ + { + "$ref": "#/definitions/EmailSummary" + }, + { + "type": "object", + "required": [ + "timeAttributes" + ], + "properties": { + "timeAttributes": { + "$ref": "#/definitions/SubscriptionScheduleTimeAttributes" + } + } + } + ] + }, + "EmailSubscriptionSummary": { + "allOf": [ + { + "$ref": "#/definitions/EmailSubscriptionCreate" + }, + { + "type": "object", + "required": [ + "id", + "status" + ], + "properties": { + "id": { + "type": "string", + "description": "ID assigned to an email subscription object." + }, + "status": { + "description": "Status of the email subscription object.", + "$ref": "#/definitions/ReportSubscriptionStatus" + }, + "owner": { + "description": "Owner of the email subscription object.", + "$ref": "#/definitions/ReportSubscriptionOwner" + } + } + } + ] + }, + "EmailSubscriptionUpdate": { + "allOf": [ + { + "$ref": "#/definitions/EmailSubscriptionCreate" + }, + { + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "type": "string", + "description": "ID assigned to an email subscription object." + }, + "assumeOwnership": { + "type": "boolean", + "description": "Changes the owner of an email subscription object to the username of the account that is logged into the current session." 
+ } + } + } + ] + }, + "EmailSummary": { + "type": "object", + "required": [ + "attachments", + "emailAddresses" + ], + "properties": { + "emailAddresses": { + "type": "array", + "description": "Email addresses to send reports to.", + "items": { + "type": "string" + } + }, + "attachments": { + "type": "array", + "description": "Attachment files to send with the subscription.", + "items": { + "$ref": "#/definitions/AttachmentType" + } + } + } + }, + "FilterSummary": { + "type": "object", + "properties": { + "dateConfig": { + "description": "Date range for the data.", + "$ref": "#/definitions/DateFilter" + }, + "organization": { + "type": "array", + "description": "Organization IDs.", + "items": { + "type": "string" + } + }, + "taskType": { + "type": "array", + "description": "Task type.", + "items": { + "$ref": "#/definitions/ReportableTaskType" + } + }, + "taskStatus": { + "type": "array", + "description": "Status of the task.", + "items": { + "$ref": "#/definitions/ReportableTaskStatus" + } + }, + "slaDomain": { + "type": "array", + "description": "SLA domain IDs.", + "items": { + "type": "string" + } + }, + "retentionSlaDomain": { + "type": "array", + "description": "Retention SLA domain IDs.", + "items": { + "type": "string" + } + }, + "complianceStatus": { + "type": "array", + "description": "Compliance status.", + "items": { + "$ref": "#/definitions/ComplianceStatus" + } + }, + "objects": { + "type": "array", + "description": "Object IDs.", + "items": { + "type": "string" + } + }, + "objectType": { + "type": "array", + "description": "Object types.", + "items": { + "$ref": "#/definitions/ReportableObjectType" + } + }, + "objectState": { + "type": "array", + "description": "An array containing the object state string for a specified group of SLA Domains and data objects. Active refers to an SLA Domain or a data object that is actively in use. Archived refers to an SLA Domain or a data object that was removed from active data management. 
Relic refers to a data object with existing unexpired snapshots.\n", + "items": { + "type": "string", + "enum": [ + "Active", + "Archived", + "Relic" + ] + } + }, + "objectLocation": { + "type": "array", + "description": "Object locations.", + "items": { + "type": "string" + } + }, + "clusterLocation": { + "type": "array", + "description": "Cluster locations.", + "items": { + "type": "string", + "enum": [ + "Local", + "Remote" + ] + } + }, + "clusterIds": { + "type": "array", + "description": "list of cluster IDs.", + "items": { + "type": "string" + } + }, + "archivalComplianceStatus": { + "type": "array", + "description": "Archival compliance status.", + "items": { + "$ref": "#/definitions/ComplianceStatus" + } + }, + "localCdpStatus": { + "type": "array", + "description": "Local CDP Status.", + "items": { + "$ref": "#/definitions/CdpLocalStatus" + } + }, + "latestLocalSnapshotIndexState": { + "type": "array", + "description": "Last snapshot index state.", + "items": { + "$ref": "#/definitions/ReportSnapshotIndexState" + } + }, + "objectIndexType": { + "type": "array", + "description": "Object index types.", + "items": { + "$ref": "#/definitions/ReportableObjectIndexType" + } + }, + "cdpReplicationStatus": { + "type": "array", + "description": "CDP replication status.", + "items": { + "$ref": "#/definitions/CdpReplicationStatus" + } + }, + "timeRange": { + "description": "Time range over which to calculate count columns of reports based on SLA.", + "$ref": "#/definitions/ComplianceReportTimeRangeFilter" + }, + "replicationComplianceStatus": { + "type": "array", + "description": "Replication. compliance status.", + "items": { + "$ref": "#/definitions/ComplianceStatus" + } + }, + "startMethod": { + "type": "array", + "description": "Job start method.", + "items": { + "$ref": "#/definitions/StartMethod" + } + } + } + }, + "LogReportDetailView": { + "type": "object", + "required": [ + "reportTemplate", + "reportType" + ], + "properties": { + "reportType": { + "type": "string", + "description": "Type of the report custom vs default." + }, + "reportTemplate": { + "type": "string", + "description": "Template of the report." + } + } + }, + "RecoveryPointType": { + "type": "string", + "description": "Recovery point type.", + "enum": [ + "Snapshot", + "PointInTime", + "LogSequenceNumber", + "RangeInTime" + ] + }, + "ReportCreate": { + "type": "object", + "required": [ + "name", + "reportTemplate" + ], + "properties": { + "name": { + "type": "string", + "description": "The name of the report." + }, + "reportTemplate": { + "description": "The template this report is based on.", + "$ref": "#/definitions/ReportTemplateName" + } + } + }, + "ReportDetail": { + "allOf": [ + { + "$ref": "#/definitions/ReportUpdate" + }, + { + "$ref": "#/definitions/ReportSummary" + } + ] + }, + "ReportSnapshotConsistency": { + "type": "string", + "description": "Snapshot consistency.", + "enum": [ + "Inconsistent", + "CrashConsistent", + "FileSystemConsistent", + "VssConsistent", + "AppConsistent", + "Unknown" + ] + }, + "ReportSnapshotIndexState": { + "type": "string", + "description": "Index state of a snapshot.", + "enum": [ + "Success", + "Failed", + "Pending" + ] + }, + "ReportSubscriptionOwner": { + "type": "object", + "required": [ + "userId", + "username" + ], + "properties": { + "userId": { + "type": "string", + "description": "User ID for the owner of an email subscription object." + }, + "username": { + "type": "string", + "description": "Username for the owner of an email subscription object." 
+ } + } + }, + "ReportSubscriptionStatus": { + "type": "string", + "description": "Status of a report subscription.", + "enum": [ + "Active", + "Suspended", + "Unknown" + ] + }, + "ReportSummary": { + "allOf": [ + { + "$ref": "#/definitions/ReportCreate" + }, + { + "type": "object", + "required": [ + "id", + "reportType", + "updateStatus" + ], + "properties": { + "id": { + "type": "string", + "description": "ID of the report." + }, + "reportType": { + "type": "string", + "description": "Type of the report.", + "enum": [ + "Canned", + "Custom" + ] + }, + "updateStatus": { + "description": "Update status of a report.", + "$ref": "#/definitions/ReportUpdateStatus" + }, + "updatedTime": { + "type": "string", + "description": "Last updated time." + } + } + } + ] + }, + "ReportSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/ReportSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "ReportTableRequest": { + "type": "object", + "properties": { + "limit": { + "type": "integer", + "format": "int32", + "description": "Maximum number of table rows to return." + }, + "sortBy": { + "type": "string", + "description": "Sort the returned table based off the specified attribute.", + "enum": [ + "Hour", + "Day", + "Month", + "Quarter", + "Year", + "SlaDomain", + "ReplicationTarget", + "ArchivalTarget", + "TaskStatus", + "TaskType", + "Location", + "ObjectName", + "ObjectType", + "ObjectIndexType", + "ClusterLocation", + "ComplianceStatus", + "Organization", + "RecoveryPoint", + "RecoveryPointType", + "Username", + "FailureReason", + "SnapshotConsistency", + "QueuedTime", + "StartTime", + "EndTime", + "Duration", + "DataTransferred", + "LogicalDataProtected", + "DataStored", + "NumFilesTransferred", + "EffectiveThroughput", + "DedupRatio", + "LogicalDedupRatio", + "DataReductionPercent", + "LogicalDataReductionPercent", + "TaskCount", + "SuccessfulTaskCount", + "CanceledTaskCount", + "FailedTaskCount", + "AverageDuration", + "ObjectCount", + "TotalLocalStorage", + "TotalReplicaStorage", + "TotalArchiveStorage", + "LocalStorageGrowth", + "ArchiveStorageGrowth", + "ReplicaStorageGrowth", + "ProtectedOn", + "InComplianceCount", + "NonComplianceCount", + "ArchivalInComplianceCount", + "ArchivalNonComplianceCount", + "TotalSnapshots", + "MissedLocalSnapshots", + "MissedArchivalSnapshots", + "LocalSnapshots", + "ReplicaSnapshots", + "ArchiveSnapshots", + "LatestLocalSnapshot", + "LocalCdpStatus", + "PercentLocal24HourCdpHealthy", + "LocalCdpLogStorage", + "LocalCdpThroughput", + "LatestLocalSnapshotIndexState", + "LocalIndexedSnapshotsCount", + "LocalUnindexedSnapshotsCount", + "LocalPendingForIndexSnapshotsCount", + "LatestLocalIndexedSnapshotTime", + "CdpReplicationStatus", + "ReplicationInComplianceCount", + "ReplicationNonComplianceCount", + "StartMethod" + ] + }, + "sortOrder": { + "description": "Order by which to sort the returned table.", + "$ref": "#/definitions/SortOrder" + }, + "cursor": { + "type": "string", + "description": "Fetches all rows after given row cursor." + }, + "objectName": { + "type": "string", + "description": "Search for objects with in the table data. Search via object name prefix." 
+ }, + "requestFilters": { + "description": "Filters table data by SLA Domain ID, task type, task status, object type, compliance status, cluster location, CDP status, latest snapshot index state, and CDP replication status.", + "$ref": "#/definitions/RequestFilters" + }, + "requestExclusionFilters": { + "description": "Exclusion Filters of table data, on CDP status.", + "$ref": "#/definitions/RequestExclusionFilters" + } + } + }, + "ReportTemplateDetail": { + "type": "object", + "required": [ + "chart0", + "chart1", + "filters", + "table" + ], + "properties": { + "filters": { + "description": "Filter properties to update.", + "$ref": "#/definitions/FilterSummary" + }, + "chart0": { + "description": "Chart0 properties to update.", + "$ref": "#/definitions/ChartSummary" + }, + "chart1": { + "description": "Chart1 properties to update.", + "$ref": "#/definitions/ChartSummary" + }, + "table": { + "description": "Table properties to update.", + "$ref": "#/definitions/TableSummary" + } + } + }, + "ReportTemplateName": { + "type": "string", + "description": "Name of a report template to use when making calls to the /report endpoint.", + "enum": [ + "CapacityOverTime", + "ObjectProtectionSummary", + "ObjectTaskSummary", + "ObjectIndexingSummary", + "ProtectionTasksDetails", + "ProtectionTasksSummary", + "RecoveryTasksDetails", + "SlaComplianceSummary", + "SystemCapacity" + ] + }, + "ReportUpdate": { + "type": "object", + "required": [ + "chart0", + "chart1", + "filters", + "name", + "table" + ], + "properties": { + "name": { + "type": "string", + "description": "The name of the report." + }, + "filters": { + "description": "Filter properties to update.", + "$ref": "#/definitions/FilterSummary" + }, + "chart0": { + "description": "Chart0 properties to update.", + "$ref": "#/definitions/ChartSummary" + }, + "chart1": { + "description": "Chart1 properties to update.", + "$ref": "#/definitions/ChartSummary" + }, + "table": { + "description": "Table properties to update.", + "$ref": "#/definitions/TableSummary" + } + } + }, + "ReportUpdateStatus": { + "type": "string", + "description": "Update status of a custom report. 
If there is no sliqte file available for the report, the status will be updating.", + "enum": [ + "Updating", + "Ready" + ] + }, + "ReportableObjectIndexType": { + "type": "string", + "description": "Index types that define whether a reportable object is indexable.", + "enum": [ + "Indexable", + "Unindexable", + "Unprotected" + ] + }, + "ReportableObjectType": { + "type": "string", + "description": "Object types that are reported on.", + "enum": [ + "AppBlueprint", + "Ec2Instance", + "Hdfs", + "HypervVirtualMachine", + "LinuxFileset", + "ManagedVolume", + "Mssql", + "NutanixVirtualMachine", + "OracleDatabase", + "SapHanaDatabase", + "ShareFileset", + "StorageArrayVolumeGroup", + "VcdVapp", + "VmwareVirtualMachine", + "WindowsFileset", + "WindowsVolumeGroup" + ] + }, + "ReportableTaskStatus": { + "type": "string", + "description": "Status of a terminated task in report.", + "enum": [ + "Succeeded", + "Failed", + "Canceled" + ] + }, + "ReportableTaskType": { + "type": "string", + "description": "Task type.", + "enum": [ + "Backup", + "LogBackup", + "Replication", + "LogReplication", + "Archival", + "ArchivalTiering", + "LogArchival", + "LogShipping", + "Instantiate", + "LiveMount", + "InstantRecovery", + "Export", + "Restore", + "InPlaceRecovery", + "DownloadFile", + "RestoreFile", + "Conversion", + "Index", + "Validation" + ] + }, + "RequestExclusionFilters": { + "type": "object", + "properties": { + "localCdpStatus": { + "description": "Local CDP Status to exclude.", + "$ref": "#/definitions/CdpLocalStatus" + } + } + }, + "RequestFilters": { + "type": "object", + "properties": { + "organization": { + "type": "string", + "description": "Organization IDs." + }, + "slaDomain": { + "type": "string", + "description": "SLA domain IDs." + }, + "retentionSlaDomain": { + "type": "string", + "description": "Retention SLA domain IDs." + }, + "taskType": { + "description": "Task type.", + "$ref": "#/definitions/ReportableTaskType" + }, + "taskStatus": { + "description": "Status of the task.", + "$ref": "#/definitions/ReportableTaskStatus" + }, + "objectType": { + "description": "Object types.", + "$ref": "#/definitions/ReportableObjectType" + }, + "objectState": { + "type": "string", + "description": "Object state is used to describe the state of both SLA Domains and data objects. The Active state refers to an SLA Domain or a data object that is actively in use by Rubrik data management. The Archived state refers to an SLA Domain or a data object that was removed from active Rubrik data management. The Relic state refers to an Archived data object with existing unexpired snapshots.", + "enum": [ + "Active", + "Archived", + "Relic" + ] + }, + "complianceStatus": { + "description": "Compliance status.", + "$ref": "#/definitions/ComplianceStatus" + }, + "clusterLocation": { + "type": "string", + "description": "Cluster locations.", + "enum": [ + "Local", + "Remote" + ] + }, + "clusterId": { + "type": "string", + "description": "Cluster ID." 
+ }, + "archivalComplianceStatus": { + "description": "Archival compliance status.", + "$ref": "#/definitions/ComplianceStatus" + }, + "localCdpStatus": { + "description": "Local CDP Status.", + "$ref": "#/definitions/CdpLocalStatus" + }, + "latestLocalSnapshotIndexState": { + "description": "Last snapshot index state.", + "$ref": "#/definitions/ReportSnapshotIndexState" + }, + "objectIndexType": { + "description": "Object index types.", + "$ref": "#/definitions/ReportableObjectIndexType" + }, + "cdpReplicationStatus": { + "description": "CDP replication status.", + "$ref": "#/definitions/CdpReplicationStatus" + }, + "awaitingFirstFull": { + "type": "boolean", + "description": "Indicates if the cluster already has a first full snapshot of this object." + }, + "replicationComplianceStatus": { + "description": "Replication compliance status.", + "$ref": "#/definitions/ComplianceStatus" + }, + "startMethod": { + "description": "Job start method.", + "$ref": "#/definitions/StartMethod" + } + } + }, + "Runway": { + "type": "object", + "required": [ + "days" + ], + "properties": { + "days": { + "type": "integer", + "format": "int64" + } + } + }, + "SortableReportAttribute": { + "type": "string", + "description": "Report. can be sorted by the report name, or by the value of reportTemplate or reportType.", + "enum": [ + "name", + "reportTemplate", + "reportType" + ] + }, + "StartMethod": { + "type": "string", + "description": "Job start method.", + "enum": [ + "SLADriven", + "OnDemand" + ] + }, + "TableData": { + "allOf": [ + { + "$ref": "#/definitions/TableSummary" + }, + { + "type": "object", + "required": [ + "dataGrid", + "hasMore", + "reportTemplate" + ], + "properties": { + "reportTemplate": { + "description": "The template this report is based on.", + "$ref": "#/definitions/ReportTemplateName" + }, + "dataGrid": { + "type": "array", + "description": "Table rows.", + "items": { + "type": "array", + "description": "A row of the table.", + "items": { + "type": "string", + "description": "A single data point for the table." + } + } + }, + "hasMore": { + "type": "boolean", + "description": "True if the list has more elements." + }, + "cursor": { + "type": "string", + "description": "Cursor of the last table row sent in the response. Used for setting the cursor when getting the next page of the table." 
+ } + } + } + ] + }, + "TableSummary": { + "type": "object", + "required": [ + "columns" + ], + "properties": { + "columns": { + "type": "array", + "description": "Columns for the table.", + "items": { + "type": "string", + "enum": [ + "Hour", + "Day", + "Month", + "Quarter", + "Year", + "SlaDomain", + "ReplicationTarget", + "ArchivalTarget", + "TaskStatus", + "TaskType", + "Location", + "ObjectName", + "ObjectType", + "ObjectIndexType", + "ClusterLocation", + "ComplianceStatus", + "ArchivalComplianceStatus", + "Organization", + "RecoveryPoint", + "RecoveryPointType", + "Username", + "FailureReason", + "SnapshotConsistency", + "QueuedTime", + "StartTime", + "EndTime", + "Duration", + "DataTransferred", + "LogicalDataProtected", + "LogicalObjectSize", + "DataStored", + "NumFilesTransferred", + "EffectiveThroughput", + "DedupRatio", + "LogicalDedupRatio", + "DataReductionPercent", + "LogicalDataReductionPercent", + "TaskCount", + "SuccessfulTaskCount", + "CanceledTaskCount", + "FailedTaskCount", + "AverageDuration", + "ObjectCount", + "TotalLocalStorage", + "TotalReplicaStorage", + "TotalArchiveStorage", + "LocalStorageGrowth", + "ArchiveStorageGrowth", + "ReplicaStorageGrowth", + "ProtectedOn", + "InComplianceCount", + "NonComplianceCount", + "ArchivalInComplianceCount", + "ArchivalNonComplianceCount", + "TotalSnapshots", + "MissedLocalSnapshots", + "MissedArchivalSnapshots", + "LocalSnapshots", + "ReplicaSnapshots", + "ArchiveSnapshots", + "LatestLocalSnapshot", + "LocalCdpStatus", + "PercentLocal24HourCdpHealthy", + "LocalCdpLogStorage", + "LocalCdpThroughput", + "LatestLocalSnapshotIndexState", + "LocalIndexedSnapshotsCount", + "LocalUnindexedSnapshotsCount", + "LocalPendingForIndexSnapshotsCount", + "LatestLocalIndexedSnapshotTime", + "CdpReplicationStatus", + "ReplicationSnapshotLag", + "ReplicationComplianceStatus", + "MissedReplicationSnapshots", + "ReplicationDataLag", + "ReplicationInComplianceCount", + "ReplicationNonComplianceCount", + "StartMethod" + ] + } + } + } + }, + "AsyncRequestStatus": { + "type": "object", + "required": [ + "id", + "links", + "status" + ], + "properties": { + "id": { + "type": "string", + "description": "The ID of the request object used to poll the status." + }, + "status": { + "type": "string", + "description": "Status of the ID." + }, + "progress": { + "type": "number", + "format": "double", + "description": "The current percentage progress of the asynchronous request." + }, + "startTime": { + "type": "string", + "format": "date-time", + "description": "The start time of the request." + }, + "endTime": { + "type": "string", + "format": "date-time", + "description": "The end time of the request." + }, + "nodeId": { + "type": "string", + "description": "The ID of the node where the job ran." 
+ }, + "error": { + "description": "Any errors encountered.", + "$ref": "#/definitions/RequestErrorInfo" + }, + "links": { + "type": "array", + "description": "References to any related objects.", + "items": { + "$ref": "#/definitions/Link" + } + } + } + }, + "BatchAsyncRequest": { + "type": "object", + "required": [ + "ids" + ], + "properties": { + "ids": { + "type": "array", + "description": "Batch of the request object IDs used to poll the status.", + "items": { + "type": "string" + } + } + } + }, + "BatchAsyncRequestStatus": { + "type": "object", + "required": [ + "responses" + ], + "properties": { + "responses": { + "type": "array", + "description": "The asynchronous request status of a batch request.", + "items": { + "$ref": "#/definitions/AsyncRequestStatus" + } + } + } + }, + "RequestErrorInfo": { + "type": "object", + "required": [ + "message" + ], + "properties": { + "message": { + "type": "string", + "description": "The error message for failed IDs." + } + } + }, + "RequestFailedException": { + "type": "object", + "required": [ + "errorType", + "message" + ], + "properties": { + "errorType": { + "type": "string" + }, + "message": { + "type": "string" + }, + "code": { + "type": "string" + }, + "param": { + "type": "string" + } + } + }, + "StringResponse": { + "type": "object", + "required": [ + "response" + ], + "properties": { + "response": { + "type": "string" + } + } + }, + "ManagedObjectDescendantCountSapHanaFields": { + "type": "object", + "properties": { + "sapHanaDatabase": { + "type": "integer", + "format": "int32", + "description": "Number of SAP HANA databases." + } + } + }, + "SapHanaDatabaseLogBackup": { + "type": "object", + "required": [ + "backupId", + "logBackupfiles", + "sapHanaEndTime", + "sapHanaStartTime" + ], + "properties": { + "backupId": { + "type": "integer", + "format": "int64", + "description": "ID of the SAP HANA backup. This ID uniquely identifies a backup to SAP HANA. All backup files from a single backup share the same backup ID." + }, + "sapHanaStartTime": { + "type": "string", + "format": "date-time", + "description": "Start time, in UTC, of the log backup, with respect to the SAP HANA system." + }, + "sapHanaEndTime": { + "type": "string", + "format": "date-time", + "description": "End time, in UTC, of the log backup, with respect to the SAP HANA system." + }, + "logBackupfiles": { + "type": "array", + "description": "Array containing the details of the files in the log backup.", + "items": { + "$ref": "#/definitions/SapHanaLogBackupFile" + } + } + }, + "description": "Details about a log backup synchronized from a SAP HANA database." + }, + "SapHanaDatabaseLogSnapshotDetail": { + "type": "object", + "required": [ + "date", + "dbId", + "id", + "logBackups" + ], + "properties": { + "id": { + "type": "string" + }, + "date": { + "type": "string", + "format": "date-time" + }, + "dbId": { + "type": "string", + "description": "ID of the SAP HANA database to which the log snapshot belongs." + }, + "expirationDate": { + "type": "string", + "format": "date-time" + }, + "logBackups": { + "type": "array", + "description": "Array containing the details of the log backups synchronized from the SAP HANA database as a part of this log snapshot.", + "items": { + "$ref": "#/definitions/SapHanaDatabaseLogBackup" + } + } + }, + "description": "Details about a log snapshot created by synchronizing multiple log backups from an SAP HANA database." 
+ }, + "SapHanaDatabaseLogSnapshotDetailListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/SapHanaDatabaseLogSnapshotDetail" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "SapHanaLogBackupFile": { + "type": "object", + "required": [ + "backupId", + "backupSizeInBytes", + "destinationPath", + "externalBackupId", + "logPositionInterval", + "sourceId" + ], + "properties": { + "backupId": { + "type": "integer", + "format": "int64", + "description": "ID of the SAP HANA backup. This ID uniquely identifies a backup to SAP HANA. All backup files from a single backup share the same backup ID." + }, + "externalBackupId": { + "type": "string", + "description": "ID of the external backup set by the backing storage for the corresponding log backup." + }, + "sourceId": { + "type": "integer", + "format": "int64", + "description": "ID of the persistence volume of the SAP HANA database's volume to which this log file belongs. Has a value of 0 for catalog files." + }, + "backupSizeInBytes": { + "type": "integer", + "format": "int64", + "description": "Size, in bytes, of the log file." + }, + "destinationPath": { + "type": "string", + "description": "Path to the log file." + }, + "logPositionInterval": { + "description": "Log positions of the oldest and newest log entries.", + "$ref": "#/definitions/SapHanaLogPositionInterval" + } + }, + "description": "Details about a log file in a log backup that was synchronized from an SAP HANA database." + }, + "SapHanaLogPositionInterval": { + "type": "object", + "required": [ + "newestLogPosition", + "oldestLogPosition" + ], + "properties": { + "oldestLogPosition": { + "type": "integer", + "format": "int64", + "description": "The log position of the oldest log entry contained in the log file." + }, + "newestLogPosition": { + "type": "integer", + "format": "int64", + "description": "The log position of the newest log entry contained in the log file." + } + }, + "description": "Details about the log positions in a backup log file." + }, + "SapHanaBackintCompletionStatus": { + "type": "string", + "description": "Completion status of the task performed by this backint (Rubrik Agent) invocation.", + "enum": [ + "Success", + "Failure", + "Warning" + ] + }, + "SapHanaBackintFunction": { + "type": "string", + "description": "The function performed by this backint (Rubrik Agent) invocation.", + "enum": [ + "backup", + "restore", + "inquire", + "delete" + ] + }, + "SapHanaBackintLogs": { + "type": "object", + "required": [ + "hostname", + "id", + "logs", + "pid" + ], + "properties": { + "id": { + "type": "string", + "description": "ID of managed volume to which the snapshot is being backed up." + }, + "snapshotId": { + "type": "string", + "description": "The snapshot ID corresponding to this backint (Rubrik Agent) invocation." + }, + "hostname": { + "type": "string", + "description": "Hostname of the node on which this backint is run." + }, + "logs": { + "type": "string", + "description": "logs sent by the backint client." + }, + "pid": { + "type": "string", + "description": "Process ID of this backint invocation." 
+ } + } + }, + "SapHanaBackintRequestStatus": { + "type": "object", + "required": [ + "function", + "hostname", + "id", + "pid", + "startTime", + "status" + ], + "properties": { + "id": { + "type": "string", + "description": "ID of managed volume to which the snapshot is being backed up." + }, + "snapshotId": { + "type": "string", + "description": "snapshot ID corresponding to this backint (Rubrik Agent) invocation." + }, + "hostname": { + "type": "string", + "description": "Hostname of the node on which this backint is run." + }, + "backupId": { + "type": "string", + "description": "a unique ID given by SAP HANA to each backup request." + }, + "status": { + "description": "Completion status of the task performed by this backint invocation.", + "$ref": "#/definitions/SapHanaBackintCompletionStatus" + }, + "reason": { + "type": "string", + "description": "Information about what failure has occured or why a warning has been raised." + }, + "function": { + "description": "The function performed by this backint invocation.", + "$ref": "#/definitions/SapHanaBackintFunction" + }, + "pid": { + "type": "string", + "description": "Process ID of this backint invocation." + }, + "startTime": { + "type": "string", + "description": "Date and time when this backint invocation started." + } + } + }, + "ScriptErrorAction": { + "type": "string", + "description": "Action to take if the script returns an error or times out.", + "enum": [ + "abort", + "continue" + ] + }, + "BrowseResponse": { + "type": "object", + "properties": { + "filename": { + "type": "string", + "description": "The name of the file." + }, + "path": { + "type": "string", + "description": "The complete path of the file." + }, + "lastModified": { + "type": "string" + }, + "size": { + "type": "integer", + "format": "int64" + }, + "fileMode": { + "type": "string", + "description": "The type of file, either a regular file or a directory." + }, + "statusMessage": { + "type": "string", + "description": "Description about the status." + }, + "unreadable": { + "type": "integer", + "format": "int32", + "description": "Reason the file is unreadable. Undefined if the file is readable." + } + } + }, + "BrowseResponseListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/BrowseResponse" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "FileVersion": { + "type": "object", + "properties": { + "lastModified": { + "type": "string" + }, + "size": { + "type": "integer", + "format": "int64" + }, + "snapshotId": { + "type": "string", + "description": "The snapshot this file belongs to." + }, + "fileMode": { + "type": "string", + "description": "The type of file, either a regular file or a directory." + }, + "source": { + "type": "string", + "description": "The location where the file is stored, either in the cloud or locally." + } + } + }, + "GlobalSearchApiRequest": { + "type": "object", + "required": [ + "regex", + "snappableIds" + ], + "properties": { + "snappableIds": { + "type": "array", + "description": "Managed IDs of snappables to search across.", + "items": { + "type": "string" + } + }, + "regex": { + "type": "string", + "description": "Regex to match." 
+ } + } + }, + "GlobalSearchApiResponse": { + "type": "object", + "required": [ + "dirs", + "filename", + "isFile", + "snappableId", + "snappableName" + ], + "properties": { + "dirs": { + "type": "array", + "description": "List of directories containing the file.", + "items": { + "type": "string" + } + }, + "filename": { + "type": "string", + "description": "Filename of the file." + }, + "snappableId": { + "type": "string", + "description": "Managed ID of the snappable containing the file." + }, + "snappableName": { + "type": "string", + "description": "Name of the snappable containing the file." + }, + "isFile": { + "type": "boolean", + "description": "True if the returned path is not a directory." + } + } + }, + "GlobalSearchApiResponseListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/GlobalSearchApiResponse" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "SearchResponse": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "filename": { + "type": "string", + "description": "Just the filename without the whole path." + }, + "fileVersions": { + "type": "array", + "items": { + "$ref": "#/definitions/FileVersion" + } + } + } + }, + "SearchResponseListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/SearchResponse" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "SnapshotSearchResponse": { + "type": "object", + "required": [ + "dir", + "fileMode", + "filename", + "lastModified", + "size" + ], + "properties": { + "dir": { + "type": "string", + "description": "Full Path of the file." + }, + "filename": { + "type": "string", + "description": "Filename of the file." + }, + "size": { + "type": "integer", + "format": "int64" + }, + "lastModified": { + "type": "string" + }, + "fileMode": { + "type": "string", + "description": "Type, either a file or a directory." + }, + "unreadable": { + "type": "integer", + "format": "int32", + "description": "Reason the file is unreadable. Undefined if readable." + } + } + }, + "SnapshotSearchResponseListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/SnapshotSearchResponse" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "KeyRotationDetail": { + "type": "object", + "required": [ + "keyProtection", + "keyRecovery", + "nodeId", + "rotationId", + "status" + ], + "properties": { + "rotationId": { + "type": "string", + "description": "ID of the key rotation." + }, + "nodeId": { + "type": "string", + "description": "ID of the node rotating keys." 
+ }, + "status": { + "type": "string", + "description": "Status of the key rotation.", + "enum": [ + "queued", + "inProgress", + "success", + "aborted" + ] + }, + "keyProtection": { + "type": "string", + "description": "Target key protection method.", + "enum": [ + "tpm", + "kmip" + ] + }, + "keyRecovery": { + "type": "boolean", + "description": "Whether to enable Rubrik to recover the encryption keys in the event of a hardware failure." + }, + "startTime": { + "type": "string", + "format": "date-time" + }, + "endTime": { + "type": "string", + "format": "date-time" + } + } + }, + "KeyRotationDetailListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/KeyRotationDetail" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "KeyRotationOptions": { + "type": "object", + "required": [ + "keyProtection", + "keyRecovery" + ], + "properties": { + "keyProtection": { + "type": "string", + "description": "Target key protection method.", + "enum": [ + "tpm", + "kmip" + ] + }, + "keyRecovery": { + "type": "boolean", + "description": "Whether to enable Rubrik to recover the encryption keys in the event of a hardware failure." + }, + "encryptionPassword": { + "type": "string", + "description": "To initiate a new key rotation on a cluster that is already protected by password-based encryption at rest, provide the encryption password.", + "x-secret": true + }, + "newEncryptionPassword": { + "type": "string", + "description": "A new encryption password is required when password-based encryption is first set up. For a cluster that has password-based encryption set up, a new encryption password can be set during key rotation, although this is optional. If a new encryption password is not provided, the existing password is still valid.", + "x-secret": true + } + } + }, + "PasswordInput": { + "type": "object", + "required": [ + "password" + ], + "properties": { + "password": { + "type": "string", + "description": "Password.", + "x-secret": true + } + } + }, + "PasswordStrength": { + "type": "object", + "required": [ + "isStrong" + ], + "properties": { + "isStrong": { + "type": "boolean", + "description": "Boolean value representing whether a specified string is determined to be a strong password. The value is 'true' when the string is determined to be a strong password and 'false' when it is not." + }, + "reason": { + "type": "string", + "description": "Reason why a password failed the strength test." + } + } + }, + "Pkcs10CertificateSigningRequest": { + "type": "object", + "required": [ + "csr" + ], + "properties": { + "csr": { + "type": "string", + "description": "Base64 encoded PKCS#10 certificate signing request. The request should start with -----BEGIN CERTIFICATE REQUEST-----." + } + } + }, + "RksupportCredStatus": { + "type": "object", + "required": [ + "shouldUpdateClusterRksupportCred" + ], + "properties": { + "shouldUpdateClusterRksupportCred": { + "type": "boolean", + "description": "Boolean value that determines whether to update the cluster-wide rksupport credential. When false no update is needed. When true the credentials require an update." 
+ } + } + }, + "RksupportCredUpdateDetails": { + "type": "object", + "properties": { + "communityUserCredentials": { + "$ref": "#/definitions/CommunityUserCredentials" + }, + "registrationDetails": { + "$ref": "#/definitions/RegistrationDetails" + } + } + }, + "SshConfig": { + "type": "object", + "required": [ + "isEnabled" + ], + "properties": { + "isEnabled": { + "type": "boolean", + "description": "Whether SSH is enabled." + } + } + }, + "ZxcvbnConfig": { + "type": "object", + "required": [ + "enableZxcvbn" + ], + "properties": { + "enableZxcvbn": { + "type": "boolean", + "description": "Boolean value that indicates whether ZXCVBN should be used to validate newly created local user passwords." + } + } + }, + "ZxcvbnStatus": { + "type": "object", + "required": [ + "isZxcvbnInUseForLocalUsers" + ], + "properties": { + "isZxcvbnInUseForLocalUsers": { + "type": "boolean", + "description": "Boolean value that indicates whether ZXCVBN is used to validate newly created local user passwords." + } + } + }, + "ApiSessionRequest": { + "type": "object", + "properties": { + "expiration": { + "type": "integer", + "format": "int32", + "description": "This value specifies an interval in minutes. The token expires at the end of the interval. By default, this value is 1 hour. This value cannot exceed 365 days.\n" + }, + "tag": { + "type": "string", + "description": "Name assigned to the token by the user." + } + } + }, + "BulkDeleteSessionsRequest": { + "type": "object", + "required": [ + "tokenIds" + ], + "properties": { + "tokenIds": { + "type": "array", + "items": { + "type": "string" + } + }, + "userId": { + "type": "string", + "description": "ID of the user to delete sessions for." + } + } + }, + "IdpParams": { + "type": "object", + "required": [ + "groups" + ], + "properties": { + "groups": { + "type": "array", + "items": { + "type": "string" + } + }, + "email": { + "type": "string", + "description": "Email address of the IdP user." + } + } + }, + "SessionInitParams": { + "type": "object", + "properties": { + "organizationId": { + "type": "string", + "description": "Bind the new session to the specified organization. When this parameter is not specified, the session will be bound to an organization chosen according to the user's preferences and authorizations.\n" + }, + "apiToken": { + "description": "Provide details for ApiToken to generate a session of type ApiToken.\n", + "$ref": "#/definitions/ApiSessionRequest" + }, + "idpParams": { + "description": "Email and group info for IdP user.", + "$ref": "#/definitions/IdpParams" + } + } + }, + "SessionRequest": { + "type": "object", + "properties": { + "initParams": { + "description": "Provide details to initialize session request. 
This parameter is ignored when mfaParams is specified.\n", + "$ref": "#/definitions/SessionInitParams" + }, + "mfaParams": { + "description": "Provide multi-factor authentication details when using that form of authentication.\n", + "$ref": "#/definitions/MfaAuthRequest" + } + } + }, + "SessionResponse": { + "type": "object", + "required": [ + "status" + ], + "properties": { + "status": { + "description": "Status of the session request.", + "$ref": "#/definitions/MfaAuthenticationStatus" + }, + "session": { + "description": "Session details when the request succeeds.", + "$ref": "#/definitions/SessionSummary" + }, + "mfaResponse": { + "description": "Details for MFA when required.", + "$ref": "#/definitions/MfaAuthResponse" + } + } + }, + "SessionSummary": { + "type": "object", + "required": [ + "id", + "organizationId", + "token", + "userId" + ], + "properties": { + "id": { + "type": "string" + }, + "organizationId": { + "type": "string" + }, + "userId": { + "type": "string" + }, + "token": { + "type": "string" + }, + "expiration": { + "type": "string" + }, + "tag": { + "type": "string" + }, + "groups": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "TokenSummary": { + "type": "object", + "required": [ + "creationTime", + "id", + "lastUsageTime", + "organizationId", + "sessionType", + "tag", + "userId" + ], + "properties": { + "id": { + "type": "string" + }, + "userId": { + "type": "string" + }, + "organizationId": { + "type": "string" + }, + "lastUsageTime": { + "type": "string", + "format": "date-time" + }, + "creationTime": { + "type": "string", + "format": "date-time" + }, + "sessionType": { + "type": "string" + }, + "tag": { + "type": "string" + }, + "lastRequestPath": { + "type": "string" + }, + "lastUsageSourceIp": { + "type": "string" + }, + "expirationTime": { + "type": "string", + "format": "date-time" + }, + "groups": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "TokenSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/TokenSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "SmbConfig": { + "type": "object", + "required": [ + "enforceSmbSecurity" + ], + "properties": { + "enforceSmbSecurity": { + "type": "boolean", + "description": "A Boolean that specifies whether or not the cluster enforces SMB security. When this value is 'true,' SMB security is enforced. When this value is 'false,' SMB security is not enforced. 
The default value is 'false.'\n" + } + } + }, + "SmbDomainAddRequest": { + "allOf": [ + { + "$ref": "#/definitions/SmbDomainJoinRequest" + }, + { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string", + "description": "Specifies name to identify Active Directory domain for SMB authentication.\n" + } + } + } + ] + }, + "SmbDomainDetail": { + "type": "object", + "required": [ + "isStickySmbService", + "name", + "status" + ], + "properties": { + "name": { + "type": "string", + "description": "Specifies name to identify Active Directory domain for SMB authentication.\n" + }, + "status": { + "description": "State of the domain.", + "$ref": "#/definitions/SmbDomainStatus" + }, + "serviceAccount": { + "type": "string", + "description": "Specifies the service principal name (SPN) used for joining the Active Directory domain.\n" + }, + "isStickySmbService": { + "type": "boolean", + "description": "A Boolean value that determines whether to run the SMB service when no shares are exposed. When this value is 'true,' the SMB service runs even when no shares are exposed. When this value is 'false,' the SMB service does not run when no shares are exposed.\n" + } + } + }, + "SmbDomainDetailListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/SmbDomainDetail" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "SmbDomainJoinRequest": { + "type": "object", + "required": [ + "password", + "username" + ], + "properties": { + "username": { + "type": "string", + "description": "Username for joining Active Directory." + }, + "password": { + "type": "string", + "description": "Password for joining Active Directory.", + "x-secret": true + }, + "domainControllers": { + "type": "array", + "items": { + "type": "string", + "description": "Specifies an ordered list of domain controllers that are used to communicate with Active Directory domains.\n" + } + }, + "computerAccountName": { + "type": "string", + "description": "Specifies the computer user and service principal name to create while joining Active Directory. Microsoft requires that this name should be a valid NETBIOS name and must be unique across the forest of this Active directory.\n" + }, + "creationOrganizationUnit": { + "type": "string", + "description": "Specifies organization unit to create the computer user after joining Active Directory.\n" + }, + "isStickySmbService": { + "type": "boolean", + "description": "A Boolean value that determines whether to run the SMB service when no shares are exposed. When this value is 'true,' the SMB service runs even when no shares are exposed. 
When this value is 'false,' the SMB service does not run when no shares are exposed.\n" + } + } + }, + "SmbDomainStatus": { + "type": "string", + "description": "Status of the current authentication attempt.\n", + "enum": [ + "NotConfigured", + "Configured", + "Failed" + ] + }, + "SendEmailParams": { + "type": "object", + "required": [ + "ids" + ], + "properties": { + "ids": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "SmtpInstanceDefinition": { + "type": "object", + "required": [ + "fromEmailId", + "smtpHostname", + "smtpPort", + "smtpSecurity" + ], + "properties": { + "smtpHostname": { + "type": "string" + }, + "smtpPort": { + "type": "integer", + "format": "int64" + }, + "smtpSecurity": { + "type": "string" + }, + "smtpUsername": { + "type": "string" + }, + "smtpPassword": { + "type": "string" + }, + "fromEmailId": { + "type": "string" + }, + "certificateId": { + "type": "string" + } + } + }, + "SmtpInstanceDetail": { + "type": "object", + "required": [ + "fromEmailId", + "id", + "smtpHostname", + "smtpPort", + "smtpSecurity" + ], + "properties": { + "id": { + "type": "string" + }, + "smtpHostname": { + "type": "string" + }, + "smtpPort": { + "type": "integer", + "format": "int64" + }, + "smtpSecurity": { + "type": "string" + }, + "smtpUsername": { + "type": "string" + }, + "fromEmailId": { + "type": "string" + }, + "certificateId": { + "type": "string" + } + } + }, + "SmtpInstanceDetailListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/SmtpInstanceDetail" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "UpdateSmtpInstanceDefinition": { + "type": "object", + "properties": { + "smtpHostname": { + "type": "string" + }, + "smtpPort": { + "type": "integer", + "format": "int64" + }, + "smtpSecurity": { + "type": "string" + }, + "smtpUsername": { + "type": "string" + }, + "smtpPassword": { + "type": "string" + }, + "fromEmailId": { + "type": "string" + }, + "certificateId": { + "type": "string" + } + } + }, + "SnappablePrivilegeStatus": { + "type": "string", + "description": "Whether the data source passes additional privilege checks.", + "enum": [ + "Protectable" + ] + }, + "CorruptSnapshotChainConfig": { + "type": "object", + "required": [ + "corruptSnapshotId", + "snappableId" + ], + "properties": { + "snappableId": { + "type": "string", + "description": "ID of the snappable whose snapshot to corrupt." + }, + "corruptSnapshotId": { + "type": "string", + "description": "ID of the snapshot to corrupt." + } + } + }, + "InternalExpireSnapshotConfig": { + "type": "object", + "required": [ + "snapshotId" + ], + "properties": { + "snapshotId": { + "type": "string", + "description": "ID of the snapshot to expire." + }, + "locationId": { + "type": "string", + "description": "ID of the location to expire in, or empty if all." + } + } + }, + "InternalGarbageCollectStatusResult": { + "type": "object", + "required": [ + "status" + ], + "properties": { + "status": { + "type": "boolean", + "description": "Whether the snapshots have been GCed or not." 
+ } + } + }, + "InternalSnapshotArchiveGarbageCollectStatusConfig": { + "type": "object", + "required": [ + "locationId", + "snappableId", + "snapshotDateTuples" + ], + "properties": { + "snapshotDateTuples": { + "type": "array", + "description": "Snapshots to check for garbage collection, specified as :::.", + "items": { + "type": "string" + } + }, + "snappableId": { + "type": "string" + }, + "locationId": { + "type": "string" + } + } + }, + "InternalSnapshotGarbageCollectStatusConfig": { + "type": "object", + "required": [ + "snappableId", + "snapshotIds" + ], + "properties": { + "snappableId": { + "type": "string" + }, + "snapshotIds": { + "type": "array", + "description": "Snapshots IDs to check for garbage collection.", + "items": { + "type": "string" + } + } + } + }, + "SnapshotDiagnosticInfo": { + "type": "object", + "required": [ + "confCompressionType", + "locationIdtoIsFullMap", + "patchCompressionTypes" + ], + "properties": { + "confCompressionType": { + "type": "integer", + "format": "int32", + "description": "Compression type read from config file." + }, + "patchCompressionTypes": { + "type": "array", + "description": "Return a list of pathfile compression type.", + "items": { + "type": "integer", + "format": "int32" + } + }, + "locationIdtoIsFullMap": { + "description": "A map from locationId to boolean. For each location, says whether ALL of the snapshot's content on that location are full snapshots.", + "$ref": "#/definitions/Map_Boolean" + } + } + }, + "SnapshotStorageStats": { + "type": "object", + "required": [ + "historicIngestedBytes", + "ingestedBytes", + "logicalBytes", + "physicalBytes" + ], + "properties": { + "logicalBytes": { + "type": "integer", + "format": "int64", + "description": "Amount of logical bytes the snapshot represents." + }, + "ingestedBytes": { + "type": "integer", + "format": "int64", + "description": "Amount of bytes inferred to be ingested to our system for the snapshot. This may change for existing logical content, as physical representation of content changes." + }, + "physicalBytes": { + "type": "integer", + "format": "int64", + "description": "Amount of bytes physically stored for the snapshot." + }, + "historicIngestedBytes": { + "type": "integer", + "format": "int64", + "description": "Amount of bytes actually ingested for the snapshot. This value reflects the amount of bytes ingested during snapshot capture and remains consistent across different physical representations." + } + } + }, + "SortOrder": { + "type": "string", + "description": "Sort order.", + "enum": [ + "asc", + "desc" + ] + }, + "Datastore": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string", + "description": "Name for the ESXi host datastore." + } + } + }, + "DatastoreListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/Datastore" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "VsphereLoginInfo": { + "type": "object", + "required": [ + "ip", + "password", + "username" + ], + "properties": { + "ip": { + "type": "string", + "description": "IP address of ESXi host." + }, + "username": { + "type": "string", + "description": "Username of ESXi host." 
+ }, + "password": { + "type": "string", + "description": "Password of ESXi host.", + "x-secret": true + } + } + }, + "ArchivalLocationBandwidthType": { + "type": "string", + "description": "Bandwidth type for the archival location.", + "enum": [ + "Incoming", + "Outgoing", + "All" + ] + }, + "DataLocationUsage": { + "allOf": [ + { + "$ref": "#/definitions/NutanixDataLocationUsage" + }, + { + "$ref": "#/definitions/HypervDataLocationUsage" + }, + { + "type": "object", + "required": [ + "dataArchived", + "dataDownloaded", + "locationId", + "numFilesetsArchived", + "numLinuxFilesetsArchived", + "numManagedVolumesArchived", + "numMssqlDbsArchived", + "numShareFilesetsArchived", + "numStorageArrayVolumeGroupsArchived", + "numVMsArchived", + "numWindowsFilesetsArchived", + "numWindowsVolumeGroupsArchived" + ], + "properties": { + "locationId": { + "type": "string" + }, + "dataDownloaded": { + "type": "integer", + "format": "int64" + }, + "dataArchived": { + "type": "integer", + "format": "int64" + }, + "numVMsArchived": { + "type": "integer", + "format": "int32" + }, + "numFilesetsArchived": { + "type": "integer", + "format": "int32" + }, + "numLinuxFilesetsArchived": { + "type": "integer", + "format": "int32" + }, + "numWindowsFilesetsArchived": { + "type": "integer", + "format": "int32" + }, + "numShareFilesetsArchived": { + "type": "integer", + "format": "int32" + }, + "numMssqlDbsArchived": { + "type": "integer", + "format": "int32" + }, + "numManagedVolumesArchived": { + "type": "integer", + "format": "int32" + }, + "numStorageArrayVolumeGroupsArchived": { + "type": "integer", + "format": "int32" + }, + "numWindowsVolumeGroupsArchived": { + "type": "integer", + "format": "int32" + } + } + } + ] + }, + "DataLocationUsageListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/DataLocationUsage" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." 
+ } + } + }, + "OfflineStatSummary": { + "type": "object", + "required": [ + "frequencyInMin", + "lastUpdateTime", + "name", + "value" + ], + "properties": { + "name": { + "type": "string" + }, + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "frequencyInMin": { + "type": "integer", + "format": "int32" + }, + "lastUpdateTime": { + "type": "string", + "format": "date-time" + } + } + }, + "ProtectedObjectsCount": { + "type": "object", + "required": [ + "numProtected", + "numTotal" + ], + "properties": { + "numTotal": { + "type": "integer", + "format": "int64" + }, + "numProtected": { + "type": "integer", + "format": "int64" + }, + "numNoSla": { + "type": "integer", + "format": "int64" + }, + "numDoNotProtect": { + "type": "integer", + "format": "int64" + } + } + }, + "RemoteClusterStorageStats": { + "type": "object", + "required": [ + "remoteClusterUuid", + "totalStorage" + ], + "properties": { + "remoteClusterUuid": { + "type": "string" + }, + "totalStorage": { + "type": "integer", + "format": "int64" + } + } + }, + "ReplicationStorage": { + "type": "object", + "required": [ + "localVmStorageAcrossAllTargets", + "remoteVmStorageOnPremise" + ], + "properties": { + "remoteVmStorageOnPremise": { + "type": "array", + "items": { + "$ref": "#/definitions/RemoteClusterStorageStats" + } + }, + "localVmStorageAcrossAllTargets": { + "type": "array", + "items": { + "$ref": "#/definitions/RemoteClusterStorageStats" + } + } + } + }, + "SnappableStorageGrowth": { + "type": "object", + "required": [ + "archiveGrowth", + "localGrowth", + "replicaGrowth" + ], + "properties": { + "localGrowth": { + "type": "integer", + "format": "int64" + }, + "replicaGrowth": { + "type": "integer", + "format": "int64" + }, + "archiveGrowth": { + "type": "integer", + "format": "int64" + } + } + }, + "SnappableStorageStats": { + "type": "object", + "required": [ + "lastSnapshotLogicalBytes", + "managedId" + ], + "properties": { + "managedId": { + "type": "string", + "description": "managed ID of the snappable." + }, + "lastSnapshotLogicalBytes": { + "type": "integer", + "format": "int64", + "description": "logical size of last snapshot taken for the snappable." + } + } + }, + "SnappableStorageStatsListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/SnappableStorageStats" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." 
+ } + } + }, + "StorageGrowth": { + "type": "object", + "required": [ + "bytes" + ], + "properties": { + "bytes": { + "type": "integer", + "format": "int64" + } + } + }, + "StreamCount": { + "type": "object", + "required": [ + "count" + ], + "properties": { + "count": { + "type": "integer", + "format": "int64" + } + } + }, + "SystemStorageStats": { + "type": "object", + "required": [ + "available", + "cdp", + "lastUpdateTime", + "liveMount", + "miscellaneous", + "pendingSnapshot", + "snapshot", + "total", + "used" + ], + "properties": { + "total": { + "type": "integer", + "format": "int64" + }, + "used": { + "type": "integer", + "format": "int64" + }, + "available": { + "type": "integer", + "format": "int64" + }, + "snapshot": { + "type": "integer", + "format": "int64" + }, + "liveMount": { + "type": "integer", + "format": "int64" + }, + "pendingSnapshot": { + "type": "integer", + "format": "int64", + "description": "The amount of storage used by Managed Volume main exports on a Rubrik cluster." + }, + "cdp": { + "type": "integer", + "format": "int64", + "description": "The amount of storage used by CDP logs on a Rubrik cluster." + }, + "miscellaneous": { + "type": "integer", + "format": "int64", + "description": "All used storage on a Rubrik cluster that does not fit into one of the following storage categories: snapshot, Live Mount, pending snapshot, or CDP. Miscellaneous storage includes storage that is reserved by SDFS and storage used by background jobs. Total storage on a Rubrik cluster can be determined by adding the values of the following storage categories: available, snapshot, Live Mount, pending snapshot, CDP, and miscellaneous." + }, + "lastUpdateTime": { + "type": "string", + "format": "date-time" + } + } + }, + "TimeStat": { + "type": "object", + "required": [ + "stat", + "time" + ], + "properties": { + "time": { + "type": "string", + "format": "date-time" + }, + "stat": { + "type": "integer", + "format": "int64" + } + } + }, + "TimeseriesRequest": { + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "type": "string" + }, + "range": { + "type": "string" + } + } + }, + "VmStorageStats": { + "type": "object", + "required": [ + "exclusivePhysicalBytes", + "id", + "indexStorageBytes", + "ingestedBytes", + "logicalBytes", + "sharedPhysicalBytes" + ], + "properties": { + "id": { + "type": "string" + }, + "logicalBytes": { + "type": "integer", + "format": "int64" + }, + "ingestedBytes": { + "type": "integer", + "format": "int64" + }, + "exclusivePhysicalBytes": { + "type": "integer", + "format": "int64" + }, + "sharedPhysicalBytes": { + "type": "integer", + "format": "int64" + }, + "indexStorageBytes": { + "type": "integer", + "format": "int64" + } + } + }, + "VmStorageStatsListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/VmStorageStats" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." 
+ } + } + }, + "StorageArrayHostDetail": { + "type": "object", + "required": [ + "storage_array", + "volumes" + ], + "properties": { + "storage_array": { + "$ref": "#/definitions/StorageArrayDetail" + }, + "volumes": { + "type": "array", + "description": "Array containing the details of the storage array volumes that are part of the specified storage array and connected to the specified host.", + "items": { + "$ref": "#/definitions/StorageArrayVolumeDetail" + } + } + } + }, + "StorageArrayDefinition": { + "type": "object", + "required": [ + "arrayType", + "hostname", + "password", + "username" + ], + "properties": { + "arrayType": { + "$ref": "#/definitions/StorageArrayType" + }, + "hostname": { + "type": "string", + "description": "Resolvable hostname or IPv4 address of the storage array." + }, + "username": { + "type": "string" + }, + "password": { + "type": "string", + "x-secret": true + }, + "caCerts": { + "type": "string", + "description": "A digital certificate, or concatenated chain of digital certificates, that permits verification of the public key certificate of the storage array. Each certificate must be an X.509 certificate in Base64 encoded DER format and must start with -----BEGIN CERTIFICATE----- and end with -----END CERTIFICATE-----." + } + } + }, + "StorageArrayDetail": { + "type": "object", + "required": [ + "arrayType", + "connectionStatus", + "hostname", + "id", + "username" + ], + "properties": { + "id": { + "type": "string" + }, + "arrayType": { + "$ref": "#/definitions/StorageArrayType" + }, + "hostname": { + "type": "string", + "description": "Resolvable hostname or IPv4 address of the storage array." + }, + "username": { + "type": "string" + }, + "caCerts": { + "type": "string", + "description": "A digital certificate, or concatenated chain of digital certificates, that permits verification of the public key certificate of the storage array. Each certificate must be an X.509 certificate in Base64 encoded DER format and must start with -----BEGIN CERTIFICATE----- and end with -----END CERTIFICATE-----." + }, + "connectionStatus": { + "description": "Connection status of a Storage Array.", + "$ref": "#/definitions/RefreshableObjectConnectionStatus" + } + } + }, + "StorageArrayDetailListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/StorageArrayDetail" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "StorageArrayType": { + "type": "string", + "description": "Storage array type/brand.", + "enum": [ + "PureStorage" + ] + }, + "StorageArrayVolumeDefinition": { + "type": "object", + "required": [ + "name", + "serial", + "storageArrayId" + ], + "properties": { + "name": { + "type": "string", + "description": "Name of a volume object." + }, + "storageArrayId": { + "type": "string", + "description": "ID assigned to the storage array object of a specified volume." + }, + "serial": { + "type": "string", + "description": "Serial number of a volume object." 
+ } + } + }, + "StorageArrayVolumeDetail": { + "allOf": [ + { + "$ref": "#/definitions/StorageArrayVolumeSummary" + } + ] + }, + "StorageArrayVolumeSummary": { + "allOf": [ + { + "$ref": "#/definitions/StorageArrayVolumeDefinition" + }, + { + "type": "object", + "required": [ + "arrayType", + "id" + ], + "properties": { + "id": { + "type": "string", + "description": "ID assigned to a storage array volume object." + }, + "arrayType": { + "$ref": "#/definitions/StorageArrayType" + }, + "blockDevicePath": { + "type": "string", + "description": "The block device path of the volume on the host." + }, + "mountPoints": { + "type": "array", + "description": "An array containing the mount points of the volume on the host.", + "items": { + "type": "string" + } + } + } + } + ] + }, + "StorageArrayVolumeSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/StorageArrayVolumeSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "ManagedObjectDescendantCountStorageArrayVolumeGroupFields": { + "type": "object", + "properties": { + "storageArrayVolumeGroup": { + "type": "integer", + "format": "int32", + "description": "Number of storage array volume groups." + } + } + }, + "StorageArrayDownloadFilesJobConfig": { + "type": "object", + "required": [ + "paths" + ], + "properties": { + "paths": { + "type": "array", + "description": "An array containing the full source path of each file and folder that is part of the download job. The array must contain at least one path.", + "items": { + "type": "string" + } + }, + "legalHoldDownloadConfig": { + "description": "An optional argument containing a Boolean parameter to depict if the download is being triggered for Legal Hold use case.", + "$ref": "#/definitions/LegalHoldDownloadConfig" + } + } + }, + "StorageArrayHierarchyObjectDescendantCount": { + "type": "object", + "properties": { + "array": { + "type": "integer", + "format": "int32" + }, + "volume": { + "type": "integer", + "format": "int32" + }, + "volumeGroup": { + "type": "integer", + "format": "int32" + } + } + }, + "StorageArrayHierarchyObjectSummary": { + "allOf": [ + { + "$ref": "#/definitions/ManagedHierarchyObjectSummary" + }, + { + "type": "object", + "required": [ + "descendantCount", + "objectType" + ], + "properties": { + "objectType": { + "$ref": "#/definitions/ObjectType" + }, + "descendantCount": { + "$ref": "#/definitions/StorageArrayHierarchyObjectDescendantCount" + }, + "pendingSlaDomain": { + "description": "Describes any pending SLA Domain assignment on this object.", + "$ref": "#/definitions/ManagedObjectPendingSlaInfo" + } + } + } + ] + }, + "StorageArrayHierarchyObjectSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/StorageArrayHierarchyObjectSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." 
+ } + } + }, + "StorageArrayHierarchySortAttribute": { + "type": "string", + "description": "Attribute to sort hierarchy objects.", + "enum": [ + "name", + "effectiveSlaDomainName", + "descendantCountArray", + "descendantCountVolume", + "descendantCountVolumeGroup" + ] + }, + "StorageArrayRestoreFileConfig": { + "type": "object", + "required": [ + "path", + "restorePath" + ], + "properties": { + "path": { + "type": "string", + "description": "Full path of the file that is being restored." + }, + "restorePath": { + "type": "string", + "description": "Full path of the target folder." + } + } + }, + "StorageArrayRestoreFilesConfig": { + "type": "object", + "required": [ + "restoreConfig" + ], + "properties": { + "restoreConfig": { + "type": "array", + "description": "Array containing the full path of the target location for each file being restored.", + "items": { + "$ref": "#/definitions/StorageArrayRestoreFileConfig" + } + }, + "targetHostId": { + "type": "string", + "description": "ID assigned to the host object that is the target of an export." + } + } + }, + "StorageArrayScriptDetail": { + "type": "object", + "required": [ + "scriptErrorAction", + "scriptPath", + "timeoutMs" + ], + "properties": { + "scriptPath": { + "type": "string", + "description": "The script to be run." + }, + "timeoutMs": { + "type": "integer", + "format": "int64", + "description": "Time (in ms) after which the script will be terminated if it has not completed." + }, + "scriptErrorAction": { + "description": "Action to take if the script returns an error or times out.", + "$ref": "#/definitions/ScriptErrorAction" + } + }, + "x-rk-nullable-properties": [ + "scriptPath", + "timeoutMs", + "scriptErrorAction" + ] + }, + "StorageArrayVolumeExportConfig": { + "type": "object", + "required": [ + "exportPath", + "sourceSerial" + ], + "properties": { + "sourceSerial": { + "type": "string", + "description": "Serial number of the storage array volume that is the source for an exported volume snapshot." + }, + "exportPath": { + "type": "string", + "description": "Full path of the mount point for an exported volume snapshot." + } + } + }, + "StorageArrayVolumeGroupBase": { + "type": "object", + "required": [ + "hostId", + "name", + "storageArrayId" + ], + "properties": { + "name": { + "type": "string", + "description": "Name of a volume group object." + }, + "storageArrayId": { + "type": "string", + "description": "ID assigned to the storage array object of a specified volume group." + }, + "hostId": { + "type": "string", + "description": "ID assigned to the application host object for a specified volume group. The application host must have the Rubrik Backup Service correctly installed and running." + }, + "proxyHostId": { + "type": "string", + "description": "ID assigned to the proxy host object for a specified volume group. The proxy host must have the Rubrik Backup Service correctly installed and running." 
+ }, + "preBackupScript": { + "$ref": "#/definitions/StorageArrayScriptDetail" + }, + "postBackupScript": { + "$ref": "#/definitions/StorageArrayScriptDetail" + } + } + }, + "StorageArrayVolumeGroupDefinition": { + "allOf": [ + { + "$ref": "#/definitions/StorageArrayVolumeGroupBase" + }, + { + "type": "object", + "required": [ + "volumeSerials" + ], + "properties": { + "volumeSerials": { + "type": "array", + "description": "Array containing the serial numbers of the volumes in a specified volume group object.", + "items": { + "type": "string" + } + } + } + } + ] + }, + "StorageArrayVolumeGroupDetail": { + "allOf": [ + { + "$ref": "#/definitions/StorageArrayVolumeGroupSummary" + } + ] + }, + "StorageArrayVolumeGroupExportSnapshotJobConfig": { + "type": "object", + "required": [ + "volumeExportConfigs" + ], + "properties": { + "hostId": { + "type": "string", + "description": "ID assigned to the host object that is the target of a volume snapshot export. When the host object ID is not specified, the ID assigned to the proxy host object of the volume group array is used. When the proxy host object ID is also not specified, the ID assigned to the volume group array object is used." + }, + "volumeExportConfigs": { + "type": "array", + "description": "Array containing an object for each exported volume snapshot. Each object contains the serial number of the source volume and the mount point on the host.", + "items": { + "$ref": "#/definitions/StorageArrayVolumeExportConfig" + } + } + } + }, + "StorageArrayVolumeGroupPatch": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name of the specified volume group object." + }, + "volumeSerials": { + "type": "array", + "description": "Array containing the serial numbers of the volumes in the specified volume group object.", + "items": { + "type": "string" + } + }, + "hostId": { + "type": "string", + "description": "ID assigned to the application host object for a specified volume group. The application host must have the Rubrik Backup Service correctly installed and running." + }, + "proxyHostId": { + "type": "string", + "description": "ID assigned to the proxy host object for a specified volume group. The proxy host must have the Rubrik Backup Service correctly installed and running." + }, + "configuredSlaDomainId": { + "type": "string", + "description": "ID of the SLA Domain that is assigned to the specified volume group object." + }, + "preBackupScript": { + "$ref": "#/definitions/StorageArrayScriptDetail" + }, + "postBackupScript": { + "$ref": "#/definitions/StorageArrayScriptDetail" + } + } + }, + "StorageArrayVolumeGroupSnapshotDetail": { + "allOf": [ + { + "$ref": "#/definitions/StorageArrayVolumeGroupSnapshotSummary" + } + ] + }, + "StorageArrayVolumeGroupSnapshotSummary": { + "allOf": [ + { + "$ref": "#/definitions/BaseSnapshotSummary" + }, + { + "type": "object", + "required": [ + "volumeGroupName" + ], + "properties": { + "volumeGroupName": { + "type": "string" + } + } + } + ] + }, + "StorageArrayVolumeGroupSnapshotSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/StorageArrayVolumeGroupSnapshotSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." 
+ } + } + }, + "StorageArrayVolumeGroupSummary": { + "allOf": [ + { + "$ref": "#/definitions/Snappable" + }, + { + "$ref": "#/definitions/StorageArrayVolumeGroupBase" + }, + { + "type": "object", + "required": [ + "arrayName", + "arrayType", + "hostname", + "id", + "isRelic", + "volumes" + ], + "properties": { + "id": { + "type": "string", + "description": "ID assigned to a storage array volume group object." + }, + "arrayType": { + "$ref": "#/definitions/StorageArrayType" + }, + "arrayName": { + "type": "string", + "description": "Name of the storage array object of a specified volume group." + }, + "volumes": { + "type": "array", + "description": "Array containing the summaries of the storage array volume objects for a specified volume group object.", + "items": { + "$ref": "#/definitions/StorageArrayVolumeSummary" + } + }, + "hostname": { + "type": "string", + "description": "Name of the application host object for a specified volume group." + }, + "proxyHostname": { + "type": "string", + "description": "Name of the proxy host object for a specified volume group." + }, + "isRelic": { + "type": "boolean", + "description": "Whether a specified volume group is a relic." + }, + "pendingSlaDomain": { + "description": "Describes any pending SLA Domain assignment on this object.", + "$ref": "#/definitions/ManagedObjectPendingSlaInfo" + } + } + } + ] + }, + "StorageArrayVolumeGroupSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/StorageArrayVolumeGroupSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "GenerateSupportBundleRequest": { + "type": "object", + "properties": { + "eventId": { + "type": "string" + }, + "requestIds": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "UpdateSupportTunnelConfig": { + "type": "object", + "required": [ + "isTunnelEnabled" + ], + "properties": { + "isTunnelEnabled": { + "type": "boolean", + "description": "Pass **_true_** top open the support tunnel, and **_false_** to close." + }, + "inactivityTimeoutInSeconds": { + "type": "integer", + "format": "int64", + "description": "Tunnel inactivity timeout in seconds." + } + } + }, + "BulkDeleteUnmanagedObjectSnapshots": { + "type": "object", + "required": [ + "objectDefinitions" + ], + "properties": { + "objectDefinitions": { + "type": "array", + "items": { + "$ref": "#/definitions/UnmanagedObjectDefinition" + } + } + } + }, + "BulkDeleteUnmanagedSnapshotsConfig": { + "type": "object", + "required": [ + "snapshotIds" + ], + "properties": { + "snapshotIds": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "LocationPathPoint": { + "type": "object", + "required": [ + "managedId", + "name" + ], + "properties": { + "managedId": { + "type": "string" + }, + "name": { + "type": "string" + } + } + }, + "SnappableRecoveryInfo": { + "type": "object", + "required": [ + "locationId", + "newSnappableId", + "oldSnappableId" + ], + "properties": { + "newSnappableId": { + "type": "string", + "description": "The newly assigned ID of the recovered data source." + }, + "oldSnappableId": { + "type": "string", + "description": "The original data source ID used in the owner cluster." + }, + "locationId": { + "type": "string", + "description": "The reader location ID." 
+ }, + "lastUpdatedTimeOpt": { + "type": "string", + "format": "date-time", + "description": "The time when the recovery information was updated." + }, + "isRefreshInProgressOpt": { + "type": "boolean", + "description": "Indicates whether snapshot metadata refresh is in progress for this data source." + } + } + }, + "UnmanagedObjectDefinition": { + "type": "object", + "required": [ + "objectId" + ], + "properties": { + "objectId": { + "type": "string", + "description": "The id of the unmanaged object." + } + } + }, + "UnmanagedObjectSlaAssignmentInfo": { + "type": "object", + "required": [ + "managedIds", + "slaDomainId" + ], + "properties": { + "slaDomainId": { + "type": "string" + }, + "managedIds": { + "type": "array", + "items": { + "type": "string" + } + }, + "shouldApplyToNonPolicySnapshots": { + "type": "boolean" + } + } + }, + "UnmanagedObjectSnapshotSlaAssignmentInfo": { + "type": "object", + "required": [ + "slaDomainId", + "snapshotIds" + ], + "properties": { + "slaDomainId": { + "type": "string" + }, + "snapshotIds": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "UnmanagedObjectSortAttribute": { + "type": "string", + "description": "Attributes that are available to use when sorting query results for unmanaged objects.", + "enum": [ + "Name", + "UnmanagedStatus", + "Location", + "UnmanagedSnapshotCount", + "LocalStorage", + "ArchiveStorage", + "RetentionSlaDomainName", + "ObjectType", + "SnapshotCount", + "AutoSnapshotCount", + "ManualSnapshotCount" + ] + }, + "UnmanagedObjectStatus": { + "type": "string", + "description": "Attributes that are available to use when filtering query results based on unmanaged object status.", + "enum": [ + "Protected", + "Relic", + "Unprotected", + "ReplicatedRelic", + "RemoteUnprotected" + ] + }, + "UnmanagedObjectSummary": { + "type": "object", + "required": [ + "archiveStorage", + "autoSnapshotCount", + "hasSnapshotsWithPolicy", + "id", + "localStorage", + "manualSnapshotCount", + "name", + "objectType", + "physicalLocation", + "retentionSlaDomainId", + "retentionSlaDomainName", + "unmanagedStatus" + ], + "properties": { + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "objectType": { + "type": "string", + "description": "The type of the unmanaged object. This may be VirtualMachine, MssqlDatabase, LinuxFileset, ShareFileset, WindowsFileset, NutanixVirtualMachine, Ec2Instance or StorageArrayVolumeGroup.", + "enum": [ + "VirtualMachine", + "MssqlDatabase", + "LinuxFileset", + "WindowsFileset", + "ShareFileset", + "NutanixVirtualMachine", + "HypervVirtualMachine", + "ManagedVolume", + "Ec2Instance", + "StorageArrayVolumeGroup", + "VcdVapp", + "LinuxHost", + "WindowsHost", + "OracleDatabase", + "VolumeGroup", + "AppBlueprint" + ] + }, + "physicalLocation": { + "type": "array", + "description": "Brief info of all the objects in the physical path to this Object.", + "items": { + "$ref": "#/definitions/LocationPathPoint" + } + }, + "unmanagedStatus": { + "type": "string", + "description": "Unmanaged Status of this object. Protected means that this object is still protected by an SLA Policy. Unprotected means that this object has become unprotected. Relic means that Rubrik has lost contact with this object.", + "enum": [ + "Protected", + "Relic", + "Unprotected", + "ReplicatedRelic", + "RemoteUnprotected" + ] + }, + "autoSnapshotCount": { + "type": "integer", + "format": "int64", + "description": "Number of policy-based snapshots to retain for the specified object."
+ }, + "isRemote": { + "type": "boolean", + "description": "A boolean that specifies if the object is remote or local. When this value is true, the object is remote." + }, + "manualSnapshotCount": { + "type": "integer", + "format": "int64", + "description": "Number of on-demand snapshots and snapshots retrieved from an archival location for specified object." + }, + "localStorage": { + "type": "integer", + "format": "int64", + "description": "Storage being taken up on the local cluster by unmanaged snapshots." + }, + "archiveStorage": { + "type": "integer", + "format": "int64", + "description": "Storage being taken up in the archival location by unmanaged snapshots." + }, + "retentionSlaDomainId": { + "type": "string", + "description": "ID assigned to an SLA retention policy." + }, + "retentionSlaDomainName": { + "type": "string", + "description": "Name of an SLA retention policy." + }, + "retentionSlaDomainPolarisManagedId": { + "type": "string", + "description": "Optional field with the ID assigned to an SLA Domain by Polaris." + }, + "recoveryInfo": { + "description": "Recovery information for the reader archival locations. Available for objects that are recovered from reader archival locations.\n", + "$ref": "#/definitions/SnappableRecoveryInfo" + }, + "hasSnapshotsWithPolicy": { + "type": "boolean", + "description": "A boolean that specifies whether any of the snapshots for this object are being retained by a SLA at any location.\n" + }, + "pendingSlaDomain": { + "description": "Describes any pending SLA Domain assignment on this object.", + "$ref": "#/definitions/ManagedObjectPendingSlaInfo" + } + } + }, + "UnmanagedObjectSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/UnmanagedObjectSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "UnmanagedObjectType": { + "type": "string", + "description": "Attributes that are available to use when filtering query results based on unmanaged object type.", + "enum": [ + "VirtualMachine", + "MssqlDatabase", + "LinuxFileset", + "WindowsFileset", + "ShareFileset", + "NutanixVirtualMachine", + "HypervVirtualMachine", + "ManagedVolume", + "Ec2Instance", + "StorageArrayVolumeGroup", + "VcdVapp", + "LinuxHost", + "WindowsHost", + "OracleDatabase", + "VolumeGroup", + "AppBlueprint" + ] + }, + "UnmanagedSnapshotSummary": { + "type": "object", + "required": [ + "date", + "id", + "retentionSlaDomainId", + "retentionSlaDomainName", + "unmanagedSnapshotType" + ], + "properties": { + "id": { + "type": "string" + }, + "date": { + "type": "string", + "format": "date-time" + }, + "unmanagedSnapshotType": { + "type": "string", + "description": "Type of this Unmanaged Snapshot. On Demand means that it is an on demand snapshot. Retrieved means the snapshot was retrieved from an archival location. Relic means the object this snapshot was taken of is a Relic. Unprotected means the object has since been unprotected after previous SLA driven snapshots were taken.", + "enum": [ + "OnDemand", + "Retrieved", + "Relic", + "Unprotected" + ] + }, + "retentionSlaDomainId": { + "type": "string", + "description": "ID assigned to an SLA retention policy." + }, + "retentionSlaDomainName": { + "type": "string", + "description": "Name of an SLA retention policy." 
+ }, + "retentionSlaDomainPolarisManagedId": { + "type": "string", + "description": "Optional field containing Polaris managed id of the retention SLA domain if it is Polaris managed." + }, + "isRetentionSlaDomainRetentionLocked": { + "type": "boolean", + "description": "Boolean that indicates whether an SLA Domain is Retention Locked. When the value is true the SLA Domain is a Retention Lock SLA Domain." + } + } + }, + "UnmanagedSnapshotSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/UnmanagedSnapshotSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "TotpStatusInternal": { + "type": "object", + "required": [ + "isEnabled", + "isEnforced" + ], + "properties": { + "isEnabled": { + "type": "boolean", + "description": "Indicates whether the time-based one time password (TOTP) authentication method is enabled. Returns true when TOTP is enabled and false when TOTP is disabled.\n" + }, + "isEnforced": { + "type": "boolean", + "description": "Indicates whether the time-based one time password (TOTP) authentication method is being enforced. Returns true when TOTP is enforced and false when TOTP is not enforced.\n" + } + } + }, + "UserAccountLockStatus": { + "type": "object", + "required": [ + "isLocked" + ], + "properties": { + "isLocked": { + "type": "boolean" + }, + "reason": { + "type": "string", + "description": "Specifies why the user is locked." + } + } + }, + "UserAccountStatus": { + "type": "object", + "required": [ + "accountLockStatus" + ], + "properties": { + "accountLockStatus": { + "$ref": "#/definitions/UserAccountLockStatus" + } + } + }, + "UserDefinition": { + "type": "object", + "required": [ + "password", + "username" + ], + "properties": { + "username": { + "type": "string" + }, + "password": { + "type": "string", + "x-secret": true + }, + "firstName": { + "type": "string" + }, + "lastName": { + "type": "string" + }, + "emailAddress": { + "type": "string" + }, + "contactNumber": { + "type": "string" + }, + "mfaServerId": { + "type": "string" + }, + "isTotpEnforced": { + "type": "boolean", + "description": "Indicates whether the time-based one time password (TOTP) authentication method is being enforced. 
Returns true when TOTP is enforced and false when TOTP is not enforced.\n" + } + } + }, + "UserDetail": { + "type": "object", + "required": [ + "authDomainId", + "createTime", + "createdById", + "id", + "username" + ], + "properties": { + "id": { + "type": "string" + }, + "authDomainId": { + "type": "string" + }, + "username": { + "type": "string" + }, + "firstName": { + "type": "string" + }, + "lastName": { + "type": "string" + }, + "emailAddress": { + "type": "string" + }, + "contactNumber": { + "type": "string" + }, + "createdById": { + "type": "string" + }, + "createTime": { + "type": "string" + }, + "mfaServerId": { + "type": "string" + }, + "status": { + "$ref": "#/definitions/UserAccountStatus" + }, + "totpStatus": { + "$ref": "#/definitions/TotpStatusInternal" + } + } + }, + "UserPreferencesInfo": { + "type": "object", + "required": [ + "language" + ], + "properties": { + "defaultOrg": { + "type": "string" + }, + "language": { + "$ref": "#/definitions/Language" + } + } + }, + "UserUpdateInfo": { + "type": "object", + "properties": { + "password": { + "type": "string", + "x-secret": true + }, + "firstName": { + "type": "string" + }, + "lastName": { + "type": "string" + }, + "emailAddress": { + "type": "string" + }, + "contactNumber": { + "type": "string" + }, + "mfaServerId": { + "type": "string" + }, + "isTotpEnforced": { + "type": "boolean", + "description": "Indicates whether the time-based one time password (TOTP) authentication method is being enforced. Returns true when TOTP is enforced and false when TOTP is not enforced.\n" + } + } + }, + "VcdClusterBaseConfig": { + "type": "object", + "required": [ + "hostname", + "username" + ], + "properties": { + "hostname": { + "type": "string" + }, + "username": { + "type": "string" + }, + "caCerts": { + "type": "string", + "description": "Concatenated X.509 certificates in Base64 encoded DER format. Each certificate must start with -----BEGIN CERTIFICATE----- and end with -----END CERTIFICATE-----." + } + } + }, + "VcdClusterConfig": { + "allOf": [ + { + "$ref": "#/definitions/VcdClusterBaseConfig" + }, + { + "type": "object", + "required": [ + "password" + ], + "properties": { + "password": { + "type": "string", + "x-secret": true + } + } + } + ] + }, + "VcdClusterDetail": { + "allOf": [ + { + "$ref": "#/definitions/VcdClusterSummary" + } + ] + }, + "VcdClusterListSortAttribute": { + "type": "string", + "description": "Attributes that are available to use when sorting query results for vCD Cluster objects.", + "enum": [ + "Name", + "Status" + ] + }, + "VcdClusterPatch": { + "type": "object", + "properties": { + "hostname": { + "type": "string" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string", + "x-secret": true + }, + "caCerts": { + "type": "string", + "description": "Concatenated X.509 certificates in Base64 encoded DER format. Each certificate must start with -----BEGIN CERTIFICATE----- and end with -----END CERTIFICATE-----." + }, + "configuredSlaDomainId": { + "type": "string", + "description": "Assign this Vcd Cluster to the given SLA Domain. Existing snapshots of the object will be retained with the configuration of specified SLA Domain." + } + } + }, + "VcdClusterSummary": { + "allOf": [ + { + "$ref": "#/definitions/SlaAssignable" + }, + { + "$ref": "#/definitions/VcdClusterBaseConfig" + }, + { + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "type": "string", + "description": "ID assigned to a vCD Cluster object." 
+ }, + "connectionStatus": { + "description": "Connection status of a vCD Cluster object.", + "$ref": "#/definitions/VcdConnectionStatus" + } + } + } + ] + }, + "VcdClusterSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/VcdClusterSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "VcdConnectionStatus": { + "type": "object", + "required": [ + "status" + ], + "properties": { + "status": { + "description": "Status of a specified vCD Cluster object.", + "$ref": "#/definitions/RefreshableObjectConnectionStatusType" + }, + "message": { + "type": "string", + "description": "Status details for a specified vCD Cluster object. Empty except when the status of the vCD Cluster object is 'BadlyConfigured'." + } + } + }, + "VimserverSummary": { + "type": "object", + "required": [ + "connectionStatus", + "hostname", + "id", + "name", + "vcdClusterId", + "vcdClusterName" + ], + "properties": { + "id": { + "type": "string", + "description": "ID assigned to a VimServer object." + }, + "name": { + "type": "string", + "description": "Name of a VimServer object." + }, + "vcenterId": { + "type": "string", + "description": "ID assigned to a vCenter Server object that is attached to a specified VimServer object." + }, + "vcdClusterId": { + "type": "string", + "description": "ID assigned to a vCD Cluster object that contains a specified VimServer object." + }, + "vcdClusterName": { + "type": "string", + "description": "Name of a vCD Cluster object that contains a specified VimServer object." + }, + "hostname": { + "type": "string", + "description": "Hostname of a vCenter Server that is the target of a connection from a specified VimServer object." + }, + "connectionStatus": { + "description": "Connection status of a vCD Cluster object.", + "$ref": "#/definitions/VcdConnectionStatus" + } + } + }, + "VimserverSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/VimserverSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "VappAttributes": { + "type": "object", + "required": [ + "isVappTemplate" + ], + "properties": { + "isVappTemplate": { + "type": "boolean", + "description": "Indicates whether the vCD hierarchy object is a vApp template object." + } + } + }, + "VcdHierarchyObjectAttributes": { + "type": "object", + "properties": { + "vappAtributes": { + "$ref": "#/definitions/VappAttributes" + } + }, + "description": "attributes pertaining to each vCD hierarchy object." + }, + "VcdHierarchyObjectDescendantCount": { + "type": "object", + "properties": { + "vcdCluster": { + "type": "integer", + "format": "int32", + "description": "Number of descendant vCD cluster objects in a specified hierarchy." + }, + "connectedVimServer": { + "type": "integer", + "format": "int32", + "description": "Number of descendant VIM Server objects that are linked to a vCenter Server object." + }, + "disconnectedVimServer": { + "type": "integer", + "format": "int32", + "description": "Number of descendant VIM Server objects that are not linked to a vCenter Server object." 
+ }, + "org": { + "type": "integer", + "format": "int32", + "description": "Number of descendant Organization objects in a specified hierarchy." + }, + "orgVdc": { + "type": "integer", + "format": "int32", + "description": "Number of descendant virtual data center objects in a specified hierarchy." + }, + "catalog": { + "type": "integer", + "format": "int32", + "description": "Number of descendant vCD catalog objects in a specified hierarchy." + }, + "vApp": { + "type": "integer", + "format": "int32", + "description": "Number of descendant vApp objects in a specified hierarchy." + } + } + }, + "VcdHierarchyObjectSortAttribute": { + "type": "string", + "description": "Attributes that are available to use when sorting query results for vCD hierarchy objects.", + "enum": [ + "Name", + "EffectiveSlaDomainName", + "SlaAssignment", + "ConnectionStatus", + "VappCount" + ] + }, + "VcdHierarchyObjectSummary": { + "allOf": [ + { + "$ref": "#/definitions/ManagedHierarchyObjectSummary" + }, + { + "type": "object", + "required": [ + "descendantCount", + "isDeleted", + "objectType" + ], + "properties": { + "objectType": { + "$ref": "#/definitions/VcdObjectType" + }, + "descendantCount": { + "$ref": "#/definitions/VcdHierarchyObjectDescendantCount" + }, + "connectionStatus": { + "$ref": "#/definitions/VcdConnectionStatus" + }, + "ipAddress": { + "type": "string", + "description": "IPv4 address for a vCD cluster or vCenter Server that is managed through a VIM Server." + }, + "vcenterId": { + "type": "string", + "description": "ID assigned to a vCenter Server instance that is managed through a VIM Server." + }, + "isDeleted": { + "type": "boolean", + "description": "Indicates whether the vCD hierarchy object is deleted." + }, + "extendedAttributes": { + "description": "Additional attributes required by hierarchy object summary.", + "$ref": "#/definitions/VcdHierarchyObjectAttributes" + }, + "pendingSlaDomain": { + "description": "Describes any pending SLA Domain assignment on this object.", + "$ref": "#/definitions/ManagedObjectPendingSlaInfo" + } + } + } + ] + }, + "VcdHierarchyObjectSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/VcdHierarchyObjectSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses."
+ } + } + }, + "VcdObjectType": { + "type": "string", + "description": "Types of object that can be present in the vCD hierarchy.", + "enum": [ + "Cluster", + "VimServer", + "Org", + "OrgVdc", + "Catalog", + "vApp" + ] + }, + "VappExportMode": { + "type": "string", + "description": "Target type for the specified vApp export.", + "enum": [ + "ExportToNewVapp", + "ExportToTargetVapp" + ] + }, + "VappNetworkMode": { + "type": "string", + "description": "Mode of connection of a vApp network to a vCloud organization VDC network.", + "enum": [ + "Isolated", + "NATRouted", + "Bridged" + ] + }, + "VappVmIpAddressingMode": { + "type": "string", + "description": "Method used to allocate IP addresses for the specified vApp network.", + "enum": [ + "DHCP", + "Manual", + "Pool", + "None" + ] + }, + "AppSearchResponse": { + "allOf": [ + { + "$ref": "#/definitions/SearchResponse" + }, + { + "type": "object", + "required": [ + "childObjectId", + "childObjectName" + ], + "properties": { + "childObjectId": { + "type": "string", + "description": "ID assigned to the child virtual machine of the vApp where this file was found." + }, + "childObjectName": { + "type": "string", + "description": "Name of the child virtual machine of the vApp where this file was found." + } + } + } + ] + }, + "AppSearchResponseListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/AppSearchResponse" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "CreateNewVappParams": { + "type": "object", + "required": [ + "name", + "orgVdcId" + ], + "properties": { + "name": { + "type": "string", + "description": "Name of the specified vApp within vCloud." + }, + "orgVdcId": { + "type": "string", + "description": "ID assigned to the Organization VDC object that will contain the newly created vApp object." + } + } + }, + "CreateVappNetworkParams": { + "allOf": [ + { + "$ref": "#/definitions/VappNetworkSummary" + }, + { + "type": "object", + "properties": { + "newName": { + "type": "string", + "description": "Name to assign to the vApp network that is referenced by the specified new vApp network object. If a name is specified, the Rubrik REST API server uses the name to rename the vApp network within the vCloud. If the value is empty, the vApp network is not renamed." + } + } + } + ] + }, + "ManagedObjectDescendantCountVcdVappFields": { + "type": "object", + "properties": { + "vapp": { + "type": "integer", + "format": "int32", + "description": "Number of vCD vApps." 
+ } + } + }, + "VappExportOptions": { + "type": "object", + "required": [ + "allChildVmsWithDefaultNetworkConnections", + "availableStoragePolicies", + "restorableNetworks" + ], + "properties": { + "restorableNetworks": { + "type": "array", + "description": "Array of vApp networks in the vApp snapshot being exported that can be enabled at the export location.", + "items": { + "$ref": "#/definitions/CreateVappNetworkParams" + } + }, + "targetVappNetworks": { + "type": "array", + "description": "Array of vApp networks at the export location that can be connected to the vApp virtual machines in the exported vApp snapshot.", + "items": { + "$ref": "#/definitions/VappNetworkSummary" + } + }, + "availableStoragePolicies": { + "type": "array", + "description": "Storage policies that can be used as a target for virtual machines being exported.", + "items": { + "$ref": "#/definitions/VcdOrgVdcStorageProfile" + } + }, + "allChildVmsWithDefaultNetworkConnections": { + "type": "array", + "description": "Array containing summary information for the vApp virtual machines in the specified vApp snapshot, including the default network mappings.", + "items": { + "$ref": "#/definitions/VappVmRestoreSpec" + } + } + } + }, + "VappExportSnapshotJobConfig": { + "type": "object", + "required": [ + "exportMode", + "networksToRestore", + "vmsToExport" + ], + "properties": { + "exportMode": { + "$ref": "#/definitions/VappExportMode" + }, + "newVappParams": { + "$ref": "#/definitions/CreateNewVappParams" + }, + "targetVappId": { + "type": "string", + "description": "ID assigned to the target vApp object, when the export is into an existing vApp. When the export is not into a target vApp, remove the 'targetVappId' member." + }, + "networksToRestore": { + "type": "array", + "description": "Array of vApp networks that are in the specified vApp snapshot and should be set up as part of the export operation. A vApp network that is not included in the array will not be set up in the exported vApp.", + "items": { + "$ref": "#/definitions/CreateVappNetworkParams" + } + }, + "vmsToExport": { + "type": "array", + "description": "An array containing summary information for the virtual machines included in the vApp export.", + "items": { + "$ref": "#/definitions/VappVmRestoreSpec" + } + }, + "shouldPowerOnVappAfterExport": { + "type": "boolean", + "description": "Boolean value that indicates whether to power on the exported vApp. Use 'true' to turn the power on for the exported vApp or use 'false' to leave the power off for the exported vApp.", + "default": false + } + } + }, + "VappInstantRecoveryJobConfig": { + "type": "object", + "required": [ + "vmsToRestore" + ], + "properties": { + "vmsToRestore": { + "type": "array", + "description": "An array containing the restore specification for an Instant Recovery of virtual machines in a vApp snapshot.", + "items": { + "$ref": "#/definitions/VappVmRestoreSpec" + } + }, + "shouldPowerOnVmsAfterRecovery": { + "type": "boolean", + "description": "Boolean value that indicates whether to power on the recovered virtual machines in a vApp after Instant Recovery. 
Use 'true' to turn the power on for the recovered virtual machines or use 'false' to leave the power off for the virtual machines.", + "default": false + } + } + }, + "VappInstantRecoveryOptions": { + "type": "object", + "required": [ + "availableVappNetworks", + "restorableVms" + ], + "properties": { + "restorableVms": { + "type": "array", + "description": "An array of virtual machines that can be restored and their associated default network connections.", + "items": { + "$ref": "#/definitions/VappVmRestoreSpec" + } + }, + "availableVappNetworks": { + "type": "array", + "description": "An array of network connections available through the specified vApp object.", + "items": { + "$ref": "#/definitions/VappNetworkSummary" + } + } + } + }, + "VappNetworkSummary": { + "type": "object", + "required": [ + "isDeployed", + "name" + ], + "properties": { + "name": { + "type": "string", + "description": "Name of the specified vApp network object." + }, + "parentNetworkId": { + "type": "string", + "description": "vCloud Director ID of the associated organization VDC network object. For an Isolated network, the value is empty." + }, + "isDeployed": { + "type": "boolean", + "description": "Boolean value that indicates whether the specified vApp network object has been deployed. Value is 'true' when the vApp network object has been deployed and 'false' when it has not been deployed." + } + } + }, + "VappVmDetail": { + "allOf": [ + { + "$ref": "#/definitions/VappVmSummary" + }, + { + "type": "object", + "properties": { + "vcenterVm": { + "description": "Detailed information about the vCenter Server virtual machine object that is represented by a specified vApp virtual machine.", + "$ref": "#/definitions/VirtualMachineDetail" + } + } + } + ] + }, + "VappVmNetworkConnection": { + "type": "object", + "required": [ + "addressingMode", + "isConnected", + "nicIndex" + ], + "properties": { + "nicIndex": { + "type": "integer", + "format": "int32", + "description": "Index assigned to the NIC that is used by the specified vApp network connection." + }, + "macAddress": { + "type": "string", + "description": "MAC address of the NIC that is used by the specified vApp network connection." + }, + "addressingMode": { + "$ref": "#/definitions/VappVmIpAddressingMode" + }, + "ipAddress": { + "type": "string", + "description": "IPv4 address to assign to the specified vApp network connection. Set this value only when the network address allocation method is 'Static'. Otherwise, the value should be empty." + }, + "vappNetworkName": { + "type": "string", + "description": "Name of the vApp network to which the NIC corresponding to this connection will connect to." + }, + "isConnected": { + "type": "boolean", + "description": "Boolean value that indicates whether the specified vApp network connection is enabled. Set the value to 'true' to enable the connection or 'false' to disable the connection." + }, + "networkAdapterType": { + "type": "string", + "description": "The network adapter type of the NIC." + } + } + }, + "VappVmRestoreSpec": { + "type": "object", + "required": [ + "name", + "networkConnections", + "vcdMoid" + ], + "properties": { + "name": { + "type": "string", + "description": "Name of the specified vApp virtual machine within vCloud." + }, + "vcdMoid": { + "type": "string", + "description": "vCloud managed object ID (moid) of the specified vApp virtual machine." + }, + "storagePolicyId": { + "type": "string", + "description": "Storage policy where this vApp virtual machine should be restored to. 
If omitted, the virtual machines will be exported to the default storage policy of the target Organization VDC." + }, + "networkConnections": { + "type": "array", + "items": { + "$ref": "#/definitions/VappVmNetworkConnection" + } + } + } + }, + "VappVmSummary": { + "allOf": [ + { + "$ref": "#/definitions/VappVmRestoreSpec" + }, + { + "type": "object", + "required": [ + "storagePolicyId" + ], + "properties": { + "storagePolicyId": { + "type": "string", + "description": "Storage policy where this vApp virtual machine should be restored to. If omitted, the virtual machines will be exported to the default storage policy of the target Organization VDC." + } + } + } + ] + }, + "VcdOrgVdcStorageProfile": { + "type": "object", + "required": [ + "id", + "name" + ], + "properties": { + "name": { + "type": "string", + "description": "Name of the Organization VDC storage profile." + }, + "id": { + "type": "string", + "description": "ID assigned to the Organization VDC storage profile." + } + } + }, + "VcdVappDetail": { + "allOf": [ + { + "$ref": "#/definitions/VcdVappPatch" + }, + { + "$ref": "#/definitions/VcdVappSummary" + }, + { + "type": "object", + "required": [ + "isPaused", + "networks", + "vms" + ], + "properties": { + "isPaused": { + "type": "boolean", + "description": "Boolean value that indicates whether protection activity is paused for the specified vApp. Set to 'true' when protection activity is paused and 'false' when protection activity is not paused. Protection activity includes backup, replication, and archiving." + }, + "networks": { + "type": "array", + "description": "Array that lists the vApp network objects that exist in the specified vApp object.", + "items": { + "$ref": "#/definitions/VappNetworkSummary" + } + }, + "vms": { + "type": "array", + "description": "Array containing detailed information for all of the vApp virtual machine objects.", + "items": { + "$ref": "#/definitions/VappVmDetail" + } + } + } + } + ] + }, + "VcdVappObjectSortAttribute": { + "type": "string", + "description": "Attributes that are available to use when sorting query results for vApp objects.", + "enum": [ + "Name", + "EffectiveSlaDomainName", + "SlaAssignment" + ] + }, + "VcdVappPatch": { + "type": "object", + "properties": { + "configuredSlaDomainId": { + "type": "string", + "description": "ID assigned to the SLA Domain object that manages protection for the specified vApp object. Existing snapshots of the object will be retained with the configuration of the specified SLA Domain." + }, + "isPaused": { + "type": "boolean", + "description": "Boolean value that indicates whether protection activity is paused for the specified vApp. Set to 'true' when protection activity is paused and 'false' when protection activity is not paused. Protection activity includes backup, replication, and archiving." + }, + "isBestEffortSynchronizationEnabled": { + "type": "boolean", + "description": "Boolean value that indicates whether the Rubrik cluster should attempt taking synchronized snapshots across all child virtual machines of the vApp." + }, + "vcdVmMoidsToExcludeFromSnapshot": { + "type": "array", + "description": "Array containing vCloud Director virtual machine moids that will be excluded from vApp snapshots.", + "items": { + "type": "string" + } + } + } + }, + "VcdVappSlaObjectCount": { + "type": "object", + "properties": { + "numVcdVapps": { + "type": "integer", + "format": "int32", + "description": "The number of vApps protected under this SLA Domain." 
+ } + } + }, + "VcdVappSnapshotDetail": { + "allOf": [ + { + "$ref": "#/definitions/BaseSnapshotSummary" + }, + { + "$ref": "#/definitions/VcdVappSnapshotSummaryDetailSharedFields" + }, + { + "type": "object", + "required": [ + "networks", + "vmSnapshots" + ], + "properties": { + "vmSnapshots": { + "type": "array", + "items": { + "$ref": "#/definitions/VcdVmSnapshotDetail" + } + }, + "networks": { + "type": "array", + "description": "Array that lists the vApp networks in the vApp at the time of the snapshot.", + "items": { + "$ref": "#/definitions/VappNetworkSummary" + } + }, + "excludedVcdVmMoids": { + "type": "array", + "description": "Array that lists the Virtual Machines of the vApp that were excluded from the snapshot.", + "items": { + "type": "string" + } + } + } + } + ] + }, + "VcdVappSnapshotSummary": { + "allOf": [ + { + "$ref": "#/definitions/BaseSnapshotSummary" + }, + { + "$ref": "#/definitions/VcdVappSnapshotSummaryDetailSharedFields" + }, + { + "type": "object", + "required": [ + "vmSnapshots" + ], + "properties": { + "vmSnapshots": { + "type": "array", + "items": { + "$ref": "#/definitions/VcdVmSnapshotSummary" + } + } + } + } + ] + }, + "VcdVappSnapshotSummaryDetailSharedFields": { + "type": "object", + "required": [ + "vappName" + ], + "properties": { + "vappName": { + "type": "string", + "description": "ID assigned to the vApp object that is the source of the specified snapshot object." + }, + "isSynchronized": { + "type": "boolean", + "description": "Boolean value that indicates whether a vApp snapshot is synchronized with the source vApp. Set to 'true' when the vApp snapshot is synchronized, and set to 'false' when it is not synchronized." + } + } + }, + "VcdVappSnapshotSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/VcdVappSnapshotSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "VcdVappSummary": { + "allOf": [ + { + "$ref": "#/definitions/Snappable" + }, + { + "type": "object", + "required": [ + "id", + "infraPath", + "name", + "numMissedSnapshot" + ], + "properties": { + "id": { + "type": "string", + "description": "ID assigned to a vCD vApp object." + }, + "name": { + "type": "string", + "description": "Name assigned to a vCD vApp object." + }, + "vcdClusterId": { + "type": "string", + "description": "ID assigned to a vCD Cluster object that contains a specified vApp object." + }, + "vcdClusterName": { + "type": "string", + "description": "Name assigned to a vCD Cluster object that contains a specified vApp object." + }, + "infraPath": { + "type": "array", + "description": "Brief information of all the objects in the infrastructure path to this vCD vApp object.", + "items": { + "$ref": "#/definitions/ManagedHierarchyObjectAncestor" + } + }, + "isRelic": { + "type": "boolean", + "description": "Boolean value that indicates whether a vApp is present on the specified vCD cluster. Set to 'true' when the vApp is present and 'false' when the vApp is not present." + }, + "numMissedSnapshot": { + "type": "integer", + "format": "int32", + "description": "An integer that specifies the number of missed snapshots." + }, + "lastSnapshotTime": { + "type": "string", + "format": "date-time", + "description": "The timestamp of the previous snapshot."
+ }, + "includeBackupTaskInfo": { + "type": "boolean", + "description": "Boolean value that indicates if backup task information is included in the response." + }, + "currentBackupTaskInfo": { + "description": "Information about the current backup task.", + "$ref": "#/definitions/BackupTaskDiagnosticInfo" + }, + "isTemplate": { + "type": "boolean", + "description": "A Boolean value that indicates whether the vApp object is a template. When this value is 'true,' the vApp object is a template. When this value is 'false,' the vApp object is not a template." + }, + "catalogId": { + "type": "string", + "description": "ID of the parent catalog if the vApp object is a template." + }, + "pendingSlaDomain": { + "description": "Describes any pending SLA Domain assignment on this object.", + "$ref": "#/definitions/ManagedObjectPendingSlaInfo" + } + } + } + ] + }, + "VcdVappSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/VcdVappSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "VcdVmSnapshotDetail": { + "allOf": [ + { + "$ref": "#/definitions/VcdVmSnapshotSummary" + }, + { + "type": "object", + "required": [ + "networkConnections" + ], + "properties": { + "networkConnections": { + "type": "array", + "description": "Array of networks present in the virtual machine at the time of the snapshot.", + "items": { + "$ref": "#/definitions/VappVmNetworkConnection" + } + } + } + } + ] + }, + "VcdVmSnapshotSummary": { + "type": "object", + "required": [ + "vcdVmMoid", + "vcenterVmId", + "vmName", + "vmSnapshotId" + ], + "properties": { + "vcenterVmId": { + "type": "string", + "description": "ID assigned to the object that represents the virtual machine that is the source of a specified snapshot object." + }, + "vmSnapshotId": { + "type": "string", + "description": "ID assigned to the object that represents a virtual machine snapshot." + }, + "vmName": { + "type": "string", + "description": "Name of the virtual machine object of the snapshot." + }, + "vcdVmMoid": { + "type": "string", + "description": "Id assigned by vCloud Director to the virtual machine of the specified snapshot." + }, + "indexState": { + "type": "integer", + "format": "int64", + "description": "Integer value representing the state of the indexing job for a snapshot. 0 means that the indexing has not begun or is in progress. 1 means indexing completed successfully. 2 means that the indexer failed to process this snapshot.\n" + } + } + }, + "ClusterVisibilityConfig": { + "type": "object", + "required": [ + "hostGroupFilter", + "id" + ], + "properties": { + "id": { + "type": "string", + "description": "VMware managed object ID of the compute cluster. This is not the ID managed by Rubrik." + }, + "hostGroupFilter": { + "type": "array", + "description": "Names of the host groups being protected.", + "items": { + "type": "string" + } + } + } + }, + "ClusterVisibilityInfo": { + "allOf": [ + { + "$ref": "#/definitions/ClusterVisibilityConfig" + }, + { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string", + "description": "Name of the compute cluster."
+ } + } + } + ] + }, + "RefreshVmRequest": { + "type": "object", + "required": [ + "vmMoid" + ], + "properties": { + "vmMoid": { + "type": "string" + } + } + }, + "StaticIpInfo": { + "type": "object", + "required": [ + "ip_addresses", + "subnet_mask" + ], + "properties": { + "ip_addresses": { + "type": "array", + "description": "IP addresses and ranges, separated by commas.", + "items": { + "type": "string" + } + }, + "subnet_mask": { + "type": "string", + "description": "Subnet mask for the specified IP addresses." + }, + "gateway": { + "type": "string", + "description": "Gateway for the specified IP addresses." + }, + "dns_servers": { + "type": "array", + "description": "DNS Servers for the specified IP addresses.", + "items": { + "type": "string" + } + } + }, + "description": "Information about static IP configuration." + }, + "VsphereCategory": { + "allOf": [ + { + "$ref": "#/definitions/SlaAssignable" + }, + { + "type": "object", + "required": [ + "id", + "name" + ], + "properties": { + "id": { + "type": "string", + "description": "Category ID." + }, + "name": { + "type": "string", + "description": "Category Name." + }, + "vmCount": { + "type": "integer", + "format": "int32", + "description": "An integer value that identifies how many VMs are under this category." + }, + "effectiveSlaDomainId": { + "type": "string", + "description": "Effective SLA Domain ID for the tag category." + }, + "effectiveSlaDomainName": { + "type": "string", + "description": "Effective SLA Domain name for the tag category." + }, + "effectiveSlaSourceObjectId": { + "type": "string", + "description": "Effective SLA Domain source ID for the tag category." + }, + "slaAssignment": { + "type": "string", + "description": "Sla Assignment for the tag category." + } + } + } + ] + }, + "VsphereCategoryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/VsphereCategory" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "VsphereTag": { + "allOf": [ + { + "$ref": "#/definitions/SlaAssignable" + }, + { + "type": "object", + "required": [ + "id", + "name", + "virtualMachineIds" + ], + "properties": { + "id": { + "type": "string", + "description": "Tag ID." + }, + "name": { + "type": "string", + "description": "Tag Name." + }, + "virtualMachineIds": { + "type": "array", + "items": { + "type": "string" + } + }, + "vmCount": { + "type": "integer", + "format": "int32", + "description": "An integer value that identifies how many VMs are under this tag." + }, + "effectiveSlaDomainId": { + "type": "string", + "description": "Effective SLA Domain ID for the tag." + }, + "effectiveSlaDomainName": { + "type": "string", + "description": "Effective SLA Domain name for the tag." + }, + "effectiveSlaSourceObjectId": { + "type": "string", + "description": "Effective SLA Domain source ID for the tag." + }, + "slaAssignment": { + "type": "string", + "description": "Sla Assignment for the tag." + } + } + } + ] + }, + "VsphereTagListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." 
+ }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/VsphereTag" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "AdaptiveThrottlingSettingsVmwareFields": { + "type": "object", + "properties": { + "vmwareThrottlingSettings": { + "$ref": "#/definitions/VmwareAdaptiveThrottlingSettings" + } + } + }, + "BatchVmSnapshotSummaries": { + "type": "object", + "required": [ + "responses" + ], + "properties": { + "responses": { + "type": "array", + "description": "The snapshot summaries for a list of virtual machines.", + "items": { + "$ref": "#/definitions/VmSnapshotSummaries" + } + } + } + }, + "CdpState": { + "type": "string", + "description": "Current CDP State of virtual machine.", + "enum": [ + "NotEnabled", + "Pending", + "WaitingForInitialSnapshot", + "Running", + "Broken", + "FailedToStart", + "TryingToStart", + "Resyncing" + ] + }, + "DataCenterDetail": { + "allOf": [ + { + "$ref": "#/definitions/DataCenterSummary" + }, + { + "type": "object", + "required": [ + "moid", + "objectType" + ], + "properties": { + "objectType": { + "type": "string" + }, + "moid": { + "type": "string" + }, + "hosts": { + "type": "array", + "items": { + "$ref": "#/definitions/VmwareHostSummary" + } + }, + "dataStores": { + "type": "array", + "items": { + "$ref": "#/definitions/DataStoreSummary" + } + } + } + } + ] + }, + "DataCenterSummary": { + "allOf": [ + { + "$ref": "#/definitions/SlaAssignable" + }, + { + "$ref": "#/definitions/EffectiveSlaHolder" + }, + { + "type": "object", + "properties": { + "vcenterId": { + "type": "string" + } + } + } + ] + }, + "DataCenterSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/DataCenterSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "DataStoreDetail": { + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "type": "string" + }, + "moid": { + "type": "string" + }, + "name": { + "type": "string" + }, + "dataStoreType": { + "type": "string" + }, + "capacity": { + "type": "integer", + "format": "int64" + }, + "path": { + "type": "string" + }, + "dataCenter": { + "$ref": "#/definitions/DataCenterSummary" + }, + "virtualDisks": { + "type": "array", + "items": { + "$ref": "#/definitions/VirtualDiskSummary" + } + }, + "isLocal": { + "type": "boolean" + } + } + }, + "DataStoreSummary": { + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "capacity": { + "type": "integer", + "format": "int64" + }, + "dataStoreType": { + "type": "string" + }, + "dataCenterName": { + "type": "string" + }, + "isLocal": { + "type": "boolean" + } + } + }, + "DataStoreSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/DataStoreSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." 
+ } + } + }, + "EsxSubnets": { + "type": "object", + "required": [ + "esxSubnets" + ], + "properties": { + "esxSubnets": { + "type": "string" + } + } + }, + "HostFilterStatus": { + "type": "string", + "description": "Status of Rubrik Io Filter on Hosts.", + "enum": [ + "Installed", + "UnsupportedByVmware", + "OutOfDate", + "PastExpectedDate", + "Uninstalled", + "Unknown" + ] + }, + "HostUiFilterStatus": { + "type": "string", + "description": "Status of Rubrik Io Filter on ESX Host.", + "enum": [ + "Ok", + "InstallInProgress", + "UninstallInProgress", + "UpgradeInProgress", + "UpgradeNeeded", + "RetryInstall", + "CheckVcenter", + "ErrorContactSupport", + "ErrorMaintenanceMode", + "NoFilter", + "Unknown", + "UnsupportedByVmware" + ] + }, + "Map_VmEndPointStatus": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/VmEndPointStatus" + } + }, + "MountDetail": { + "type": "object", + "required": [ + "id", + "isReady", + "snapshotDate", + "vmId", + "vmName" + ], + "properties": { + "id": { + "type": "string" + }, + "snapshotDate": { + "type": "string", + "format": "date-time" + }, + "vmId": { + "type": "string" + }, + "vmName": { + "type": "string" + }, + "virtualMachine": { + "$ref": "#/definitions/VirtualMachineSummary" + }, + "powerStatus": { + "type": "string", + "description": "The power status of the mounted VM(ON,OFF,SLEEP etc.)." + }, + "host": { + "$ref": "#/definitions/VmwareHostSummary" + }, + "isReady": { + "type": "boolean" + }, + "mountRequestId": { + "type": "string" + }, + "unmountRequestId": { + "type": "string" + }, + "datastoreName": { + "type": "string", + "description": "The name of the datastore that contains the mounted VMDK." + }, + "datastoreReady": { + "type": "boolean", + "description": "A boolean value that specifies whether the datastore is ready. When 'true,' the datastore is ready. When 'false,' the datastore is not ready." + }, + "createDatastoreOnlyMount": { + "type": "boolean", + "description": "This boolean value determines whether or not the mount is created as a datastore only. When 'true,' the mount is created with datastore and not the associated virtual machine. When 'false,' the mount is created with both the datastore and the associated virtual machine." + }, + "hasAttachingDisk": { + "type": "boolean", + "description": "A Boolean value that determines whether this job is an attaching disk mount job. When 'true,' this is an attaching disk mount job. When 'false,' this is not an attaching disk mount job." + }, + "attachingDiskCount": { + "type": "integer", + "format": "int32", + "description": "An integer value that identifies how many disks are attached." + }, + "mountTimestamp": { + "type": "string", + "format": "date-time", + "description": "Gives the timestamp at which the mount was created." + } + } + }, + "ParentAppInfo": { + "type": "object", + "required": [ + "isProtectedThruHierarchy" + ], + "properties": { + "id": { + "type": "string", + "description": "ID assigned to the vApp object that manages a specified virtual machine." + }, + "isProtectedThruHierarchy": { + "type": "boolean", + "description": "Boolean value that indicates whether a virtual machine is protected through the SLA Domain assigned to the parent vApp. Set to 'true' when the virtual machine is protected through the parent vApp, otherwise set to 'false'. Direct assignment of a virtual machine to an SLA Domain is not possible when this value is 'true'. 
Also, setting this value to true is not possible when the virtual machine has an existing direct assignment to an SLA Domain." + } + } + }, + "UpdateMountConfig": { + "type": "object", + "required": [ + "powerStatus" + ], + "properties": { + "powerStatus": { + "type": "boolean", + "description": "True to power on, false to power off." + }, + "shouldForce": { + "type": "boolean", + "description": "A Boolean that specifies whether to forcibly power down a virtual machine that is already mounted with Storage vMotion. When this value is 'true', the virtual machine is forcibly powered down. The default value for this Boolean is 'false'." + } + } + }, + "VirtualDiskDetail": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "fileName": { + "type": "string" + }, + "deviceKey": { + "type": "integer", + "format": "int64", + "description": "Unique and reusable key that vSphere uses to identify VMDK files and other devices in the hardware of a virtual machine." + }, + "size": { + "type": "integer", + "format": "int64" + }, + "excludeFromSnapshots": { + "type": "boolean" + } + } + }, + "VirtualDiskSummary": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "fileName": { + "type": "string" + }, + "vmName": { + "type": "string" + } + } + }, + "VirtualDiskSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/VirtualDiskSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "VirtualDiskUpdate": { + "type": "object", + "properties": { + "excludeFromSnapshots": { + "type": "boolean" + } + } + }, + "VirtualMachineBrief": { + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "slaDomainName": { + "type": "string", + "description": "DEPRECATED: The name of the SLA domain this VM belongs to." + }, + "effectiveSlaDomainName": { + "type": "string", + "description": "The name of the effective SLA domain for this VM." + } + } + }, + "VirtualMachineBriefListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/VirtualMachineBrief" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." 
+ } + } + }, + "VirtualMachineBulkUpdate": { + "type": "object", + "required": [ + "updateProperty", + "vmIds" + ], + "properties": { + "vmIds": { + "type": "array", + "items": { + "type": "string" + } + }, + "updateProperty": { + "$ref": "#/definitions/VirtualMachineUpdateWithSecret" + } + } + }, + "VirtualMachineDetail": { + "allOf": [ + { + "$ref": "#/definitions/VirtualMachineUpdate" + }, + { + "$ref": "#/definitions/VirtualMachineSummary" + }, + { + "$ref": "#/definitions/BlackoutWindowResponseInfo" + }, + { + "type": "object", + "required": [ + "cdpState", + "effectiveSlaDomain", + "guestOsType", + "isArrayIntegrationEnabled", + "isArrayIntegrationPossible", + "isCdpEnabled", + "isInVmc", + "maxNestedVsphereSnapshots", + "physicalStorage", + "vcenterName" + ], + "properties": { + "effectiveSlaDomain": { + "$ref": "#/definitions/SlaDomainSummary" + }, + "currentHost": { + "$ref": "#/definitions/VmwareHostSummary" + }, + "virtualDiskIds": { + "type": "array", + "items": { + "type": "string" + } + }, + "snapshots": { + "type": "array", + "items": { + "$ref": "#/definitions/VmSnapshotSummary" + } + }, + "snapshotCount": { + "type": "integer", + "format": "int32" + }, + "maxNestedVsphereSnapshots": { + "type": "integer", + "format": "int32" + }, + "physicalStorage": { + "type": "integer", + "format": "int64" + }, + "logicalSize": { + "type": "integer", + "format": "int64", + "description": "This returns the sum of all virtual disk sizes in the specified virtual machine." + }, + "guestOsName": { + "type": "string", + "description": "Long form name, including type and release designation, for the operating system that is installed on a virtual machine." + }, + "guestOsType": { + "type": "string", + "description": "Type of operating system used by the VMware virtual machine.", + "enum": [ + "Linux", + "Windows", + "Unknown" + ] + }, + "isArrayIntegrationPossible": { + "type": "boolean", + "description": "Boolean value that indicates whether the performance enhancements of storage array integration are available for the specified virtual machine object. Storage array integration is available when all of the datastores that are assigned to the virtual machine reside on a qualified storage array. Set to 'true' when storage array integration is available and set to 'false' when storage array integration is not available." + }, + "isArrayIntegrationEnabled": { + "type": "boolean", + "description": "Boolean value that determines whether the available storage array integration is used with the specified virtual machine. Set to 'true' to use storage array integration and set to 'false' to not use storage array integration. Refer to the value of 'isArrayIntegrationPossible' to determine whether storage array integration is available for a virtual machine." + }, + "guestCredential": { + "$ref": "#/definitions/BaseGuestCredentialDetail" + }, + "isAgentRegistered": { + "type": "boolean", + "description": "Boolean value that indicates whether the Rubrik Backup Service is installed and registered for the specified virtual machine. Set to 'true' when the Rubrik Backup Service is installed and registered and in all other cases set to 'false'." + }, + "oldestRecoveryPoint": { + "type": "string", + "format": "date-time", + "description": "Oldest point in time that we can recover to if this is a CDP enabled VM." + }, + "latestRecoveryPoint": { + "type": "string", + "format": "date-time", + "description": "Latest point in time that we can recover to if this is a CDP enabled VM." 
+ }, + "isCdpEnabled": { + "type": "boolean" + }, + "cdpState": { + "$ref": "#/definitions/CdpState" + }, + "isInVmc": { + "type": "boolean", + "description": "A Boolean that specifies whether the virtual machine is in a VMC environment." + }, + "pendingSlaDomain": { + "description": "Describes any pending SLA Domain assignment on this object.", + "$ref": "#/definitions/ManagedObjectPendingSlaInfo" + }, + "vcenterName": { + "type": "string", + "description": "The name of vCenter that the virtual machine belongs to." + } + } + } + ] + }, + "VirtualMachineScriptDetail": { + "type": "object", + "required": [ + "failureHandling", + "scriptPath", + "timeoutMs" + ], + "properties": { + "scriptPath": { + "type": "string", + "description": "The command to be run in VM guest OS." + }, + "timeoutMs": { + "type": "integer", + "format": "int64", + "description": "Time (in ms) after which the script will be terminated if it has not completed." + }, + "failureHandling": { + "type": "string", + "description": "Action to take if the script returns an error or times out.", + "enum": [ + "abort", + "continue" + ] + } + } + }, + "VirtualMachineSummary": { + "allOf": [ + { + "$ref": "#/definitions/Snappable" + }, + { + "type": "object", + "required": [ + "folderPath", + "guestCredentialAuthorizationStatus", + "infraPath", + "ipAddress", + "isRelic", + "isReplicationEnabled", + "moid", + "snapshotConsistencyMandate", + "vmwareToolsInstalled" + ], + "properties": { + "moid": { + "type": "string" + }, + "vcenterId": { + "type": "string" + }, + "hostName": { + "type": "string" + }, + "hostId": { + "type": "string" + }, + "clusterName": { + "type": "string" + }, + "snapshotConsistencyMandate": { + "type": "string", + "description": "Consistency level mandated for this VM or empty string for none.", + "enum": [ + "UNKNOWN", + "INCONSISTENT", + "CRASH_CONSISTENT", + "FILE_SYSTEM_CONSISTENT", + "VSS_CONSISTENT", + "APP_CONSISTENT" + ] + }, + "powerStatus": { + "type": "string", + "description": "The power status of VM(ON,OFF,SLEEP etc.)." + }, + "protectionDate": { + "type": "string", + "format": "date-time" + }, + "ipAddress": { + "type": "string" + }, + "agentStatus": { + "description": "The status of the Rubrik Backup Service agent for virtual machines.", + "$ref": "#/definitions/AgentStatus" + }, + "toolsInstalled": { + "type": "boolean" + }, + "guestOsName": { + "type": "string" + }, + "isReplicationEnabled": { + "type": "boolean" + }, + "folderPath": { + "type": "array", + "description": "Brief info of all the objects in the folder path to this VM.", + "items": { + "$ref": "#/definitions/VmPathPoint" + } + }, + "infraPath": { + "type": "array", + "description": "Brief info of all the objects in the infrastructure path to this VM.", + "items": { + "$ref": "#/definitions/VmPathPoint" + } + }, + "vmwareToolsInstalled": { + "type": "boolean" + }, + "isRelic": { + "type": "boolean" + }, + "guestCredentialAuthorizationStatus": { + "type": "string", + "description": "Status of authentication with a specific virtual machine using guest credentials. Possible values are: SUCCESSFUL, PENDING, or FAILED." 
+ }, + "cloudInstantiationSpec": { + "description": "Cloud instantiation specification for the selected virtual machine.", + "$ref": "#/definitions/CloudInstantiationSpec" + }, + "parentAppInfo": { + "description": "Configuration information for the vApp that manages a specified virtual machine.", + "$ref": "#/definitions/ParentAppInfo" + } + } + } + ] + }, + "VirtualMachineUpdate": { + "type": "object", + "properties": { + "maxNestedVsphereSnapshots": { + "type": "integer", + "format": "int32" + }, + "snapshotConsistencyMandate": { + "type": "string", + "description": "Consistency level mandated for this VM or empty string for none.", + "enum": [ + "UNKNOWN", + "INCONSISTENT", + "CRASH_CONSISTENT", + "FILE_SYSTEM_CONSISTENT", + "VSS_CONSISTENT", + "APP_CONSISTENT" + ] + }, + "isVmPaused": { + "type": "boolean", + "description": "Whether to pause or resume backups/archival for this VM." + }, + "configuredSlaDomainId": { + "type": "string", + "description": "Assign this VM to the given SLA domain. Existing snapshots of the object will be retained with the configuration of specified SLA Domain." + }, + "preBackupScript": { + "$ref": "#/definitions/VirtualMachineScriptDetail" + }, + "postSnapScript": { + "$ref": "#/definitions/VirtualMachineScriptDetail" + }, + "postBackupScript": { + "$ref": "#/definitions/VirtualMachineScriptDetail" + }, + "isArrayIntegrationEnabled": { + "type": "boolean", + "description": "User setting to dictate whether to use storage array snaphots for ingest. This setting only makes sense for VMs where array based ingest is possible." + }, + "cloudInstantiationSpec": { + "description": "Cloud instantiation specification for the selected virtual machine.", + "$ref": "#/definitions/CloudInstantiationSpec" + }, + "throttlingSettings": { + "$ref": "#/definitions/VmwareAdaptiveThrottlingSettings" + } + } + }, + "VirtualMachineUpdateWithSecret": { + "allOf": [ + { + "$ref": "#/definitions/VirtualMachineUpdate" + }, + { + "type": "object", + "properties": { + "shouldRefreshCacheAfterUpdate": { + "type": "boolean", + "description": "A boolean value that specifies whether an update also refreshes the in-memory cache. When 'false' updates do not refresh the in-memory cache. When 'true' updates refresh the in-memory cache. By default, this value is 'true'. Setting this value to 'false' reduces the time required for updates to complete." + }, + "guestCredential": { + "$ref": "#/definitions/BaseGuestCredential" + } + } + } + ] + }, + "VmEndPointStatus": { + "type": "object", + "required": [ + "status" + ], + "properties": { + "status": { + "type": "string" + }, + "description": { + "type": "string" + } + } + }, + "VmGuestScriptRunConfig": { + "type": "object", + "required": [ + "phase" + ], + "properties": { + "phase": { + "type": "string", + "description": "Run the script configured (if any) for this phase.", + "enum": [ + "PreBackup", + "PostSnap", + "PostBackup" + ] + } + } + }, + "VmPathPoint": { + "type": "object", + "required": [ + "id", + "managedId", + "name" + ], + "properties": { + "id": { + "type": "string", + "description": "ID of the object." + }, + "managedId": { + "type": "string", + "description": "(Deprecated) - See **id**." + }, + "name": { + "type": "string", + "description": "Name of the object." + } + } + }, + "VmSnapshotSummaries": { + "type": "object", + "required": [ + "vmId", + "vmSnapshotsSummaries" + ], + "properties": { + "vmId": { + "type": "string", + "description": "The ID of the virtual machine." 
+ }, + "vmSnapshotsSummaries": { + "type": "array", + "description": "A list of VmSnapshotSummary for the virtual machine.", + "items": { + "$ref": "#/definitions/VmSnapshotSummary" + } + } + }, + "description": "Snapshot summaries for the virtual machine." + }, + "VmSnapshotSummary": { + "allOf": [ + { + "$ref": "#/definitions/BaseSnapshotSummary" + }, + { + "type": "object", + "required": [ + "vmName" + ], + "properties": { + "vmName": { + "type": "string" + }, + "vNicsInfo": { + "type": "array", + "items": { + "$ref": "#/definitions/VmwareVNicBindingInfo" + } + } + } + } + ] + }, + "VmwareAdaptiveThrottlingSettings": { + "type": "object", + "properties": { + "ioLatencyThreshold": { + "type": "integer", + "format": "int32", + "description": "Threshold virtual machine latency value that determines whether to postpone a scheduled snapshot of the vSphere virtual machine. Specify the threshold value in milliseconds (ms)." + }, + "datastoreIoLatencyThreshold": { + "type": "integer", + "format": "int32", + "description": "Threshold datastore latency value, measured across all datastores, that determines whether to postpone a scheduled snapshot of a vSphere virtual machine. Specify the threshold value in milliseconds (ms)." + }, + "cpuUtilizationThreshold": { + "type": "integer", + "format": "int32", + "description": "Threshold virtual machine CPU utilization value that determines whether to postpone a scheduled snapshot of a vSphere virtual machine. Specify the threshold value as a percentage." + } + } + }, + "VmwareDatastoreFreespaceThreshold": { + "type": "object", + "required": [ + "threshold" + ], + "properties": { + "threshold": { + "type": "number", + "format": "double" + }, + "vmId": { + "type": "string" + } + } + }, + "VmwareHostSummary": { + "allOf": [ + { + "$ref": "#/definitions/SlaAssignable" + }, + { + "$ref": "#/definitions/EffectiveSlaHolder" + }, + { + "type": "object", + "required": [ + "ioFilterStatus" + ], + "properties": { + "datacenterId": { + "type": "string" + }, + "computeClusterId": { + "type": "string" + }, + "datastores": { + "type": "array", + "items": { + "$ref": "#/definitions/DataStoreSummary" + } + }, + "esxiVersion": { + "type": "string", + "description": "API Version of the ESXi Host." + }, + "ioFilterStatus": { + "$ref": "#/definitions/HostFilterStatus" + }, + "ioFilterUiStatus": { + "$ref": "#/definitions/HostUiFilterStatus" + }, + "isInVmc": { + "type": "boolean" + } + } + } + ] + }, + "VmwareNetworkCollection": { + "type": "object", + "required": [ + "networks" + ], + "properties": { + "networks": { + "type": "array", + "description": "A list of VMware network information.", + "items": { + "$ref": "#/definitions/VmwareNetworkInfo" + } + } + }, + "description": "A collection of VMware networks." + }, + "VmwareNetworkDeviceInfo": { + "type": "object", + "required": [ + "key", + "name" + ], + "properties": { + "key": { + "type": "integer", + "format": "int32" + }, + "name": { + "type": "string" + } + } + }, + "VmwareNetworkInfo": { + "type": "object", + "required": [ + "moid", + "name" + ], + "properties": { + "moid": { + "type": "string" + }, + "name": { + "type": "string" + }, + "id": { + "type": "string", + "description": "ID of the VMware network." 
+ }, + "networkType": { + "description": "VMware network type.", + "$ref": "#/definitions/VmwareNetworkType" + } + } + }, + "VmwareNetworkType": { + "type": "string", + "description": "VMware network type.", + "enum": [ + "LOCAL_NETWORK", + "DISTRIBUTED_PORT_GROUP", + "OPAQUE_NETWORK", + "OTHER" + ] + }, + "VmwareVNicBindingInfo": { + "type": "object", + "required": [ + "backingNetworkInfo", + "networkDeviceInfo" + ], + "properties": { + "networkDeviceInfo": { + "$ref": "#/definitions/VmwareNetworkDeviceInfo" + }, + "backingNetworkInfo": { + "$ref": "#/definitions/VmwareNetworkInfo" + } + } + }, + "VmwareVmMountSummary": { + "type": "object", + "required": [ + "id", + "isPreserveMoid", + "isReady", + "shouldRollback", + "snapshotDate", + "vmId", + "vmName" + ], + "properties": { + "id": { + "type": "string" + }, + "snapshotDate": { + "type": "string", + "format": "date-time" + }, + "vmId": { + "type": "string" + }, + "vmName": { + "type": "string" + }, + "virtualMachine": { + "$ref": "#/definitions/VirtualMachineSummary" + }, + "hostId": { + "type": "string" + }, + "hostName": { + "type": "string" + }, + "isReady": { + "type": "boolean" + }, + "isPreserveMoid": { + "type": "boolean" + }, + "shouldRollback": { + "type": "boolean" + }, + "mountRequestId": { + "type": "string" + }, + "migrateDatastoreRequestId": { + "type": "string" + }, + "unmountRequestId": { + "type": "string" + }, + "datastoreName": { + "type": "string", + "description": "The name of the datastore that contains the mounted VMDK." + }, + "datastoreReady": { + "type": "boolean", + "description": "A boolean value that specifies whether the datastore is ready. When 'true,' the datastore is ready. When 'false,' the datastore is not ready." + }, + "createDatastoreOnlyMount": { + "type": "boolean", + "description": "This boolean value determines whether or not the mount is created as a datastore only. When 'true,' the mount is created with datastore and not the associated virtual machine. When 'false,' the mount is created with both the datastore and the associated virtual machine." + }, + "hasAttachingDisk": { + "type": "boolean", + "description": "A Boolean value that determines whether this job is an attaching disk mount job. When 'true,' this is an attaching disk mount job. When 'false,' this is not an attaching disk mount job." + }, + "attachingDiskCount": { + "type": "integer", + "format": "int32", + "description": "An integer value that identifies how many disks are attached." + }, + "mountTimestamp": { + "type": "string", + "format": "date-time", + "description": "Gives the timestamp at which the mount was created." + } + } + }, + "VmwareVmMountSummaryListResponse": { + "type": "object", + "properties": { + "hasMore": { + "type": "boolean", + "description": "If there is more." + }, + "data": { + "type": "array", + "description": "List of matching objects.", + "items": { + "$ref": "#/definitions/VmwareVmMountSummary" + } + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total list responses." + } + } + }, + "ManagedObjectDescendantCountVolumeGroupFields": { + "type": "object", + "properties": { + "volumeGroup": { + "type": "integer", + "format": "int32", + "description": "Number of volume groups." + } + } + }, + "VolumeGroupDownloadFilesJobConfig": { + "type": "object", + "required": [ + "paths" + ], + "properties": { + "paths": { + "type": "array", + "description": "An array containing the full source path of each file and folder that is part of the download job. 
The array must contain at least one path.", + "items": { + "type": "string" + } + }, + "legalHoldDownloadConfig": { + "description": "An optional argument containing a Boolean parameter to depict if the download is being triggered for Legal Hold use case.", + "$ref": "#/definitions/LegalHoldDownloadConfig" + } + } + }, + "VolumeGroupMountSnapshotJobConfig": { + "type": "object", + "required": [ + "volumeConfigs" + ], + "properties": { + "volumeConfigs": { + "type": "array", + "description": "The configuration of the Volumes to be mounted on the Host.", + "items": { + "$ref": "#/definitions/VolumeGroupVolumeMountConfig" + } + }, + "targetHostId": { + "type": "string", + "description": "Only specified if mounting on a specific Host is desired. If not specified, Rubrik will simply expose addresses of SMB mounts per recovered Volume. If a mount point is specified in any of the volumeConfigs, this must be defined. If this is specified, but no mount points are, Rubrik will generate mount paths to mount on the target Host for each volume." + }, + "smbValidIps": { + "type": "array", + "description": "IP address of the hosts that are authenticated to access the SMB share.", + "items": { + "type": "string" + } + }, + "smbDomainName": { + "type": "string", + "description": "Domain name of the users that are authenticated to access the SMB share." + }, + "smbValidUsers": { + "type": "array", + "description": "Usernames of the users that are authenticated to access the SMB share.", + "items": { + "type": "string" + } + } + } + }, + "VolumeGroupRestoreFileConfig": { + "type": "object", + "required": [ + "path", + "restorePath" + ], + "properties": { + "path": { + "type": "string", + "description": "Absolute file path." + }, + "restorePath": { + "type": "string", + "description": "Target folder for the copied files." + } + } + }, + "VolumeGroupRestoreFilesConfig": { + "type": "object", + "required": [ + "restoreConfigs" + ], + "properties": { + "restoreConfigs": { + "type": "array", + "description": "Directory of folder to copy files into.", + "items": { + "$ref": "#/definitions/VolumeGroupRestoreFileConfig" + } + }, + "targetHostId": { + "type": "string", + "description": "Windows Host ID to restore files to. If left empty, the host ID of the Volume Group will be used." + } + } + }, + "VolumeGroupVolumeMountConfig": { + "type": "object", + "required": [ + "volumeId" + ], + "properties": { + "volumeId": { + "type": "string", + "description": "ID of the Volume to mount." + }, + "mountPointOnHost": { + "type": "string", + "description": "The path on the Host on which the Volume will be mounted. It must be either an untaken drive letter name, a directory that does not exist but on a valid drive letter, or an empty directory that already exists." + } + } + }, + "WindowsDiskLayoutInfo": { + "type": "object", + "required": [ + "diskNumber", + "diskSize", + "diskType", + "partitionCount", + "partitionEntry", + "partitionStyle" + ], + "properties": { + "diskNumber": { + "type": "integer", + "format": "int32", + "description": "Index of the specified disk. The unique integer that appears in the system disk name." + }, + "diskType": { + "type": "array", + "description": "Array containing the attributes of the specified disk.", + "items": { + "$ref": "#/definitions/WindowsDiskFlag" + } + }, + "diskSize": { + "type": "integer", + "format": "int64", + "description": "Size of the specified disk, in bytes." 
+ }, + "partitionStyle": { + "description": "Partition type of the specified disk.", + "$ref": "#/definitions/WindowsPartitionStyle" + }, + "partitionCount": { + "type": "integer", + "format": "int32", + "description": "Number of partitions on the specified disk." + }, + "partitionEntry": { + "type": "array", + "description": "A variable-sized array, containing a member for each partition on the specified drive. Each member is a JSON object that describes a partition.", + "items": { + "$ref": "#/definitions/WindowsPartitionInfo" + } + }, + "Mbr": { + "description": "JSON object containing information about the master boot record (MBR) partitioning on the specified drive.", + "$ref": "#/definitions/WindowsDiskLayoutInfoMBR" + }, + "Gpt": { + "description": "JSON object containing information about the GUID partition table (GPT) partitioning on the specified drive.", + "$ref": "#/definitions/WindowsDiskLayoutInfoGPT" + } + } + }, + "WindowsDiskLayoutInfoGPT": { + "type": "object", + "required": [ + "diskId", + "maxPartitionCount", + "startingUsableOffset", + "usableLength" + ], + "properties": { + "diskId": { + "type": "string", + "description": "Disk GUID." + }, + "startingUsableOffset": { + "type": "integer", + "format": "int64", + "description": "Starting byte offset of the first usable block." + }, + "usableLength": { + "type": "integer", + "format": "int64", + "description": "Size of the usable blocks on the disk, in bytes." + }, + "maxPartitionCount": { + "type": "integer", + "format": "int32", + "description": "Maximum number of partitions that can be defined in the usable block." + } + } + }, + "WindowsDiskLayoutInfoMBR": { + "type": "object", + "required": [ + "signature" + ], + "properties": { + "signature": { + "type": "integer", + "format": "int64", + "description": "Integer value that is the signature of the specified drive, containing information about the MBR partitioning on the drive." + } + } + }, + "WindowsHostLayout": { + "type": "object", + "required": [ + "diskLayout", + "volumeLayout" + ], + "properties": { + "diskLayout": { + "type": "array", + "description": "Array with the layout information for all of the disks on the specified host.", + "items": { + "$ref": "#/definitions/WindowsDiskLayoutInfo" + } + }, + "volumeLayout": { + "type": "array", + "description": "Array with the layout information for all of the volumes on the specified host.", + "items": { + "$ref": "#/definitions/WindowsVolumeLayoutInfo" + } + } + } + }, + "WindowsPartitionInfo": { + "type": "object", + "required": [ + "partitionLength", + "partitionNumber", + "partitionStyle", + "rewritePartition", + "startingOffset" + ], + "properties": { + "partitionStyle": { + "description": "Format of the partition.", + "$ref": "#/definitions/WindowsPartitionStyle" + }, + "startingOffset": { + "type": "integer", + "format": "int64", + "description": "Starting offset of the partition." + }, + "partitionLength": { + "type": "integer", + "format": "int64", + "description": "Size of the partition, in bytes." + }, + "partitionNumber": { + "type": "integer", + "format": "int32", + "description": "Number of the partition, where 1 is the base." + }, + "rewritePartition": { + "type": "boolean", + "description": "Boolean value that indicates whether the specified partition is rewritable. Set to true to indicate that it is rewritable (required)." 
+ }, + "Mbr": { + "description": "JSON object containing partition information specific to master boot record (MBR) disks.", + "$ref": "#/definitions/WindowsPartitionInfoMBR" + }, + "Gpt": { + "description": "JSON object containing partition information specific to GUID partition table (GPT) disks.", + "$ref": "#/definitions/WindowsPartitionInfoGPT" + } + } + }, + "WindowsPartitionInfoGPT": { + "type": "object", + "required": [ + "attributes", + "name", + "partitionId", + "partitionType" + ], + "properties": { + "partitionType": { + "description": "Partition type.", + "$ref": "#/definitions/WindowsPartitionTypeGPT" + }, + "partitionId": { + "type": "string", + "description": "Partition GUID." + }, + "attributes": { + "type": "array", + "description": "Bitmask of the enumeration values specified by WindowsPartitionAttributeGPT.", + "items": { + "$ref": "#/definitions/WindowsPartitionAttributeGPT" + } + }, + "name": { + "type": "string", + "description": "String value that describes the partition." + } + } + }, + "WindowsPartitionInfoMBR": { + "type": "object", + "required": [ + "bootIndicator", + "hiddenSectors", + "partitionType", + "recognizedPartition" + ], + "properties": { + "partitionType": { + "description": "Partition type used by the MBR disk drivers.", + "$ref": "#/definitions/WindowsPartitionTypeMBR" + }, + "bootIndicator": { + "type": "boolean", + "description": "Boolean value that indicates whether the specified partition in a boot partition. Set to true to indicate that it is a boot partition. Set to false to indicate it is not a boot partition." + }, + "recognizedPartition": { + "type": "boolean", + "description": "Boolean value that indicates whether the specified partition in a recognized type. Set to true to indicate that it is a recognized type. Set to false to indicate it is not a recognized type." + }, + "hiddenSectors": { + "type": "integer", + "format": "int32", + "description": "Integer value that specifies the number of hidden sectors to allocate when creating the partition table." + } + } + }, + "WindowsVolumeDiskExtent": { + "type": "object", + "required": [ + "diskNumber", + "extentLength", + "startingOffset" + ], + "properties": { + "diskNumber": { + "type": "integer", + "format": "int32", + "description": "Number of the disk that contains the specified extent." + }, + "startingOffset": { + "type": "integer", + "format": "int64", + "description": "Offset, from the beginning of the disk to the specified extent, in bytes." + }, + "extentLength": { + "type": "integer", + "format": "int64", + "description": "Number of bytes in the specified extent." + } + } + }, + "WindowsVolumeLayoutInfo": { + "type": "object", + "required": [ + "diskExtents", + "fsType", + "mountPoints", + "volumeFlags", + "volumeName", + "volumeType" + ], + "properties": { + "volumeName": { + "type": "string", + "description": "Name and unique identifier of the specified volume." 
+ }, + "diskExtents": { + "type": "array", + "description": "Array containing a member for each physical extent of the specified volume.", + "items": { + "$ref": "#/definitions/WindowsVolumeDiskExtent" + } + }, + "mountPoints": { + "type": "array", + "description": "Array containing a member for each mount point of the specified volume.", + "items": { + "type": "string" + } + }, + "volumeType": { + "description": "Volume type of the specified volume.", + "$ref": "#/definitions/WindowsVolumeType" + }, + "fsType": { + "description": "File system type of the specified volume.", + "$ref": "#/definitions/WindowsVolumeFsType" + }, + "volumeFlags": { + "type": "array", + "description": "Array containing the attributes of the specified volume.", + "items": { + "$ref": "#/definitions/WindowsVolumeFlag" + } + } + } + }, + "WindowsDiskFlag": { + "type": "string", + "description": "Enumeration values used in bitmasks to specify various disk attributes.", + "enum": [ + "VDS_DF_AUDIO_CD", + "VDS_DF_HOTSPARE", + "VDS_DF_RESERVE_CAPABLE", + "VDS_DF_MASKED", + "VDS_DF_STYLE_CONVERTIBLE", + "VDS_DF_CLUSTERED", + "VDS_DF_READ_ONLY", + "VDS_DF_SYSTEM_DISK", + "VDS_DF_BOOT_DISK", + "VDS_DF_PAGEFILE_DISK", + "VDS_DF_HIBERNATIONFILE_DISK", + "VDS_DF_CRASHDUMP_DISK", + "VDS_DF_HAS_ARC_PATH", + "VDS_DF_DYNAMIC", + "VDS_DF_BOOT_FROM_DISK", + "VDS_DF_CURRENT_READ_ONLY" + ] + }, + "WindowsPartitionAttributeGPT": { + "type": "string", + "description": "Enumeration values used to specify the Extensible Firmware Interface (EFI) attributes of the partition.", + "enum": [ + "PLATFORM_REQUIRED", + "NO_DRIVE_LETTER", + "HIDDEN", + "SHADOW_COPY", + "READ_ONLY" + ] + }, + "WindowsPartitionStyle": { + "type": "string", + "description": "Enumeration values used to specify the format of a partition.", + "enum": [ + "PARTITION_STYLE_MBR", + "PARTITION_STYLE_GPT", + "PARTITION_STYLE_RAW", + "UNKNOWN" + ] + }, + "WindowsPartitionTypeGPT": { + "type": "string", + "description": "Enumeration values used to specify the GPT partition type.", + "enum": [ + "PARTITION_BASIC_DATA_GUID", + "PARTITION_ENTRY_UNUSED_GUID", + "PARTITION_SYSTEM_GUID", + "PARTITION_MSFT_RESERVED_GUID", + "PARTITION_LDM_METADATA_GUID", + "PARTITION_LDM_DATA_GUID", + "PARTITION_MSFT_RECOVERY_GUID", + "UNKNOWN" + ] + }, + "WindowsPartitionTypeMBR": { + "type": "string", + "description": "Enumeration values used to specify the valid partition types used by disk drivers.", + "enum": [ + "ENTRY_UNUSED", + "FAT_12", + "XENIX_1", + "XENIX_2", + "FAT_16", + "EXTENDED", + "HUGE_PARTITION", + "IFS", + "OS2BOOTMGR", + "FAT_32", + "FAT_32_XINT13", + "XINT13", + "XINT13_EXTENTDED", + "PREP", + "LDM", + "DM", + "EZDRIVE", + "UNIX", + "SPACES", + "GPT", + "NTFT", + "NTFT_VALID", + "UNKNOWN" + ] + }, + "WindowsVolumeFlag": { + "type": "string", + "description": "Enumeration values used in bitmasks to specify various volume attributes.", + "enum": [ + "VDS_VF_SYSTEM_VOLUME", + "VDS_VF_BOOT_VOLUME", + "VDS_VF_ACTIVE", + "VDS_VF_READONLY", + "VDS_VF_HIDDEN", + "VDS_VF_CAN_EXTEND", + "VDS_VF_CAN_SHRINK", + "VDS_VF_PAGEFILE", + "VDS_VF_HIBERNATION", + "VDS_VF_CRASHDUMP", + "VDS_VF_INSTALLABLE", + "VDS_VF_LBN_REMAP_ENABLED", + "VDS_VF_FORMATTING", + "VDS_VF_NOT_FORMATTABLE", + "VDS_VF_NTFS_NOT_SUPPORTED", + "VDS_VF_FAT32_NOT_SUPPORTED", + "VDS_VF_FAT_NOT_SUPPORTED", + "VDS_VF_NO_DEFAULT_DRIVE_LETTER", + "VDS_VF_PERMANENTLY_DISMOUNTED", + "VDS_VF_PERMANENT_DISMOUNT_SUPPORTED", + "VDS_VF_SHADOW_COPY", + "VDS_VF_FVE_ENABLED", + "VDS_VF_DIRTY", + "VDS_VF_REFS_NOT_SUPPORTED", + 
"VDS_VF_BACKS_BOOT_VOLUME", + "VDS_VF_BACKED_BY_WIM_IMAGE" + ] + }, + "WindowsVolumeFsType": { + "type": "string", + "description": "The file system type of a Windows volume.", + "enum": [ + "RAW_VOLUME", + "NTFS", + "FAT", + "FAT32", + "ReFS", + "UNKNOWN" + ] + }, + "WindowsVolumeType": { + "type": "string", + "description": "The volume type of a Windows volume.", + "enum": [ + "VDS_VT_UNKNOWN", + "VDS_VT_SIMPLE", + "VDS_VT_SPAN", + "VDS_VT_STRIPE", + "VDS_VT_MIRROR", + "VDS_VT_PARITY", + "UNKNOWN" + ] + } + } +}