diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 704f14657..dd0b453cb 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -58,6 +58,7 @@ jobs:
     # Build debian packages
     strategy:
       matrix:
+        # Focal builds are broken in CI, we can only build bionic for now
         #suite: [focal, bionic]
         suite: [bionic]
         include:
@@ -65,6 +66,7 @@
 #            os-version: ubuntu-20.04
           - suite: bionic
             os-version: ubuntu-18.04
+      fail-fast: false
     runs-on: ${{ matrix.os-version }}
     steps:
       - uses: actions/checkout@v2
@@ -167,7 +169,7 @@ jobs:
       - name: Setup Java Action
         uses: actions/setup-java@v1
         with:
-          java-version: '8.0.232'
+          java-version: '8.0.252'
           architecture: x64
       - name: Install dependencies
         run: |
@@ -305,6 +307,7 @@ jobs:
     if: github.event_name == 'release' && github.event.action == 'published'
     strategy:
       matrix:
+        # Focal builds are broken in CI, we can only build bionic for now
         #suite: [focal, bionic]
         suite: [bionic]
         include:
@@ -312,6 +315,7 @@
 #            os-version: ubuntu-20.04
           - suite: bionic
             os-version: ubuntu-18.04
+      fail-fast: false
     runs-on: ${{ matrix.os-version }}
     steps:
       - uses: actions/checkout@v2
diff --git a/k8s/docker-entrypoint.sh b/k8s/docker-entrypoint.sh
index 8443238c3..646f7d76a 100755
--- a/k8s/docker-entrypoint.sh
+++ b/k8s/docker-entrypoint.sh
@@ -41,7 +41,7 @@ restore() {
         echo "Skipping restore operation"
     else
         echo "Restoring backup $BACKUP_NAME"
-        python3 -m medusa.service.grpc.restore
+        python3 -m medusa.service.grpc.restore -- "/etc/medusa/medusa.ini" $RESTORE_KEY
         echo $RESTORE_KEY > $last_restore_file
     fi
 }
diff --git a/medusa/config.py b/medusa/config.py
index f18f3173f..c72dc6264 100644
--- a/medusa/config.py
+++ b/medusa/config.py
@@ -206,6 +206,9 @@ def parse_config(args, config_file):
         if evaluate_boolean(config['cassandra']['use_sudo']):
             logging.warning('Forcing use_sudo to False because Kubernetes mode is enabled')
             config['cassandra']['use_sudo'] = 'False'
+            config['storage']['use_sudo_for_restore'] = 'False'
+        if "POD_IP" in os.environ:
+            config['storage']['fqdn'] = os.environ["POD_IP"]

     resolve_ip_addresses = evaluate_boolean(config['cassandra']['resolve_ip_addresses'])
     config.set('cassandra', 'resolve_ip_addresses', 'True' if resolve_ip_addresses else 'False')
diff --git a/medusa/network/hostname_resolver.py b/medusa/network/hostname_resolver.py
index 8c6bc75dc..f6e4352fc 100644
--- a/medusa/network/hostname_resolver.py
+++ b/medusa/network/hostname_resolver.py
@@ -13,8 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import socket
+import dns.resolver
+import dns.reversename
+import ipaddress
 import logging
+import socket


 class HostnameResolver:
@@ -23,16 +26,41 @@ def __init__(self, resolve_addresses, k8s_mode):
         self.k8s_mode = k8s_mode

     def resolve_fqdn(self, ip_address=''):
+        logging.info(f"Resolving ip address {ip_address}")
         ip_address_to_resolve = ip_address if ip_address != '' else socket.gethostbyname(socket.getfqdn())
-
+        logging.info(f"ip address to resolve {ip_address_to_resolve}")
         if str(self.resolve_addresses) == "False":
             logging.debug("Not resolving {} as requested".format(ip_address_to_resolve))
             return ip_address_to_resolve

-        fqdn = socket.getfqdn(ip_address_to_resolve)
-        returned_fqdn = fqdn
-        if self.k8s_mode and fqdn.find('.') > 0:
-            returned_fqdn = fqdn.split('.')[0]
-        logging.debug("Resolved {} to {}".format(ip_address_to_resolve, returned_fqdn))
+        hostname = socket.getfqdn(ip_address_to_resolve)
+        if self.k8s_mode:
+            hostname = self.compute_k8s_hostname(ip_address_to_resolve)
+        logging.debug("Resolved {} to {}".format(ip_address_to_resolve, hostname))
+
+        return hostname
+
+    def compute_k8s_hostname(self, ip_address):
+        if (self.is_ipv4(ip_address) or self.is_ipv6(ip_address)):
+            reverse_name = dns.reversename.from_address(ip_address).to_text()
+            fqdns = dns.resolver.resolve(reverse_name, 'PTR')
+            for fqdn in fqdns:
+                if not self.is_ipv4(fqdn.to_text().split('.')[0].replace('-', '.')) \
+                        and not self.is_ipv6(fqdn.to_text().split('.')[0].replace('-', ':')):
+                    return fqdn.to_text().split('.')[0]
+
+        return ip_address
+
+    def is_ipv4(self, ip_address):
+        try:
+            ipaddress.IPv4Network(ip_address)
+            return True
+        except ValueError:
+            return False

-        return returned_fqdn
+    def is_ipv6(self, ip_address):
+        try:
+            ipaddress.IPv6Network(ip_address)
+            return True
+        except ValueError:
+            return False
diff --git a/medusa/purge.py b/medusa/purge.py
index 38afa4e0b..9e042f3aa 100644
--- a/medusa/purge.py
+++ b/medusa/purge.py
@@ -43,11 +43,13 @@ def main(config, max_backup_age=0, max_backup_count=0):
         # list all backups to purge based on count conditions
         backups_to_purge |= set(backups_to_purge_by_count(backups, max_backup_count))
         # purge all candidate backups
-        purge_backups(storage, backups_to_purge, config.storage.backup_grace_period_in_days, config.storage.fqdn)
+        (nb_objects_purged, total_purged_size, total_objects_within_grace) \
+            = purge_backups(storage, backups_to_purge, config.storage.backup_grace_period_in_days, config.storage.fqdn)

         logging.debug('Emitting metrics')
         tags = ['medusa-node-backup', 'purge-error', 'PURGE-ERROR']
         monitoring.send(tags, 0)
+        return (nb_objects_purged, total_purged_size, total_objects_within_grace, len(backups_to_purge))
     except Exception as e:
         traceback.print_exc()
         tags = ['medusa-node-backup', 'purge-error', 'PURGE-ERROR']
@@ -75,13 +77,6 @@ def backups_to_purge_by_count(backups, max_backup_count):
         return sorted_node_backups[:backups_to_remove_count]
     return list()

-# def backups_to_purge_by_name(backups, backup_name):
-#     """
-#     Return a list
-#     Returns the list of the backups to delete for a given name (1 name = 1 backup, but on N nodes).
-#     """
-#     return list(filter(lambda backup: backup.name = backup_name, backups)) or list()
-

 def purge_backups(storage, backups, backup_grace_period_in_days, local_fqdn):
     """
@@ -122,6 +117,8 @@ def purge_backups(storage, backups, backup_grace_period_in_days, local_fqdn):
         backup_grace_period_in_days
     ))

+    return (nb_objects_purged, total_purged_size, total_objects_within_grace)
+

 def purge_backup(storage, backup):
     purged_objects = 0
diff --git a/medusa/restore_cluster.py b/medusa/restore_cluster.py
index e10e3a261..56a907a1f 100644
--- a/medusa/restore_cluster.py
+++ b/medusa/restore_cluster.py
@@ -14,9 +14,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import collections
 import datetime
 import logging
+import operator
 import socket
 import sys
 import traceback
@@ -129,9 +129,9 @@ def __init__(self, cluster_backup, config, temp_dir, host_list, seed_target, kee
         self.fqdn_resolver = HostnameResolver(fqdn_resolver, k8s_mode)
         self._version_target = version_target

-    def execute(self):
+    def prepare_restore(self):
         logging.info('Ensuring the backup is found and is complete')
-        if not self.cluster_backup.is_complete():
+        if not self.config.kubernetes.enabled and not self.cluster_backup.is_complete():
             raise RuntimeError('Backup is not complete')

         # CASE 1 : We're restoring using a seed target. Source/target mapping will be built based on tokenmap.
@@ -149,6 +149,8 @@ def execute(self):
         self._capture_release_version(session=None)
         logging.info('Starting Restore on all the nodes in this list: {}'.format(self.host_list))

+    def execute(self):
+        self.prepare_restore()
         self._restore_data()

     @staticmethod
@@ -224,23 +226,13 @@ def _chunk(my_list, nb_chunks):
             topology_matches = False

         if topology_matches:
-            # We can associate each restore node with exactly one backup node
-            backup_ringmap = collections.defaultdict(list)
-            target_ringmap = collections.defaultdict(list)
-            for token, host in backup_tokens.items():
-                backup_ringmap[token].append(host)
-            for token, host in target_tokens.items():
-                target_ringmap[token].append(host)
-
-            self.ringmap = backup_ringmap
-            i = 0
-            for token, hosts in backup_ringmap.items():
-                # take the node that has the same token list or pick the one with the same position in the map.
-                restore_host = target_ringmap.get(token, list(target_ringmap.values())[i])[0]
+            # backup and restore nodes are ordered by smallest token and associated one by one
+            sorted_backup_nodes = self._tokenmap_to_sorted_nodes(tokenmap)
+            sorted_target_nodes = self._tokenmap_to_sorted_nodes(target_tokenmap)
+            for i in range(len(sorted_backup_nodes)):
+                restore_host = sorted_target_nodes[i][0]
                 is_seed = True if self.fqdn_resolver.resolve_fqdn(restore_host) in self._get_seeds_fqdn() else False
-                self.host_map[restore_host] = {'source': [hosts[0]], 'seed': is_seed}
-                i += 1
-            logging.debug("self.host_map: {}".format(self.host_map))
+                self.host_map[restore_host] = {'source': [sorted_backup_nodes[i][0]], 'seed': is_seed}
         else:
             # Topologies are different between backup and restore clusters. Using the sstableloader for restore.
             self.use_sstableloader = True
@@ -254,11 +246,19 @@ def _chunk(my_list, nb_chunks):
                 # associate one restore host with several backups as we don't have the same number of nodes.
                 self.host_map[restore_hosts[i]] = {'source': grouped_backups[i], 'seed': False}

+    def _tokenmap_to_sorted_nodes(self, tokenmap):
+        nodes = dict()
+        for node in tokenmap.keys():
+            nodes[node] = tokenmap[node]['tokens'][0]
+        return sorted(nodes.items(), key=operator.itemgetter(1))
+
     @staticmethod
     def _is_restore_in_place(backup_tokenmap, target_tokenmap):
         # If at least one node is part of both tokenmaps, then we're restoring in place
         # Otherwise we're restoring a remote cluster
-        return len(set(backup_tokenmap.keys()) & set(target_tokenmap.keys())) > 0
+        logging.info(f"backup tokenmap keys: {backup_tokenmap.keys()}")
+        logging.info(f"target tokenmap keys: {target_tokenmap.keys()}")
+        return len(set(backup_tokenmap.keys()).intersection(set(target_tokenmap.keys()))) > 0

     def _get_seeds_fqdn(self):
         seeds = list()
diff --git a/medusa/service/grpc/client.py b/medusa/service/grpc/client.py
index ed4b01d07..d51fc08f7 100644
--- a/medusa/service/grpc/client.py
+++ b/medusa/service/grpc/client.py
@@ -93,10 +93,21 @@ def get_backup_status(self, name):

     def backup_exists(self, name):
         try:
-            stub = medusa_pb2_grpc.MedusaStub(self.channel)
-            request = medusa_pb2.BackupStatusRequest(backupName=name)
-            stub.BackupStatus(request)
-            return True
+            backups = self.get_backups()
+            for backup in list(backups):
+                if backup.backupName == name:
+                    return True
+            return False
         except grpc.RpcError as e:
             logging.error("Failed to determine if backup exists for backup name: {} due to error: {}".format(name, e))
             return False
+
+    def purge_backups(self):
+        try:
+            stub = medusa_pb2_grpc.MedusaStub(self.channel)
+            request = medusa_pb2.PurgeBackupsRequest()
+            resp = stub.PurgeBackups(request)
+            return resp
+        except grpc.RpcError as e:
+            logging.error("Failed to purge backups due to error: {}".format(e))
+            return None
diff --git a/medusa/service/grpc/medusa.proto b/medusa/service/grpc/medusa.proto
index 7285ce493..6a059e9eb 100644
--- a/medusa/service/grpc/medusa.proto
+++ b/medusa/service/grpc/medusa.proto
@@ -10,6 +10,10 @@ service Medusa {
   rpc DeleteBackup(DeleteBackupRequest) returns (DeleteBackupResponse);

   rpc GetBackups(GetBackupsRequest) returns (GetBackupsResponse);
+
+  rpc PurgeBackups(PurgeBackupsRequest) returns (PurgeBackupsResponse);
+
+  rpc PrepareRestore(PrepareRestoreRequest) returns (PrepareRestoreResponse);
 }

 enum StatusType {
@@ -71,6 +75,7 @@ message BackupSummary {
   int32 finishedNodes = 5;
   repeated BackupNode nodes = 6;
   StatusType status = 7;
+  string backupType = 8;
 }

 message BackupNode {
@@ -78,4 +83,23 @@ message BackupNode {
   repeated int64 tokens = 2;
   string datacenter = 3;
   string rack = 4;
+}
+
+message PurgeBackupsRequest {
+}
+
+message PurgeBackupsResponse {
+  int32 nbBackupsPurged = 1;
+  int32 nbObjectsPurged = 2;
+  int64 totalPurgedSize = 3;
+  int32 totalObjectsWithinGcGrace = 4;
+}
+
+message PrepareRestoreRequest {
+  string backupName = 1;
+  string datacenter = 2;
+  string restoreKey = 3;
+}
+
+message PrepareRestoreResponse {
+}
\ No newline at end of file
diff --git a/medusa/service/grpc/medusa_pb2.py b/medusa/service/grpc/medusa_pb2.py
index 3f217c08a..3960ac06b 100644
--- a/medusa/service/grpc/medusa_pb2.py
+++ b/medusa/service/grpc/medusa_pb2.py
@@ -7,52 +7,54 @@ from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database - # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() + + + DESCRIPTOR = _descriptor.FileDescriptor( - name='medusa.proto', - package='', -
syntax='proto3', - serialized_options=None, - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n\x0cmedusa.proto\"d\n\rBackupRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12!\n\x04mode\x18\x02 \x01(\x0e\x32\x13.BackupRequest.Mode\"\"\n\x04Mode\x12\x10\n\x0c\x44IFFERENTIAL\x10\x00\x12\x08\n\x04\x46ULL\x10\x01\"A\n\x0e\x42\x61\x63kupResponse\x12\x12\n\nbackupName\x18\x01 \x01(\t\x12\x1b\n\x06status\x18\x02 \x01(\x0e\x32\x0b.StatusType\")\n\x13\x42\x61\x63kupStatusRequest\x12\x12\n\nbackupName\x18\x01 \x01(\t\"\xa0\x01\n\x14\x42\x61\x63kupStatusResponse\x12\x15\n\rfinishedNodes\x18\x01 \x03(\t\x12\x17\n\x0funfinishedNodes\x18\x02 \x03(\t\x12\x14\n\x0cmissingNodes\x18\x03 \x03(\t\x12\x11\n\tstartTime\x18\x04 \x01(\t\x12\x12\n\nfinishTime\x18\x05 \x01(\t\x12\x1b\n\x06status\x18\x06 \x01(\x0e\x32\x0b.StatusType\"#\n\x13\x44\x65leteBackupRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"A\n\x14\x44\x65leteBackupResponse\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1b\n\x06status\x18\x02 \x01(\x0e\x32\x0b.StatusType\"\x13\n\x11GetBackupsRequest\"Y\n\x12GetBackupsResponse\x12\x1f\n\x07\x62\x61\x63kups\x18\x01 \x03(\x0b\x32\x0e.BackupSummary\x12\"\n\roverallStatus\x18\x02 \x01(\x0e\x32\x0b.StatusType\"\xae\x01\n\rBackupSummary\x12\x12\n\nbackupName\x18\x01 \x01(\t\x12\x11\n\tstartTime\x18\x02 \x01(\x03\x12\x12\n\nfinishTime\x18\x03 \x01(\x03\x12\x12\n\ntotalNodes\x18\x04 \x01(\x05\x12\x15\n\rfinishedNodes\x18\x05 \x01(\x05\x12\x1a\n\x05nodes\x18\x06 \x03(\x0b\x32\x0b.BackupNode\x12\x1b\n\x06status\x18\x07 \x01(\x0e\x32\x0b.StatusType\"L\n\nBackupNode\x12\x0c\n\x04host\x18\x01 \x01(\t\x12\x0e\n\x06tokens\x18\x02 \x03(\x03\x12\x12\n\ndatacenter\x18\x03 \x01(\t\x12\x0c\n\x04rack\x18\x04 \x01(\t*C\n\nStatusType\x12\x0f\n\x0bIN_PROGRESS\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x12\n\n\x06\x46\x41ILED\x10\x02\x12\x0b\n\x07UNKNOWN\x10\x03\x32\x94\x02\n\x06Medusa\x12)\n\x06\x42\x61\x63kup\x12\x0e.BackupRequest\x1a\x0f.BackupResponse\x12.\n\x0b\x41syncBackup\x12\x0e.BackupRequest\x1a\x0f.BackupResponse\x12;\n\x0c\x42\x61\x63kupStatus\x12\x14.BackupStatusRequest\x1a\x15.BackupStatusResponse\x12;\n\x0c\x44\x65leteBackup\x12\x14.DeleteBackupRequest\x1a\x15.DeleteBackupResponse\x12\x35\n\nGetBackups\x12\x12.GetBackupsRequest\x1a\x13.GetBackupsResponseb\x06proto3' + name='medusa.proto', + package='', + syntax='proto3', + serialized_options=None, + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n\x0cmedusa.proto\"d\n\rBackupRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12!\n\x04mode\x18\x02 \x01(\x0e\x32\x13.BackupRequest.Mode\"\"\n\x04Mode\x12\x10\n\x0c\x44IFFERENTIAL\x10\x00\x12\x08\n\x04\x46ULL\x10\x01\"A\n\x0e\x42\x61\x63kupResponse\x12\x12\n\nbackupName\x18\x01 \x01(\t\x12\x1b\n\x06status\x18\x02 \x01(\x0e\x32\x0b.StatusType\")\n\x13\x42\x61\x63kupStatusRequest\x12\x12\n\nbackupName\x18\x01 \x01(\t\"\xa0\x01\n\x14\x42\x61\x63kupStatusResponse\x12\x15\n\rfinishedNodes\x18\x01 \x03(\t\x12\x17\n\x0funfinishedNodes\x18\x02 \x03(\t\x12\x14\n\x0cmissingNodes\x18\x03 \x03(\t\x12\x11\n\tstartTime\x18\x04 \x01(\t\x12\x12\n\nfinishTime\x18\x05 \x01(\t\x12\x1b\n\x06status\x18\x06 \x01(\x0e\x32\x0b.StatusType\"#\n\x13\x44\x65leteBackupRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"A\n\x14\x44\x65leteBackupResponse\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1b\n\x06status\x18\x02 \x01(\x0e\x32\x0b.StatusType\"\x13\n\x11GetBackupsRequest\"Y\n\x12GetBackupsResponse\x12\x1f\n\x07\x62\x61\x63kups\x18\x01 \x03(\x0b\x32\x0e.BackupSummary\x12\"\n\roverallStatus\x18\x02 
\x01(\x0e\x32\x0b.StatusType\"\xc2\x01\n\rBackupSummary\x12\x12\n\nbackupName\x18\x01 \x01(\t\x12\x11\n\tstartTime\x18\x02 \x01(\x03\x12\x12\n\nfinishTime\x18\x03 \x01(\x03\x12\x12\n\ntotalNodes\x18\x04 \x01(\x05\x12\x15\n\rfinishedNodes\x18\x05 \x01(\x05\x12\x1a\n\x05nodes\x18\x06 \x03(\x0b\x32\x0b.BackupNode\x12\x1b\n\x06status\x18\x07 \x01(\x0e\x32\x0b.StatusType\x12\x12\n\nbackupType\x18\x08 \x01(\t\"L\n\nBackupNode\x12\x0c\n\x04host\x18\x01 \x01(\t\x12\x0e\n\x06tokens\x18\x02 \x03(\x03\x12\x12\n\ndatacenter\x18\x03 \x01(\t\x12\x0c\n\x04rack\x18\x04 \x01(\t\"\x15\n\x13PurgeBackupsRequest\"\x84\x01\n\x14PurgeBackupsResponse\x12\x17\n\x0fnbBackupsPurged\x18\x01 \x01(\x05\x12\x17\n\x0fnbObjectsPurged\x18\x02 \x01(\x05\x12\x17\n\x0ftotalPurgedSize\x18\x03 \x01(\x03\x12!\n\x19totalObjectsWithinGcGrace\x18\x04 \x01(\x05\"S\n\x15PrepareRestoreRequest\x12\x12\n\nbackupName\x18\x01 \x01(\t\x12\x12\n\ndatacenter\x18\x02 \x01(\t\x12\x12\n\nrestoreKey\x18\x03 \x01(\t\"\x18\n\x16PrepareRestoreResponse*C\n\nStatusType\x12\x0f\n\x0bIN_PROGRESS\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x12\n\n\x06\x46\x41ILED\x10\x02\x12\x0b\n\x07UNKNOWN\x10\x03\x32\x94\x03\n\x06Medusa\x12)\n\x06\x42\x61\x63kup\x12\x0e.BackupRequest\x1a\x0f.BackupResponse\x12.\n\x0b\x41syncBackup\x12\x0e.BackupRequest\x1a\x0f.BackupResponse\x12;\n\x0c\x42\x61\x63kupStatus\x12\x14.BackupStatusRequest\x1a\x15.BackupStatusResponse\x12;\n\x0c\x44\x65leteBackup\x12\x14.DeleteBackupRequest\x1a\x15.DeleteBackupResponse\x12\x35\n\nGetBackups\x12\x12.GetBackupsRequest\x1a\x13.GetBackupsResponse\x12;\n\x0cPurgeBackups\x12\x14.PurgeBackupsRequest\x1a\x15.PurgeBackupsResponse\x12\x41\n\x0ePrepareRestore\x12\x16.PrepareRestoreRequest\x1a\x17.PrepareRestoreResponseb\x06proto3' ) _STATUSTYPE = _descriptor.EnumDescriptor( - name='StatusType', - full_name='StatusType', - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name='IN_PROGRESS', index=0, number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='SUCCESS', index=1, number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='FAILED', index=2, number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='UNKNOWN', index=3, number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - ], - containing_type=None, - serialized_options=None, - serialized_start=862, - serialized_end=929, + name='StatusType', + full_name='StatusType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='IN_PROGRESS', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='SUCCESS', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='FAILED', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='UNKNOWN', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=1151, + 
serialized_end=1218, ) _sym_db.RegisterEnumDescriptor(_STATUSTYPE) @@ -62,459 +64,626 @@ FAILED = 2 UNKNOWN = 3 + _BACKUPREQUEST_MODE = _descriptor.EnumDescriptor( - name='Mode', - full_name='BackupRequest.Mode', - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name='DIFFERENTIAL', index=0, number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='FULL', index=1, number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - ], - containing_type=None, - serialized_options=None, - serialized_start=82, - serialized_end=116, + name='Mode', + full_name='BackupRequest.Mode', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='DIFFERENTIAL', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='FULL', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=82, + serialized_end=116, ) _sym_db.RegisterEnumDescriptor(_BACKUPREQUEST_MODE) + _BACKUPREQUEST = _descriptor.Descriptor( - name='BackupRequest', - full_name='BackupRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='BackupRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='mode', full_name='BackupRequest.mode', index=1, - number=2, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _BACKUPREQUEST_MODE, - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=16, - serialized_end=116, + name='BackupRequest', + full_name='BackupRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='BackupRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='mode', full_name='BackupRequest.mode', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _BACKUPREQUEST_MODE, 
+ ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=16, + serialized_end=116, ) + _BACKUPRESPONSE = _descriptor.Descriptor( - name='BackupResponse', - full_name='BackupResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='backupName', full_name='BackupResponse.backupName', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='status', full_name='BackupResponse.status', index=1, - number=2, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=118, - serialized_end=183, + name='BackupResponse', + full_name='BackupResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='backupName', full_name='BackupResponse.backupName', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='status', full_name='BackupResponse.status', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=118, + serialized_end=183, ) + _BACKUPSTATUSREQUEST = _descriptor.Descriptor( - name='BackupStatusRequest', - full_name='BackupStatusRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='backupName', full_name='BackupStatusRequest.backupName', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=185, - serialized_end=226, + name='BackupStatusRequest', + full_name='BackupStatusRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + 
fields=[ + _descriptor.FieldDescriptor( + name='backupName', full_name='BackupStatusRequest.backupName', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=185, + serialized_end=226, ) + _BACKUPSTATUSRESPONSE = _descriptor.Descriptor( - name='BackupStatusResponse', - full_name='BackupStatusResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='finishedNodes', full_name='BackupStatusResponse.finishedNodes', index=0, - number=1, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='unfinishedNodes', full_name='BackupStatusResponse.unfinishedNodes', index=1, - number=2, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='missingNodes', full_name='BackupStatusResponse.missingNodes', index=2, - number=3, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='startTime', full_name='BackupStatusResponse.startTime', index=3, - number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='finishTime', full_name='BackupStatusResponse.finishTime', index=4, - number=5, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='status', full_name='BackupStatusResponse.status', index=5, - number=6, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=229, - serialized_end=389, + name='BackupStatusResponse', + full_name='BackupStatusResponse', + filename=None, + file=DESCRIPTOR, + 
containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='finishedNodes', full_name='BackupStatusResponse.finishedNodes', index=0, + number=1, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='unfinishedNodes', full_name='BackupStatusResponse.unfinishedNodes', index=1, + number=2, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='missingNodes', full_name='BackupStatusResponse.missingNodes', index=2, + number=3, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='startTime', full_name='BackupStatusResponse.startTime', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='finishTime', full_name='BackupStatusResponse.finishTime', index=4, + number=5, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='status', full_name='BackupStatusResponse.status', index=5, + number=6, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=229, + serialized_end=389, ) + _DELETEBACKUPREQUEST = _descriptor.Descriptor( - name='DeleteBackupRequest', - full_name='DeleteBackupRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='DeleteBackupRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=391, - serialized_end=426, + name='DeleteBackupRequest', + 
full_name='DeleteBackupRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='DeleteBackupRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=391, + serialized_end=426, ) + _DELETEBACKUPRESPONSE = _descriptor.Descriptor( - name='DeleteBackupResponse', - full_name='DeleteBackupResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='DeleteBackupResponse.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='status', full_name='DeleteBackupResponse.status', index=1, - number=2, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=428, - serialized_end=493, + name='DeleteBackupResponse', + full_name='DeleteBackupResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='DeleteBackupResponse.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='status', full_name='DeleteBackupResponse.status', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=428, + serialized_end=493, ) + _GETBACKUPSREQUEST = _descriptor.Descriptor( - name='GetBackupsRequest', - full_name='GetBackupsRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - 
serialized_start=495, - serialized_end=514, + name='GetBackupsRequest', + full_name='GetBackupsRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=495, + serialized_end=514, ) + _GETBACKUPSRESPONSE = _descriptor.Descriptor( - name='GetBackupsResponse', - full_name='GetBackupsResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='backups', full_name='GetBackupsResponse.backups', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='overallStatus', full_name='GetBackupsResponse.overallStatus', index=1, - number=2, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=516, - serialized_end=605, + name='GetBackupsResponse', + full_name='GetBackupsResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='backups', full_name='GetBackupsResponse.backups', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='overallStatus', full_name='GetBackupsResponse.overallStatus', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=516, + serialized_end=605, ) + _BACKUPSUMMARY = _descriptor.Descriptor( - name='BackupSummary', - full_name='BackupSummary', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='backupName', full_name='BackupSummary.backupName', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='startTime', full_name='BackupSummary.startTime', index=1, - number=2, type=3, 
cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='finishTime', full_name='BackupSummary.finishTime', index=2, - number=3, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='totalNodes', full_name='BackupSummary.totalNodes', index=3, - number=4, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='finishedNodes', full_name='BackupSummary.finishedNodes', index=4, - number=5, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='nodes', full_name='BackupSummary.nodes', index=5, - number=6, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='status', full_name='BackupSummary.status', index=6, - number=7, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=608, - serialized_end=782, + name='BackupSummary', + full_name='BackupSummary', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='backupName', full_name='BackupSummary.backupName', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='startTime', full_name='BackupSummary.startTime', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='finishTime', full_name='BackupSummary.finishTime', index=2, + number=3, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='totalNodes', full_name='BackupSummary.totalNodes', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='finishedNodes', full_name='BackupSummary.finishedNodes', index=4, + number=5, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='nodes', full_name='BackupSummary.nodes', index=5, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='status', full_name='BackupSummary.status', index=6, + number=7, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='backupType', full_name='BackupSummary.backupType', index=7, + number=8, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=608, + serialized_end=802, ) + _BACKUPNODE = _descriptor.Descriptor( - name='BackupNode', - full_name='BackupNode', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='host', full_name='BackupNode.host', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='tokens', full_name='BackupNode.tokens', index=1, - number=2, type=3, cpp_type=2, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='datacenter', full_name='BackupNode.datacenter', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='rack', full_name='BackupNode.rack', index=3, - number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=784, - serialized_end=860, + name='BackupNode', + full_name='BackupNode', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='host', full_name='BackupNode.host', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='tokens', full_name='BackupNode.tokens', index=1, + number=2, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='datacenter', full_name='BackupNode.datacenter', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='rack', full_name='BackupNode.rack', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=804, + serialized_end=880, +) + + +_PURGEBACKUPSREQUEST = _descriptor.Descriptor( + name='PurgeBackupsRequest', + full_name='PurgeBackupsRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=882, + serialized_end=903, +) + + +_PURGEBACKUPSRESPONSE = _descriptor.Descriptor( + name='PurgeBackupsResponse', + full_name='PurgeBackupsResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='nbBackupsPurged', full_name='PurgeBackupsResponse.nbBackupsPurged', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='nbObjectsPurged', full_name='PurgeBackupsResponse.nbObjectsPurged', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='totalPurgedSize', full_name='PurgeBackupsResponse.totalPurgedSize', index=2, + number=3, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='totalObjectsWithinGcGrace', full_name='PurgeBackupsResponse.totalObjectsWithinGcGrace', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=906, + serialized_end=1038, +) + + +_PREPARERESTOREREQUEST = _descriptor.Descriptor( + name='PrepareRestoreRequest', + full_name='PrepareRestoreRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='backupName', full_name='PrepareRestoreRequest.backupName', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='datacenter', full_name='PrepareRestoreRequest.datacenter', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='restoreKey', full_name='PrepareRestoreRequest.restoreKey', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1040, + serialized_end=1123, +) + + +_PREPARERESTORERESPONSE = _descriptor.Descriptor( + name='PrepareRestoreResponse', + full_name='PrepareRestoreResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + 
syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1125, + serialized_end=1149, ) _BACKUPREQUEST.fields_by_name['mode'].enum_type = _BACKUPREQUEST_MODE @@ -536,140 +705,194 @@ DESCRIPTOR.message_types_by_name['GetBackupsResponse'] = _GETBACKUPSRESPONSE DESCRIPTOR.message_types_by_name['BackupSummary'] = _BACKUPSUMMARY DESCRIPTOR.message_types_by_name['BackupNode'] = _BACKUPNODE +DESCRIPTOR.message_types_by_name['PurgeBackupsRequest'] = _PURGEBACKUPSREQUEST +DESCRIPTOR.message_types_by_name['PurgeBackupsResponse'] = _PURGEBACKUPSRESPONSE +DESCRIPTOR.message_types_by_name['PrepareRestoreRequest'] = _PREPARERESTOREREQUEST +DESCRIPTOR.message_types_by_name['PrepareRestoreResponse'] = _PREPARERESTORERESPONSE DESCRIPTOR.enum_types_by_name['StatusType'] = _STATUSTYPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) BackupRequest = _reflection.GeneratedProtocolMessageType('BackupRequest', (_message.Message,), { - 'DESCRIPTOR': _BACKUPREQUEST, - '__module__': 'medusa_pb2' - # @@protoc_insertion_point(class_scope:BackupRequest) -}) + 'DESCRIPTOR' : _BACKUPREQUEST, + '__module__' : 'medusa_pb2' + # @@protoc_insertion_point(class_scope:BackupRequest) + }) _sym_db.RegisterMessage(BackupRequest) BackupResponse = _reflection.GeneratedProtocolMessageType('BackupResponse', (_message.Message,), { - 'DESCRIPTOR': _BACKUPRESPONSE, - '__module__': 'medusa_pb2' - # @@protoc_insertion_point(class_scope:BackupResponse) -}) + 'DESCRIPTOR' : _BACKUPRESPONSE, + '__module__' : 'medusa_pb2' + # @@protoc_insertion_point(class_scope:BackupResponse) + }) _sym_db.RegisterMessage(BackupResponse) BackupStatusRequest = _reflection.GeneratedProtocolMessageType('BackupStatusRequest', (_message.Message,), { - 'DESCRIPTOR': _BACKUPSTATUSREQUEST, - '__module__': 'medusa_pb2' - # @@protoc_insertion_point(class_scope:BackupStatusRequest) -}) + 'DESCRIPTOR' : _BACKUPSTATUSREQUEST, + '__module__' : 'medusa_pb2' + # @@protoc_insertion_point(class_scope:BackupStatusRequest) + }) _sym_db.RegisterMessage(BackupStatusRequest) BackupStatusResponse = _reflection.GeneratedProtocolMessageType('BackupStatusResponse', (_message.Message,), { - 'DESCRIPTOR': _BACKUPSTATUSRESPONSE, - '__module__': 'medusa_pb2' - # @@protoc_insertion_point(class_scope:BackupStatusResponse) -}) + 'DESCRIPTOR' : _BACKUPSTATUSRESPONSE, + '__module__' : 'medusa_pb2' + # @@protoc_insertion_point(class_scope:BackupStatusResponse) + }) _sym_db.RegisterMessage(BackupStatusResponse) DeleteBackupRequest = _reflection.GeneratedProtocolMessageType('DeleteBackupRequest', (_message.Message,), { - 'DESCRIPTOR': _DELETEBACKUPREQUEST, - '__module__': 'medusa_pb2' - # @@protoc_insertion_point(class_scope:DeleteBackupRequest) -}) + 'DESCRIPTOR' : _DELETEBACKUPREQUEST, + '__module__' : 'medusa_pb2' + # @@protoc_insertion_point(class_scope:DeleteBackupRequest) + }) _sym_db.RegisterMessage(DeleteBackupRequest) DeleteBackupResponse = _reflection.GeneratedProtocolMessageType('DeleteBackupResponse', (_message.Message,), { - 'DESCRIPTOR': _DELETEBACKUPRESPONSE, - '__module__': 'medusa_pb2' - # @@protoc_insertion_point(class_scope:DeleteBackupResponse) -}) + 'DESCRIPTOR' : _DELETEBACKUPRESPONSE, + '__module__' : 'medusa_pb2' + # @@protoc_insertion_point(class_scope:DeleteBackupResponse) + }) _sym_db.RegisterMessage(DeleteBackupResponse) GetBackupsRequest = _reflection.GeneratedProtocolMessageType('GetBackupsRequest', (_message.Message,), { - 'DESCRIPTOR': _GETBACKUPSREQUEST, - '__module__': 'medusa_pb2' - # @@protoc_insertion_point(class_scope:GetBackupsRequest) -}) + 
'DESCRIPTOR' : _GETBACKUPSREQUEST, + '__module__' : 'medusa_pb2' + # @@protoc_insertion_point(class_scope:GetBackupsRequest) + }) _sym_db.RegisterMessage(GetBackupsRequest) GetBackupsResponse = _reflection.GeneratedProtocolMessageType('GetBackupsResponse', (_message.Message,), { - 'DESCRIPTOR': _GETBACKUPSRESPONSE, - '__module__': 'medusa_pb2' - # @@protoc_insertion_point(class_scope:GetBackupsResponse) -}) + 'DESCRIPTOR' : _GETBACKUPSRESPONSE, + '__module__' : 'medusa_pb2' + # @@protoc_insertion_point(class_scope:GetBackupsResponse) + }) _sym_db.RegisterMessage(GetBackupsResponse) BackupSummary = _reflection.GeneratedProtocolMessageType('BackupSummary', (_message.Message,), { - 'DESCRIPTOR': _BACKUPSUMMARY, - '__module__': 'medusa_pb2' - # @@protoc_insertion_point(class_scope:BackupSummary) -}) + 'DESCRIPTOR' : _BACKUPSUMMARY, + '__module__' : 'medusa_pb2' + # @@protoc_insertion_point(class_scope:BackupSummary) + }) _sym_db.RegisterMessage(BackupSummary) BackupNode = _reflection.GeneratedProtocolMessageType('BackupNode', (_message.Message,), { - 'DESCRIPTOR': _BACKUPNODE, - '__module__': 'medusa_pb2' - # @@protoc_insertion_point(class_scope:BackupNode) -}) + 'DESCRIPTOR' : _BACKUPNODE, + '__module__' : 'medusa_pb2' + # @@protoc_insertion_point(class_scope:BackupNode) + }) _sym_db.RegisterMessage(BackupNode) +PurgeBackupsRequest = _reflection.GeneratedProtocolMessageType('PurgeBackupsRequest', (_message.Message,), { + 'DESCRIPTOR' : _PURGEBACKUPSREQUEST, + '__module__' : 'medusa_pb2' + # @@protoc_insertion_point(class_scope:PurgeBackupsRequest) + }) +_sym_db.RegisterMessage(PurgeBackupsRequest) + +PurgeBackupsResponse = _reflection.GeneratedProtocolMessageType('PurgeBackupsResponse', (_message.Message,), { + 'DESCRIPTOR' : _PURGEBACKUPSRESPONSE, + '__module__' : 'medusa_pb2' + # @@protoc_insertion_point(class_scope:PurgeBackupsResponse) + }) +_sym_db.RegisterMessage(PurgeBackupsResponse) + +PrepareRestoreRequest = _reflection.GeneratedProtocolMessageType('PrepareRestoreRequest', (_message.Message,), { + 'DESCRIPTOR' : _PREPARERESTOREREQUEST, + '__module__' : 'medusa_pb2' + # @@protoc_insertion_point(class_scope:PrepareRestoreRequest) + }) +_sym_db.RegisterMessage(PrepareRestoreRequest) + +PrepareRestoreResponse = _reflection.GeneratedProtocolMessageType('PrepareRestoreResponse', (_message.Message,), { + 'DESCRIPTOR' : _PREPARERESTORERESPONSE, + '__module__' : 'medusa_pb2' + # @@protoc_insertion_point(class_scope:PrepareRestoreResponse) + }) +_sym_db.RegisterMessage(PrepareRestoreResponse) + + + _MEDUSA = _descriptor.ServiceDescriptor( - name='Medusa', - full_name='Medusa', - file=DESCRIPTOR, + name='Medusa', + full_name='Medusa', + file=DESCRIPTOR, + index=0, + serialized_options=None, + create_key=_descriptor._internal_create_key, + serialized_start=1221, + serialized_end=1625, + methods=[ + _descriptor.MethodDescriptor( + name='Backup', + full_name='Medusa.Backup', index=0, + containing_service=None, + input_type=_BACKUPREQUEST, + output_type=_BACKUPRESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='AsyncBackup', + full_name='Medusa.AsyncBackup', + index=1, + containing_service=None, + input_type=_BACKUPREQUEST, + output_type=_BACKUPRESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='BackupStatus', + full_name='Medusa.BackupStatus', + index=2, + containing_service=None, + input_type=_BACKUPSTATUSREQUEST, + 
output_type=_BACKUPSTATUSRESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='DeleteBackup', + full_name='Medusa.DeleteBackup', + index=3, + containing_service=None, + input_type=_DELETEBACKUPREQUEST, + output_type=_DELETEBACKUPRESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='GetBackups', + full_name='Medusa.GetBackups', + index=4, + containing_service=None, + input_type=_GETBACKUPSREQUEST, + output_type=_GETBACKUPSRESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='PurgeBackups', + full_name='Medusa.PurgeBackups', + index=5, + containing_service=None, + input_type=_PURGEBACKUPSREQUEST, + output_type=_PURGEBACKUPSRESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='PrepareRestore', + full_name='Medusa.PrepareRestore', + index=6, + containing_service=None, + input_type=_PREPARERESTOREREQUEST, + output_type=_PREPARERESTORERESPONSE, serialized_options=None, create_key=_descriptor._internal_create_key, - serialized_start=932, - serialized_end=1208, - methods=[ - _descriptor.MethodDescriptor( - name='Backup', - full_name='Medusa.Backup', - index=0, - containing_service=None, - input_type=_BACKUPREQUEST, - output_type=_BACKUPRESPONSE, - serialized_options=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name='AsyncBackup', - full_name='Medusa.AsyncBackup', - index=1, - containing_service=None, - input_type=_BACKUPREQUEST, - output_type=_BACKUPRESPONSE, - serialized_options=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name='BackupStatus', - full_name='Medusa.BackupStatus', - index=2, - containing_service=None, - input_type=_BACKUPSTATUSREQUEST, - output_type=_BACKUPSTATUSRESPONSE, - serialized_options=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name='DeleteBackup', - full_name='Medusa.DeleteBackup', - index=3, - containing_service=None, - input_type=_DELETEBACKUPREQUEST, - output_type=_DELETEBACKUPRESPONSE, - serialized_options=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name='GetBackups', - full_name='Medusa.GetBackups', - index=4, - containing_service=None, - input_type=_GETBACKUPSREQUEST, - output_type=_GETBACKUPSRESPONSE, - serialized_options=None, - create_key=_descriptor._internal_create_key, - ), - ]) + ), +]) _sym_db.RegisterServiceDescriptor(_MEDUSA) DESCRIPTOR.services_by_name['Medusa'] = _MEDUSA diff --git a/medusa/service/grpc/medusa_pb2_grpc.py b/medusa/service/grpc/medusa_pb2_grpc.py index 5e69a8c9a..aa990f6be 100644 --- a/medusa/service/grpc/medusa_pb2_grpc.py +++ b/medusa/service/grpc/medusa_pb2_grpc.py @@ -15,30 +15,40 @@ def __init__(self, channel): channel: A grpc.Channel. 
""" self.Backup = channel.unary_unary( - '/Medusa/Backup', - request_serializer=medusa__pb2.BackupRequest.SerializeToString, - response_deserializer=medusa__pb2.BackupResponse.FromString, - ) + '/Medusa/Backup', + request_serializer=medusa__pb2.BackupRequest.SerializeToString, + response_deserializer=medusa__pb2.BackupResponse.FromString, + ) self.AsyncBackup = channel.unary_unary( - '/Medusa/AsyncBackup', - request_serializer=medusa__pb2.BackupRequest.SerializeToString, - response_deserializer=medusa__pb2.BackupResponse.FromString, - ) + '/Medusa/AsyncBackup', + request_serializer=medusa__pb2.BackupRequest.SerializeToString, + response_deserializer=medusa__pb2.BackupResponse.FromString, + ) self.BackupStatus = channel.unary_unary( - '/Medusa/BackupStatus', - request_serializer=medusa__pb2.BackupStatusRequest.SerializeToString, - response_deserializer=medusa__pb2.BackupStatusResponse.FromString, - ) + '/Medusa/BackupStatus', + request_serializer=medusa__pb2.BackupStatusRequest.SerializeToString, + response_deserializer=medusa__pb2.BackupStatusResponse.FromString, + ) self.DeleteBackup = channel.unary_unary( - '/Medusa/DeleteBackup', - request_serializer=medusa__pb2.DeleteBackupRequest.SerializeToString, - response_deserializer=medusa__pb2.DeleteBackupResponse.FromString, - ) + '/Medusa/DeleteBackup', + request_serializer=medusa__pb2.DeleteBackupRequest.SerializeToString, + response_deserializer=medusa__pb2.DeleteBackupResponse.FromString, + ) self.GetBackups = channel.unary_unary( - '/Medusa/GetBackups', - request_serializer=medusa__pb2.GetBackupsRequest.SerializeToString, - response_deserializer=medusa__pb2.GetBackupsResponse.FromString, - ) + '/Medusa/GetBackups', + request_serializer=medusa__pb2.GetBackupsRequest.SerializeToString, + response_deserializer=medusa__pb2.GetBackupsResponse.FromString, + ) + self.PurgeBackups = channel.unary_unary( + '/Medusa/PurgeBackups', + request_serializer=medusa__pb2.PurgeBackupsRequest.SerializeToString, + response_deserializer=medusa__pb2.PurgeBackupsResponse.FromString, + ) + self.PrepareRestore = channel.unary_unary( + '/Medusa/PrepareRestore', + request_serializer=medusa__pb2.PrepareRestoreRequest.SerializeToString, + response_deserializer=medusa__pb2.PrepareRestoreResponse.FromString, + ) class MedusaServicer(object): @@ -74,125 +84,181 @@ def GetBackups(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def PurgeBackups(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def PrepareRestore(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def add_MedusaServicer_to_server(servicer, server): rpc_method_handlers = { - 'Backup': grpc.unary_unary_rpc_method_handler( - servicer.Backup, - request_deserializer=medusa__pb2.BackupRequest.FromString, - response_serializer=medusa__pb2.BackupResponse.SerializeToString, - ), - 'AsyncBackup': grpc.unary_unary_rpc_method_handler( - servicer.AsyncBackup, - request_deserializer=medusa__pb2.BackupRequest.FromString, - response_serializer=medusa__pb2.BackupResponse.SerializeToString, - ), - 'BackupStatus': grpc.unary_unary_rpc_method_handler( - 
servicer.BackupStatus, - request_deserializer=medusa__pb2.BackupStatusRequest.FromString, - response_serializer=medusa__pb2.BackupStatusResponse.SerializeToString, - ), - 'DeleteBackup': grpc.unary_unary_rpc_method_handler( - servicer.DeleteBackup, - request_deserializer=medusa__pb2.DeleteBackupRequest.FromString, - response_serializer=medusa__pb2.DeleteBackupResponse.SerializeToString, - ), - 'GetBackups': grpc.unary_unary_rpc_method_handler( - servicer.GetBackups, - request_deserializer=medusa__pb2.GetBackupsRequest.FromString, - response_serializer=medusa__pb2.GetBackupsResponse.SerializeToString, - ), + 'Backup': grpc.unary_unary_rpc_method_handler( + servicer.Backup, + request_deserializer=medusa__pb2.BackupRequest.FromString, + response_serializer=medusa__pb2.BackupResponse.SerializeToString, + ), + 'AsyncBackup': grpc.unary_unary_rpc_method_handler( + servicer.AsyncBackup, + request_deserializer=medusa__pb2.BackupRequest.FromString, + response_serializer=medusa__pb2.BackupResponse.SerializeToString, + ), + 'BackupStatus': grpc.unary_unary_rpc_method_handler( + servicer.BackupStatus, + request_deserializer=medusa__pb2.BackupStatusRequest.FromString, + response_serializer=medusa__pb2.BackupStatusResponse.SerializeToString, + ), + 'DeleteBackup': grpc.unary_unary_rpc_method_handler( + servicer.DeleteBackup, + request_deserializer=medusa__pb2.DeleteBackupRequest.FromString, + response_serializer=medusa__pb2.DeleteBackupResponse.SerializeToString, + ), + 'GetBackups': grpc.unary_unary_rpc_method_handler( + servicer.GetBackups, + request_deserializer=medusa__pb2.GetBackupsRequest.FromString, + response_serializer=medusa__pb2.GetBackupsResponse.SerializeToString, + ), + 'PurgeBackups': grpc.unary_unary_rpc_method_handler( + servicer.PurgeBackups, + request_deserializer=medusa__pb2.PurgeBackupsRequest.FromString, + response_serializer=medusa__pb2.PurgeBackupsResponse.SerializeToString, + ), + 'PrepareRestore': grpc.unary_unary_rpc_method_handler( + servicer.PrepareRestore, + request_deserializer=medusa__pb2.PrepareRestoreRequest.FromString, + response_serializer=medusa__pb2.PrepareRestoreResponse.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( - 'Medusa', rpc_method_handlers) + 'Medusa', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) -# This class is part of an EXPERIMENTAL API. + # This class is part of an EXPERIMENTAL API. 
class Medusa(object): """Missing associated documentation comment in .proto file.""" @staticmethod def Backup(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): return grpc.experimental.unary_unary(request, target, '/Medusa/Backup', - medusa__pb2.BackupRequest.SerializeToString, - medusa__pb2.BackupResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + medusa__pb2.BackupRequest.SerializeToString, + medusa__pb2.BackupResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def AsyncBackup(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): return grpc.experimental.unary_unary(request, target, '/Medusa/AsyncBackup', - medusa__pb2.BackupRequest.SerializeToString, - medusa__pb2.BackupResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + medusa__pb2.BackupRequest.SerializeToString, + medusa__pb2.BackupResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def BackupStatus(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): return grpc.experimental.unary_unary(request, target, '/Medusa/BackupStatus', - medusa__pb2.BackupStatusRequest.SerializeToString, - medusa__pb2.BackupStatusResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + medusa__pb2.BackupStatusRequest.SerializeToString, + medusa__pb2.BackupStatusResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def DeleteBackup(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): return grpc.experimental.unary_unary(request, target, '/Medusa/DeleteBackup', - medusa__pb2.DeleteBackupRequest.SerializeToString, - medusa__pb2.DeleteBackupResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + medusa__pb2.DeleteBackupRequest.SerializeToString, + medusa__pb2.DeleteBackupResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 
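For orientation, the two RPCs this change wires into the stub above (PurgeBackups and PrepareRestore) can be driven directly over a raw gRPC channel. The sketch below is illustrative only and not part of this change set: it assumes the conventional MedusaStub class name from the generated module and a server on 127.0.0.1:50051 (the address the integration tests use); the backup name, datacenter and restore key values are made up.

import grpc

from medusa.service.grpc import medusa_pb2, medusa_pb2_grpc


def call_new_rpcs(address="127.0.0.1:50051"):
    with grpc.insecure_channel(address) as channel:
        # Conventional generated stub name; assumption, not confirmed by this diff.
        stub = medusa_pb2_grpc.MedusaStub(channel)

        # PurgeBackups applies the configured max_backup_age / max_backup_count
        # limits server-side and reports what was removed.
        purge = stub.PurgeBackups(medusa_pb2.PurgeBackupsRequest())
        print(purge.nbBackupsPurged, purge.nbObjectsPurged,
              purge.totalPurgedSize, purge.totalObjectsWithinGcGrace)

        # PrepareRestore computes the host mapping used by a later node restore;
        # the field values below are placeholders.
        stub.PrepareRestore(medusa_pb2.PrepareRestoreRequest(
            backupName="grpc_backup_2",
            datacenter="dc1",
            restoreKey="8d7c5a0e-restore",
        ))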
@staticmethod def GetBackups(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): return grpc.experimental.unary_unary(request, target, '/Medusa/GetBackups', - medusa__pb2.GetBackupsRequest.SerializeToString, - medusa__pb2.GetBackupsResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + medusa__pb2.GetBackupsRequest.SerializeToString, + medusa__pb2.GetBackupsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def PurgeBackups(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/Medusa/PurgeBackups', + medusa__pb2.PurgeBackupsRequest.SerializeToString, + medusa__pb2.PurgeBackupsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def PrepareRestore(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/Medusa/PrepareRestore', + medusa__pb2.PrepareRestoreRequest.SerializeToString, + medusa__pb2.PrepareRestoreResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/medusa/service/grpc/restore.py b/medusa/service/grpc/restore.py index 668baf3ba..74c92109c 100644 --- a/medusa/service/grpc/restore.py +++ b/medusa/service/grpc/restore.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import json import logging import os import sys @@ -22,13 +23,13 @@ import medusa.config import medusa.restore_node import medusa.listing +from medusa.service.grpc.server import RESTORE_MAPPING_LOCATION def create_config(config_file_path): config_file = Path(config_file_path) - args = defaultdict(lambda: None) - - return medusa.config.load_config(args, config_file) + conf = medusa.config.load_config(defaultdict(lambda: None), config_file) + return conf def configure_console_logging(config): @@ -48,36 +49,61 @@ def configure_console_logging(config): logging.getLogger(logger_name).setLevel(logging.WARN) -if len(sys.argv) > 2: - config_file_path = sys.argv[2] -else: - config_file_path = "/etc/medusa/medusa.ini" - -config = create_config(config_file_path) -configure_console_logging(config.logging) - -backup_name = os.environ["BACKUP_NAME"] -tmp_dir = Path("/tmp") -in_place = True -keep_auth = False -seeds = None -verify = False -keyspaces = {} -tables = {} -use_sstableloader = False - -cluster_backups = medusa.listing.get_backups(config, False) -backup_found = False -# Checking if the backup exists for the node we're restoring. -# Skipping restore if it doesn't exist. 
-for cluster_backup in cluster_backups: - if cluster_backup.name == backup_name: - backup_found = True - logging.info("Starting restore of backup {}".format(backup_name)) - medusa.restore_node.restore_node(config, tmp_dir, backup_name, in_place, keep_auth, - seeds, verify, keyspaces, tables, use_sstableloader) - logging.info("Finished restore of backup {}".format(backup_name)) - break - -if not backup_found: - logging.info("Skipped restore of missing backup {}".format(backup_name)) +if __name__ == '__main__': + if len(sys.argv) > 3: + config_file_path = sys.argv[2] + restore_key = sys.argv[3] + else: + logging.error("Usage: {} ".format(sys.argv[0])) + sys.exit(1) + + in_place = True + if os.path.exists(f"{RESTORE_MAPPING_LOCATION}/{restore_key}"): + logging.info(f"Reading mapping file {RESTORE_MAPPING_LOCATION}/{restore_key}") + with open(f"{RESTORE_MAPPING_LOCATION}/{restore_key}", 'r') as f: + mapping = json.load(f) + # Mapping json structure will look like: + # {'in_place': true, + # 'host_map': + # {'172.24.0.3': {'source': ['172.24.0.3'], 'seed': False}, + # '127.0.0.1': {'source': ['172.24.0.4'], 'seed': False}, + # '172.24.0.6': {'source': ['172.24.0.6'], 'seed': False}}} + # As each mapping is specific to a Cassandra node, we're looking for the node that maps to 127.0.0.1, + # which will be different for each pod. + # If hostname resolving is turned on, we're looking for the localhost key instead. + if "localhost" in mapping["host_map"].keys(): + os.environ["POD_IP"] = mapping["host_map"]["localhost"]["source"][0] + elif "127.0.0.1" in mapping["host_map"].keys(): + os.environ["POD_IP"] = mapping["host_map"]["127.0.0.1"]["source"][0] + else: + os.environ["POD_IP"] = mapping["host_map"]["::1"]["source"][0] + in_place = mapping["in_place"] + + config = create_config(config_file_path) + configure_console_logging(config.logging) + + backup_name = os.environ["BACKUP_NAME"] + tmp_dir = Path("/tmp") + keep_auth = False if in_place else True + seeds = None + verify = False + keyspaces = {} + tables = {} + use_sstableloader = False + + cluster_backups = list(medusa.listing.get_backups(config, True)) + logging.info(f"Found {len(cluster_backups)} backups in the cluster") + backup_found = False + # Checking if the backup exists for the node we're restoring. + # Skipping restore if it doesn't exist. + for cluster_backup in cluster_backups: + if cluster_backup.name == backup_name: + backup_found = True + logging.info("Starting restore of backup {}".format(backup_name)) + medusa.restore_node.restore_node(config, tmp_dir, backup_name, in_place, keep_auth, + seeds, verify, keyspaces, tables, use_sstableloader) + logging.info("Finished restore of backup {}".format(backup_name)) + break + + if not backup_found: + logging.info("Skipped restore of missing backup {}".format(backup_name)) diff --git a/medusa/service/grpc/server.py b/medusa/service/grpc/server.py index b9df92132..119a31e6d 100644 --- a/medusa/service/grpc/server.py +++ b/medusa/service/grpc/server.py @@ -13,7 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
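To make the host-map lookup in the reworked restore entrypoint above concrete, here is a standalone sketch (not part of the change) that applies the same selection rule to the sample structure quoted in its comment. The helper name and the example addresses are illustrative.

import json

# Payload in the shape PrepareRestore writes to RESTORE_MAPPING_LOCATION/<restoreKey>.
sample_mapping = json.loads("""
{
  "in_place": true,
  "host_map": {
    "172.24.0.3": {"source": ["172.24.0.3"], "seed": false},
    "127.0.0.1":  {"source": ["172.24.0.4"], "seed": false},
    "172.24.0.6": {"source": ["172.24.0.6"], "seed": false}
  }
}
""")


def local_source_address(mapping):
    """Return the source node this pod restores from, trying the same keys
    (localhost, then 127.0.0.1, then ::1) in the same order as the entrypoint."""
    host_map = mapping["host_map"]
    for key in ("localhost", "127.0.0.1", "::1"):
        if key in host_map:
            return host_map[key]["source"][0]
    raise KeyError("no local entry in host_map")


print(local_source_address(sample_mapping))  # -> 172.24.0.4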
+import json import logging +import os import signal import sys from collections import defaultdict @@ -27,10 +29,12 @@ from grpc_health.v1 import health_pb2_grpc from medusa import backup_node +from medusa import purge from medusa.backup_manager import BackupMan from medusa.config import load_config from medusa.listing import get_backups from medusa.purge import delete_backup +from medusa.restore_cluster import RestoreJob from medusa.service.grpc import medusa_pb2 from medusa.service.grpc import medusa_pb2_grpc from medusa.storage import Storage @@ -38,6 +42,7 @@ TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S' BACKUP_MODE_DIFFERENTIAL = "differential" BACKUP_MODE_FULL = "full" +RESTORE_MAPPING_LOCATION = "/var/lib/cassandra/.restore_mapping" class Server: @@ -209,6 +214,7 @@ def GetBackups(self, request, context): for node in backup.tokenmap: summary.nodes.append(create_token_map_node(backup, node)) + summary.backupType = backup.backup_type response.backups.append(summary) except Exception as e: @@ -230,6 +236,52 @@ def DeleteBackup(self, request, context): logging.exception("Deleting backup {} failed".format(request.name)) return response + def PurgeBackups(self, request, context): + logging.info("Purging backups with max age {} and max count {}" + .format(self.config.storage.max_backup_age, self.config.storage.max_backup_count)) + response = medusa_pb2.PurgeBackupsResponse() + + try: + (nb_objects_purged, total_purged_size, total_objects_within_grace, nb_backups_purged) = purge.main( + self.config, + max_backup_age=int(self.config.storage.max_backup_age), + max_backup_count=int(self.config.storage.max_backup_count)) + response.nbObjectsPurged = nb_objects_purged + response.totalPurgedSize = total_purged_size + response.totalObjectsWithinGcGrace = total_objects_within_grace + response.nbBackupsPurged = nb_backups_purged + + except Exception as e: + context.set_details("purging backups failed: {}".format(e)) + context.set_code(grpc.StatusCode.INTERNAL) + logging.exception("Purging backups failed") + return response + + def PrepareRestore(self, request, context): + logging.info("Preparing restore {} for backup {}".format(request.restoreKey, request.backupName)) + response = medusa_pb2.PrepareRestoreResponse() + try: + backups = get_backups(self.config, True) + for cluster_backup in backups: + if cluster_backup.name == request.backupName: + restore_job = RestoreJob(cluster_backup, + self.config, Path("/tmp"), + None, + "127.0.0.1", + True, + False, + 1, + bypass_checks=True) + restore_job.prepare_restore() + os.makedirs(RESTORE_MAPPING_LOCATION, exist_ok=True) + with open(f"{RESTORE_MAPPING_LOCATION}/{request.restoreKey}", "w") as f: + f.write(json.dumps({'in_place': restore_job.in_place, 'host_map': restore_job.host_map})) + except Exception as e: + context.set_details("Failed to prepare restore: {}".format(e)) + context.set_code(grpc.StatusCode.INTERNAL) + logging.exception("Failed restore prep {} for backup {}".format(request.restoreKey, request.backupName)) + return response + # Callback function for recording unique backup results def record_backup_info(future): diff --git a/medusa/storage/cluster_backup.py b/medusa/storage/cluster_backup.py index 39f156852..182d85fb4 100644 --- a/medusa/storage/cluster_backup.py +++ b/medusa/storage/cluster_backup.py @@ -61,6 +61,10 @@ def schema(self): self._schema = self._first_nodebackup.schema return self._schema + @property + def backup_type(self): + return "differential" if self._first_nodebackup.is_differential else "full" + def is_complete(self): 
return not self.missing_nodes() and all(map(operator.attrgetter('finished'), self.node_backups.values())) diff --git a/medusa/storage/google_storage.py b/medusa/storage/google_storage.py index bc0779d7c..9643e8e69 100644 --- a/medusa/storage/google_storage.py +++ b/medusa/storage/google_storage.py @@ -105,7 +105,10 @@ def _download_paths(self, gsutil, parent, src_paths, old_dest): # we made src_paths a list of Path objects, but we need strings for copying # plus, we must not forget to point them to the bucket srcs = ['gs://{}/{}'.format(self.bucket.name, str(p)) for p in src_paths] - return gsutil.cp(srcs=srcs, dst=new_dest) + return gsutil.cp( + srcs=srcs, + dst=new_dest, + parallel_process_count=self.config.concurrent_transfers) def get_object_datetime(self, blob): logging.debug("Blob {} last modification time is {}".format(blob.name, blob.extra["last_modified"])) diff --git a/requirements.txt b/requirements.txt index 91651505c..5a2c2b8fc 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,7 +11,7 @@ cryptography<=3.3.2,>=2.5 pycryptodome>=3.9.9 retrying>=1.3.3 ssh2-python==0.22.0 -ssh-python>=0.6.0 +ssh-python>=0.8.0 parallel-ssh==2.2.0 requests==2.22.0 wheel>=0.32.0 @@ -20,3 +20,4 @@ greenlet fasteners==0.16 datadog botocore>=1.13.27 +dnspython>=2.2.1 diff --git a/run_integration_tests.sh b/run_integration_tests.sh index 0fe305ca1..ce8a05f91 100755 --- a/run_integration_tests.sh +++ b/run_integration_tests.sh @@ -26,6 +26,7 @@ AZURE="no" IBM="no" MINIO="no" LOGGING_FLAGS="" +COVERAGE="yes" while test $# -gt 0; do case "$1" in @@ -43,6 +44,7 @@ while test $# -gt 0; do echo "--ibm Include IBM in the storage backends" echo "--minio Include MinIO in the storage backends" echo "--cassandra-version Cassandra version to test" + echo "--no-coverage Disable coverage evaluation" echo "-v Verbose output (logging won't be captured by behave)" exit 0 ;; @@ -97,6 +99,10 @@ while test $# -gt 0; do CASSANDRA_VERSION=`echo $1 | sed -e 's/^[^=]*=//g'` shift ;; + --no-coverage) + COVERAGE="no" + shift + ;; *) break ;; @@ -172,4 +178,9 @@ else CASSANDRA_VERSION_FLAG="-D cassandra-version=${CASSANDRA_VERSION}" fi -PYTHONPATH=../.. coverage run --source='../../medusa' -m behave --stop $SCENARIO --tags=$STORAGE_TAGS $LOGGING $CASSANDRA_VERSION_FLAG +if [ "$COVERAGE" == "yes" ] +then + PYTHONPATH=../.. coverage run --source='../../medusa' -m behave --stop $SCENARIO --tags=$STORAGE_TAGS $LOGGING $CASSANDRA_VERSION_FLAG +else + PYTHONPATH=../.. 
python3 -m behave --stop $SCENARIO --tags=$STORAGE_TAGS $LOGGING $CASSANDRA_VERSION_FLAG +fi diff --git a/setup.py b/setup.py index caa8a96f8..c4a8588ce 100644 --- a/setup.py +++ b/setup.py @@ -55,7 +55,7 @@ 'retrying>=1.3.3', 'parallel-ssh==2.2.0', 'ssh2-python==0.22.0', - 'ssh-python>=0.6.0', + 'ssh-python>=0.8.0', 'requests==2.22.0', 'protobuf>=3.12.0', 'grpcio>=1.29.0', @@ -66,6 +66,7 @@ 'fasteners==0.16', 'datadog', 'botocore>=1.13.27', + 'dnspython>=2.2.1', ], extras_require={ 'S3': ["awscli>=1.16.291"], diff --git a/tests/config_test.py b/tests/config_test.py index e4f869e40..56acc040f 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -109,6 +109,7 @@ def test_args_settings_override(self): 'query': 'SELECT * FROM greek_mythology', 'use_mgmt_api': 'True', 'username': 'Zeus', + 'fqdn': 'localhost', } config = medusa.config.load_config(args, self.medusa_config_file) assert config.storage.bucket_name == 'Hector' diff --git a/tests/integration/features/integration_tests.feature b/tests/integration/features/integration_tests.feature index 548e0c088..a4f563fa3 100644 --- a/tests/integration/features/integration_tests.feature +++ b/tests/integration/features/integration_tests.feature @@ -699,14 +699,19 @@ Feature: Integration tests When I create the "test" table in keyspace "medusa" When I load 100 rows in the "medusa.test" table When I run a "ccm node1 nodetool -- -Dcom.sun.jndi.rmiURLParsing=legacy flush" command - When I perform a backup over gRPC in "differential" mode of the node named "grpc_backup_0" + When I perform a backup over gRPC in "differential" mode of the node named "grpc_backup_2" Then the backup index exists - Then I verify over gRPC that the backup "grpc_backup_0" exists - Then I can see the backup index entry for "grpc_backup_0" - Then I can see the latest backup for "127.0.0.1" being called "grpc_backup_0" - Then I verify over gRPC that the backup "grpc_backup_0" has expected status SUCCESS - Then I delete the backup "grpc_backup_0" over gRPC - Then I verify over gRPC the backup "grpc_backup_0" does not exist + Then I verify over gRPC that the backup "grpc_backup_2" exists and is of type "differential" + Then I can see the backup index entry for "grpc_backup_2" + Then I can see the latest backup for "127.0.0.1" being called "grpc_backup_2" + Then I wait for 10 seconds + When I perform a backup over gRPC in "differential" mode of the node named "grpc_backup_2_2" + Then I verify over gRPC that the backup "grpc_backup_2_2" exists and is of type "differential" + Then I can see the backup index entry for "grpc_backup_2_2" + Then I can see the latest backup for "127.0.0.1" being called "grpc_backup_2_2" + When I perform a purge over gRPC + Then 1 backup has been purged + Then I verify over gRPC that the backup "grpc_backup_2" does not exist Then I shutdown the gRPC server @local @@ -724,12 +729,12 @@ Feature: Integration tests When I run a "ccm node1 nodetool -- -Dcom.sun.jndi.rmiURLParsing=legacy flush" command When I perform a backup over gRPC in "differential" mode of the node named "grpc_backup_1" Then the backup index exists - Then I verify over gRPC that the backup "grpc_backup_1" exists + Then I verify over gRPC that the backup "grpc_backup_1" exists and is of type "differential" And I verify over gRPC that the backup "grpc_backup_1" has the expected placement information When I perform a backup over gRPC in "differential" mode of the node named "grpc_backup_1" and it fails Then I delete the backup "grpc_backup_1" over gRPC Then I delete the backup 
"grpc_backup_1" over gRPC and it fails - Then I verify over gRPC the backup "grpc_backup_1" does not exist + Then I verify over gRPC that the backup "grpc_backup_1" does not exist Then I shutdown the gRPC server @local @@ -738,7 +743,7 @@ Feature: Integration tests | local | with_client_encryption | @18 @skip-cassandra-2 - Scenario Outline: Perform a differential backup over gRPC , verify its index, then delete it over gRPC with management API + Scenario Outline: Perform differential backups over gRPC , verify its index, then delete it over gRPC with management API Given I have a fresh ccm cluster with mgmt api "" named "scenario18" Given I am using "" as storage provider in ccm cluster "" with mgmt api Then the gRPC server is up @@ -747,11 +752,17 @@ Feature: Integration tests When I run a "ccm node1 nodetool -- -Dcom.sun.jndi.rmiURLParsing=legacy flush" command When I perform a backup over gRPC in "differential" mode of the node named "grpc_backup_2" Then the backup index exists - Then I verify over gRPC that the backup "grpc_backup_2" exists + Then I verify over gRPC that the backup "grpc_backup_2" exists and is of type "differential" Then I can see the backup index entry for "grpc_backup_2" Then I can see the latest backup for "127.0.0.1" being called "grpc_backup_2" - Then I delete the backup "grpc_backup_2" over gRPC - Then I verify over gRPC the backup "grpc_backup_2" does not exist + Then I wait for 10 seconds + When I perform a backup over gRPC in "differential" mode of the node named "grpc_backup_2_2" + Then I verify over gRPC that the backup "grpc_backup_2_2" exists and is of type "differential" + Then I can see the backup index entry for "grpc_backup_2_2" + Then I can see the latest backup for "127.0.0.1" being called "grpc_backup_2_2" + When I perform a purge over gRPC + Then 1 backup has been purged + Then I verify over gRPC that the backup "grpc_backup_2" does not exist Then I shutdown the gRPC server Then I shutdown the mgmt api server @@ -906,12 +917,12 @@ Feature: Integration tests When I run a "ccm node1 nodetool -- -Dcom.sun.jndi.rmiURLParsing=legacy flush" command When I perform an async backup over gRPC in "differential" mode of the node named "grpc_backup_23" Then the backup index exists - Then I verify over gRPC that the backup "grpc_backup_23" exists + Then I verify over gRPC that the backup "grpc_backup_23" exists and is of type "differential" Then I can see the backup index entry for "grpc_backup_23" Then I can see the latest backup for "127.0.0.1" being called "grpc_backup_23" Then I verify over gRPC that the backup "grpc_backup_23" has expected status SUCCESS Then I delete the backup "grpc_backup_23" over gRPC - Then I verify over gRPC the backup "grpc_backup_23" does not exist + Then I verify over gRPC that the backup "grpc_backup_23" does not exist Then I verify that backup manager has removed the backup "grpc_backup_23" Then I shutdown the gRPC server diff --git a/tests/integration/features/steps/integration_steps.py b/tests/integration/features/steps/integration_steps.py index 21538cbc9..c831b5b38 100644 --- a/tests/integration/features/steps/integration_steps.py +++ b/tests/integration/features/steps/integration_steps.py @@ -330,6 +330,8 @@ def _i_have_a_fresh_ccm_cluster_with_mgmt_api_running(context, cluster_name, cli @given(r'I am using "{storage_provider}" as storage provider in ccm cluster "{client_encryption}"') def i_am_using_storage_provider(context, storage_provider, client_encryption): + context.storage_provider = storage_provider + 
context.client_encryption = client_encryption context.medusa_config = get_medusa_config(context, storage_provider, client_encryption, None) cleanup_storage(context, storage_provider) cleanup_monitoring(context) @@ -340,6 +342,8 @@ def i_am_using_storage_provider_with_grpc_server(context, storage_provider, clie config = parse_medusa_config(context, storage_provider, client_encryption, "http://127.0.0.1:8778/jolokia/", grpc=1, use_mgmt_api=1) + context.storage_provider = storage_provider + context.client_encryption = client_encryption context.grpc_server = GRPCServer(config) context.grpc_client = medusa.service.grpc.client.Client( "127.0.0.1:50051", @@ -369,8 +373,9 @@ def i_am_using_storage_provider_with_grpc_server_and_mgmt_api(context, storage_p config = parse_medusa_config(context, storage_provider, client_encryption, "http://127.0.0.1:8080/api/v0/ops/node/snapshots", use_mgmt_api=1, grpc=1) + context.storage_provider = storage_provider + context.client_encryption = client_encryption context.grpc_server = GRPCServer(config) - context.grpc_client = medusa.service.grpc.client.Client( "127.0.0.1:50051", channel_options=[('grpc.enable_retries', 0)] @@ -563,12 +568,12 @@ def _i_perform_grpc_backup_of_node_named_backupname_fails(context, backup_mode, pass -@then(r'I verify over gRPC that the backup "{backup_name}" exists') -def _i_verify_over_grpc_backup_exists(context, backup_name): +@then(r'I verify over gRPC that the backup "{backup_name}" exists and is of type "{backup_type}"') +def _i_verify_over_grpc_backup_exists(context, backup_name, backup_type): found = False backups = context.grpc_client.get_backups() for backup in backups: - if backup.backupName == backup_name: + if backup.backupName == backup_name and backup.backupType == backup_type: found = True break assert found is True @@ -628,7 +633,7 @@ def _i_delete_backup_grpc_fail(context, backup_name): pass -@then(r'I verify over gRPC the backup "{backup_name}" does not exist') +@then(r'I verify over gRPC that the backup "{backup_name}" does not exist') def _i_verify_over_grpc_backup_does_not_exist(context, backup_name): assert not context.grpc_client.backup_exists(backup_name) @@ -1269,6 +1274,21 @@ def _i_can_fecth_tokenmap_of_backup_named(context, backup_name): assert "127.0.0.1" in tokenmap +@when(r'I perform a purge over gRPC') +def _i_perform_a_purge_over_grpc_with_a_max_backup_count(context): + context.purge_result = context.grpc_client.purge_backups() + + +@then(r'{nb_purged_backups} backup has been purged') +def _backup_has_been_purged(context, nb_purged_backups): + assert context.purge_result.nbBackupsPurged == int(nb_purged_backups) + + +@then(r'I wait for {pause_duration} seconds') +def _i_wait_for_seconds(context, pause_duration): + time.sleep(int(pause_duration)) + + def connect_cassandra(is_client_encryption_enable, tls_version=PROTOCOL_TLS): connected = False attempt = 0 diff --git a/tests/network/hostname_resolver_test.py b/tests/network/hostname_resolver_test.py index 5c960200b..f8acc1a9f 100644 --- a/tests/network/hostname_resolver_test.py +++ b/tests/network/hostname_resolver_test.py @@ -14,12 +14,22 @@ # limitations under the License. 
import unittest -from unittest.mock import patch +from unittest.mock import patch, MagicMock, Mock from medusa.network.hostname_resolver import HostnameResolver mock_fqdn = "k8ssandra-dc1-default-sts-0.k8ssandra-dc1-all-pods-service.k8ssandra2022040617103007.svc.cluster.local" +mock_invalid_fqdn = "127-0-0-1.k8ssandra-dc1-all-pods-service.k8ssandra2022040617103007.svc.cluster.local" +mock_invalid_ipv6_fqdn = "2001-db8-85a3-8d3-1319-8a2e-370-7348.k8ssandra-dc1-all-pods-service.test.svc.cluster.local" mock_alias = "k8ssandra-dc1-default-sts-0" +mock_resolve = Mock() +mock_resolve.to_text = MagicMock(return_value=mock_fqdn) +mock_resolve_invalid = Mock() +mock_resolve_invalid.to_text = MagicMock(return_value=mock_invalid_fqdn) +mock_resolve_invalid_ipv6 = Mock() +mock_resolve_invalid_ipv6.to_text = MagicMock(return_value=mock_invalid_ipv6_fqdn) +mock_reverse = Mock() +mock_reverse.to_text = MagicMock(return_value="1.0.0.127-in-addr.arpa.") class HostnameResolverTest(unittest.TestCase): @@ -36,11 +46,51 @@ def test_address_resolving(self): def test_address_for_kubernetes(self): with patch('medusa.network.hostname_resolver.socket') as mock_socket: - mock_socket.getfqdn.return_value = mock_fqdn - hostname_resolver = HostnameResolver(resolve_addresses=True, k8s_mode=True) - self.assertEqual( - mock_alias, - hostname_resolver.resolve_fqdn("127.0.0.1")) + with patch('medusa.network.hostname_resolver.dns.resolver') as mock_resolver: + with patch('medusa.network.hostname_resolver.dns.reversename') as mock_reverser: + mock_socket.getfqdn.return_value = mock_fqdn + mock_resolver.resolve.return_value = [mock_resolve_invalid, mock_resolve_invalid_ipv6, mock_resolve] + mock_reverser.reverse.return_value = mock_reverse + hostname_resolver = HostnameResolver(resolve_addresses=True, k8s_mode=True) + self.assertEqual( + mock_alias, + hostname_resolver.resolve_fqdn("127.0.0.1")) + + def test_invalid_address_for_kubernetes(self): + with patch('medusa.network.hostname_resolver.socket') as mock_socket: + with patch('medusa.network.hostname_resolver.dns.resolver') as mock_resolver: + with patch('medusa.network.hostname_resolver.dns.reversename') as mock_reverser: + mock_socket.getfqdn.return_value = mock_invalid_fqdn + mock_resolver.resolve.return_value = [mock_resolve_invalid_ipv6] + mock_reverser.reverse.return_value = mock_reverse + hostname_resolver = HostnameResolver(resolve_addresses=True, k8s_mode=True) + self.assertNotEqual( + mock_alias, + hostname_resolver.resolve_fqdn("127.0.0.1")) + + def test_valid_address_for_kubernetes_ipv6(self): + with patch('medusa.network.hostname_resolver.socket') as mock_socket: + with patch('medusa.network.hostname_resolver.dns.resolver') as mock_resolver: + with patch('medusa.network.hostname_resolver.dns.reversename') as mock_reverser: + mock_socket.getfqdn.return_value = mock_invalid_fqdn + mock_resolver.resolve.return_value = [mock_resolve] + mock_reverser.reverse.return_value = mock_reverse + hostname_resolver = HostnameResolver(resolve_addresses=True, k8s_mode=True) + self.assertEqual( + mock_alias, + hostname_resolver.resolve_fqdn("::1")) + + def test_invalid_address_for_kubernetes_ipv6(self): + with patch('medusa.network.hostname_resolver.socket') as mock_socket: + with patch('medusa.network.hostname_resolver.dns.resolver') as mock_resolver: + with patch('medusa.network.hostname_resolver.dns.reversename') as mock_reverser: + mock_socket.getfqdn.return_value = mock_invalid_fqdn + mock_resolver.resolve.return_value = [mock_resolve_invalid_ipv6] + 
mock_reverser.reverse.return_value = mock_reverse + hostname_resolver = HostnameResolver(resolve_addresses=True, k8s_mode=True) + self.assertNotEqual( + mock_alias, + hostname_resolver.resolve_fqdn("::1")) def test_address_no_kubernetes(self): with patch('medusa.network.hostname_resolver.socket') as mock_socket: diff --git a/tests/resources/config/medusa-azure_blobs.ini b/tests/resources/config/medusa-azure_blobs.ini index ff356e62b..bce7894dd 100644 --- a/tests/resources/config/medusa-azure_blobs.ini +++ b/tests/resources/config/medusa-azure_blobs.ini @@ -1,7 +1,9 @@ [cassandra] nodetool_flags = "-Dcom.sun.jndi.rmiURLParsing=legacy" +use_sudo = false [storage] +use_sudo_for_restore = false host_file_separator = "," bucket_name = medusa-integration-tests key_file = ~/medusa_azure_credentials.json @@ -12,6 +14,8 @@ prefix = storage_prefix multi_part_upload_threshold = 1024 concurrent_transfers = 4 backup_grace_period_in_days = 0 +max_backup_count = 1 [monitoring] monitoring_provider = local + diff --git a/tests/resources/config/medusa-google_storage.ini b/tests/resources/config/medusa-google_storage.ini index ab1fead59..b741c98de 100644 --- a/tests/resources/config/medusa-google_storage.ini +++ b/tests/resources/config/medusa-google_storage.ini @@ -1,7 +1,10 @@ [cassandra] nodetool_flags = "-Dcom.sun.jndi.rmiURLParsing=legacy" +use_sudo = false [storage] +use_sudo_for_restore = false + host_file_separator = "," bucket_name = medusa-integration-tests key_file = ~/medusa_credentials.json @@ -10,6 +13,7 @@ fqdn = 127.0.0.1 base_path = /tmp prefix = storage_prefix backup_grace_period_in_days = 0 +max_backup_count = 1 [monitoring] monitoring_provider = local diff --git a/tests/resources/config/medusa-ibm_storage.ini b/tests/resources/config/medusa-ibm_storage.ini index f5f883abf..85ab432eb 100644 --- a/tests/resources/config/medusa-ibm_storage.ini +++ b/tests/resources/config/medusa-ibm_storage.ini @@ -1,7 +1,9 @@ [cassandra] nodetool_flags = "-Dcom.sun.jndi.rmiURLParsing=legacy" +use_sudo = false [storage] +use_sudo_for_restore = false host_file_separator = "," bucket_name = medusa-experiment-2 key_file = ~/.aws/ibm_credentials @@ -14,6 +16,7 @@ region = eu-smart transfer_max_bandwidth = "1MB/s" secure = True backup_grace_period_in_days = 0 +max_backup_count = 1 [monitoring] monitoring_provider = local diff --git a/tests/resources/config/medusa-kubernetes.ini b/tests/resources/config/medusa-kubernetes.ini index 47cf0bd77..f150a5c36 100644 --- a/tests/resources/config/medusa-kubernetes.ini +++ b/tests/resources/config/medusa-kubernetes.ini @@ -5,17 +5,20 @@ nodetool_version_cmd = nodetool -Dcom.sun.jndi.rmiURLParsing=legacy version cql_username = test_username cql_password = test_password nodetool_flags = "-Dcom.sun.jndi.rmiURLParsing=legacy" +use_sudo = false [storage] +use_sudo_for_restore = false storage_provider = bucket_name = cassandra_backups key_file = /etc/medusa/credentials max_backup_age = 0 -max_backup_count = 0 +max_backup_count = 1 transfer_max_bandwidth = 50MB/s concurrent_transfers = 1 multi_part_upload_threshold = 104857600 backup_grace_period_in_days = 0 +fqdn = localhost [kubernetes] enabled = true diff --git a/tests/resources/config/medusa-local.ini b/tests/resources/config/medusa-local.ini index 97797359b..17d02b66c 100644 --- a/tests/resources/config/medusa-local.ini +++ b/tests/resources/config/medusa-local.ini @@ -1,7 +1,9 @@ [cassandra] nodetool_flags = "-Dcom.sun.jndi.rmiURLParsing=legacy" +use_sudo = false [storage] +use_sudo_for_restore = false host_file_separator = 
"," bucket_name = medusa_it_bucket storage_provider = local @@ -9,6 +11,7 @@ fqdn = 127.0.0.1 base_path = /tmp prefix = storage_prefix backup_grace_period_in_days = 0 +max_backup_count = 1 [monitoring] monitoring_provider = local diff --git a/tests/resources/config/medusa-local_backup_gc_grace.ini b/tests/resources/config/medusa-local_backup_gc_grace.ini index e062bda75..ba3bf9358 100644 --- a/tests/resources/config/medusa-local_backup_gc_grace.ini +++ b/tests/resources/config/medusa-local_backup_gc_grace.ini @@ -1,7 +1,9 @@ [cassandra] nodetool_flags = "-Dcom.sun.jndi.rmiURLParsing=legacy" +use_sudo = false [storage] +use_sudo_for_restore = false host_file_separator = "," bucket_name = medusa_it_bucket storage_provider = local @@ -9,6 +11,7 @@ fqdn = 127.0.0.1 base_path = /tmp prefix = storage_prefix backup_grace_period_in_days = 1 +max_backup_count = 1 [monitoring] monitoring_provider = local diff --git a/tests/resources/config/medusa-minio.ini b/tests/resources/config/medusa-minio.ini index ed4d6476c..50699bce9 100644 --- a/tests/resources/config/medusa-minio.ini +++ b/tests/resources/config/medusa-minio.ini @@ -1,7 +1,9 @@ [cassandra] nodetool_flags = "-Dcom.sun.jndi.rmiURLParsing=legacy" +use_sudo = false [storage] +use_sudo_for_restore = false host_file_separator = "," bucket_name = medusa-dev key_file = ~/.aws/minio_credentials @@ -17,6 +19,7 @@ region = default host = localhost port = 9000 backup_grace_period_in_days = 0 +max_backup_count = 1 [monitoring] monitoring_provider = local diff --git a/tests/resources/config/medusa-s3_us_west_oregon.ini b/tests/resources/config/medusa-s3_us_west_oregon.ini index 09f289d86..92a5586be 100644 --- a/tests/resources/config/medusa-s3_us_west_oregon.ini +++ b/tests/resources/config/medusa-s3_us_west_oregon.ini @@ -1,7 +1,9 @@ [cassandra] nodetool_flags = "-Dcom.sun.jndi.rmiURLParsing=legacy" +use_sudo = false [storage] +use_sudo_for_restore = false host_file_separator = "," bucket_name = tlp-medusa-dev key_file = ~/.aws/medusa_credentials @@ -12,6 +14,7 @@ multi_part_upload_threshold = 1024 aws_cli_path = aws concurrent_transfers = 4 backup_grace_period_in_days = 0 +max_backup_count = 1 [monitoring] monitoring_provider = local diff --git a/tests/resources/config/medusa.ini b/tests/resources/config/medusa.ini index 2c24f0b91..c1304ba6b 100644 --- a/tests/resources/config/medusa.ini +++ b/tests/resources/config/medusa.ini @@ -17,7 +17,7 @@ storage_provider = bucket_name = cassandra_backups key_file = /etc/medusa/credentials max_backup_age = 0 -max_backup_count = 0 +max_backup_count = 1 transfer_max_bandwidth = 50MB/s concurrent_transfers = 1 multi_part_upload_threshold = 104857600 diff --git a/tox.ini b/tox.ini index d65658c2a..537db4d3c 100644 --- a/tox.ini +++ b/tox.ini @@ -11,7 +11,7 @@ deps = commands = python setup.py check -m -s - flake8 . --ignore=W503,E402 --exclude=medusa/service/grpc/medusa_pb2.py,.tox,venv,build + flake8 . --ignore=W503,E402 --exclude=medusa/service/grpc/medusa_pb2.py,medusa/service/grpc/medusa_pb2_grpc.py,.tox,venv,build,dist,debian pytest --cov=medusa --cov-report=xml -v {posargs:tests/} [flake8]