diff --git a/DB/NEW_KT_DB/Controller/DBClusterController.py b/DB/NEW_KT_DB/Controller/DBClusterController.py index b253975b..407d4ab8 100644 --- a/DB/NEW_KT_DB/Controller/DBClusterController.py +++ b/DB/NEW_KT_DB/Controller/DBClusterController.py @@ -1,18 +1,97 @@ -from Service import DBClusterService - +import sys +import os +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../..'))) +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))) +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) +from KT_Cloud.Storage.NEW_KT_Storage.DataAccess import StorageManager +from Service.Classes.DBClusterService import DBClusterService +from DataAccess import DBClusterManager +from Controller import DBInstanceController +from Service.Classes.DBInstanceService import DBInstanceService +from DataAccess import DBInstanceManager +from DataAccess import ObjectManager class DBClusterController: - def __init__(self, service: DBClusterService): + def __init__(self, service: DBClusterService, instance_controller:DBInstanceController): self.service = service + self.instance_controller = instance_controller def create_db_cluster(self, **kwargs): - self.service.create(**kwargs) + self.service.create(self.instance_controller,**kwargs) + + + def delete_db_cluster(self , cluster_identifier): + self.service.delete(self.instance_controller, cluster_identifier) + + + def modify_db_cluster(self, cluster_identifier, **updates): + self.service.modify(cluster_identifier,**updates) + + def describe_db_cluster(self, cluster_id): + return self.service.describe(cluster_id) + + def get_all_db_clusters(self): + return self.service.get_all_cluster() + + + +if __name__=='__main__': + + # desktop_path = os.path.join(os.path.expanduser('~'), 'Desktop') + # cluster_directory = os.path.join(desktop_path, f'Clusters/clusters.db') + # base = os.path.join(desktop_path, f'Clusters') + storage_manager = 
StorageManager.StorageManager('Instances') + db_file = ObjectManager.ObjectManager('Clusters/instances.db') + instance_manager = DBInstanceManager.DBInstanceManager(db_file) + instanceService = DBInstanceService(instance_manager) + instanceController = DBInstanceController.DBInstanceController(instanceService) + + storage_manager = StorageManager.StorageManager('Clusters') + clusterManager = DBClusterManager.DBClusterManager('Clusters/clusters.db') + clusterService = DBClusterService(clusterManager,storage_manager, 'Clusters') + clusterController = DBClusterController(clusterService,instanceController) + + # storage_manager = StorageManager.StorageManager('Instances') + # db_file = ObjectManager.ObjectManager('Clusters/instances.db') + # instance_manager = DBInstanceManager.DBInstanceManager(db_file) + # instanceService = DBInstanceService(instance_manager) + # instanceController = DBInstanceController.DBInstanceController(instanceService) + cluster_data = { + 'db_cluster_identifier': 'Cluster27', + 'engine': 'mysql', + 'allocated_storage':5, + 'db_subnet_group_name': 'my-subnet-group' + } + # clusterController.create_db_cluster(**cluster_data) + clusterController.delete_db_cluster('ClusterTest') + + # aa = clusterController.get_all_db_clusters() + # print(aa) + update_data = { + 'engine': 'postgres', + 'allocated_storage':3, + } + # clusterController.modify_db_cluster('myCluster1', **update_data) + + # dfgh = clusterController.describe_db_cluster('myCluster3') + # print(dfgh) + columns = ['db_cluster_identifier', 'engine', 'allocated_storage', 'copy_tags_to_snapshot', + 'db_cluster_instance_class', 'database_name', 'db_cluster_parameter_group_name', + 'db_subnet_group_name', 'deletion_protection', 'engine_version', 'master_username', + 'master_user_password', 'manage_master_user_password', 'option_group_name', 'port', + 'replication_source_identifier', 'storage_encrypted', 'storage_type', 'tags', + 'created_at', 'status', 'primary_writer_instance', 
'reader_instances', 'cluster_endpoint', + 'instances_endpoints', 'pk_column', 'pk_value'] + + #update configurations + # current_cluster = clusterController.describe_db_cluster('Cluster26') + # print(type(current_cluster)) + # cluster_dict = dict(zip(columns, current_cluster[0])) + # print(type(cluster_dict['reader_instances'])) + # if cluster_dict['reader_instances']!='[]': + # for id in cluster_dict['reader_instances']: + # print(id) - def delete_db_cluster(self): - self.service.delete() - def modify_db_cluster(self, updates): - self.service.modify(updates) - \ No newline at end of file diff --git a/DB/NEW_KT_DB/DataAccess/DBClusterManager.py b/DB/NEW_KT_DB/DataAccess/DBClusterManager.py index 802c229f..bf20a262 100644 --- a/DB/NEW_KT_DB/DataAccess/DBClusterManager.py +++ b/DB/NEW_KT_DB/DataAccess/DBClusterManager.py @@ -2,27 +2,81 @@ import json import sqlite3 from DataAccess import ObjectManager +from Models.DBClusterModel import Cluster +from typing import Optional class DBClusterManager: def __init__(self, db_file: str): '''Initialize ObjectManager with the database connection.''' - self.object_manager = ObjectManager(db_file) - self.table_name ='cluster_managment' - self.create_table() + self.object_manager = ObjectManager.ObjectManager(db_file) + self.object_name ='cluster' + self.pk_column = 'db_cluster_identifier' + self.table_schema = """ db_cluster_identifier TEXT PRIMARY KEY, + engine TEXT, + allocated_storage INTEGER, + copy_tags_to_snapshot BOOLEAN, + db_cluster_instance_class TEXT, + database_name TEXT, + db_cluster_parameter_group_name TEXT, + db_subnet_group_name TEXT, + deletion_protection BOOLEAN, + engine_version TEXT, + master_username TEXT, + master_user_password TEXT, + manage_master_user_password BOOLEAN, + option_group_name TEXT, + port INTEGER, + replication_source_identifier TEXT, + storage_encrypted BOOLEAN, + storage_type TEXT, + tags TEXT, + created_at TEXT, + status TEXT, + primary_writer_instance TEXT, + reader_instances TEXT, + 
cluster_endpoint TEXT, + instances_endpoints TEXT, + pk_column TEXT, + pk_value TEXT + """ + self.object_manager.create_management_table(self.object_name, table_structure = self.table_schema) + def createInMemoryDBCluster(self, cluster_to_save): + self.object_manager.save_in_memory(self.object_name, cluster_to_save) - def createInMemoryDBCluster(self): - self.object_manager.save_in_memory() + def deleteInMemoryDBCluster(self,cluster_identifier): + self.object_manager.delete_from_memory_by_pk(self.object_name, self.pk_column, cluster_identifier) - def deleteInMemoryDBCluster(self): - self.object_manager.delete_from_memory() + def describeDBCluster(self, cluster_id): + return self.object_manager.get_from_memory(self.object_name, criteria=f" {self.pk_column} = '{cluster_id}'") + def modifyDBCluster(self, cluster_id, updates): + self.object_manager.update_in_memory(self.object_name, updates, criteria=f" {self.pk_column} = '{cluster_id}'") - def describeDBCluster(self): - self.object_manager.get_from_memory() + def get(self, cluster_id: str): + data = self.object_manager.get_from_memory(self.object_name, criteria=f" {self.pk_column} = '{cluster_id}'") + if data: + data_mapping = {'db_cluster_identifier':cluster_id} + for key, value in data[cluster_id].items(): + data_mapping[key] = value + return Cluster(**data_mapping) + else: + return None + def is_db_instance_exist(self, db_cluster_identifier: str) -> bool: + """ + Check if a DBInstance with the given identifier exists in memory. - def modifyDBCluster(self): - self.object_manager.update_in_memory() - + Params: db_instance_identifier: The primary key (ID) of the DBInstance to check. + + Return: True if the DBInstance exists, otherwise False. 
+ """ + # Check if the object exists by its primary key in the management table + return bool(self.object_manager.db_manager.is_object_exist( + self.object_manager._convert_object_name_to_management_table_name(self.object_name), + criteria=f"{self.pk_column} = '{db_cluster_identifier}'" + )) + + def get_all_clusters(self): + return self.object_manager.get_all_objects_from_memory(self.object_name) \ No newline at end of file diff --git a/DB/NEW_KT_DB/DataAccess/DBManager.py b/DB/NEW_KT_DB/DataAccess/DBManager.py index bbd45fff..06314d32 100644 --- a/DB/NEW_KT_DB/DataAccess/DBManager.py +++ b/DB/NEW_KT_DB/DataAccess/DBManager.py @@ -204,4 +204,4 @@ def describe_table(self, table_name: str) -> Dict[str, str]: else: raise Exception(f'table {table_name} not found') except OperationalError as e: - raise Exception(f'Error describing table {table_name}: {e}') \ No newline at end of file + raise Exception(f'Error describing table {table_name}: {e}') diff --git a/DB/NEW_KT_DB/DataAccess/ObjectManager.py b/DB/NEW_KT_DB/DataAccess/ObjectManager.py index b75b2373..d8f6e244 100644 --- a/DB/NEW_KT_DB/DataAccess/ObjectManager.py +++ b/DB/NEW_KT_DB/DataAccess/ObjectManager.py @@ -1,12 +1,14 @@ from typing import Dict, Any, Optional import json import sqlite3 -from DBManager import DBManager +import sys +import os +from DataAccess import DBManager class ObjectManager: def __init__(self, db_file: str): '''Initialize ObjectManager with the database connection.''' - self.db_manager = DBManager(db_file) + self.db_manager = DBManager.DBManager(db_file) def create_management_table(self, object_name, table_structure='default', pk_column_data_type='INTEGER'): diff --git a/DB/NEW_KT_DB/Exception/exception.py b/DB/NEW_KT_DB/Exception/exception.py new file mode 100644 index 00000000..baa9404c --- /dev/null +++ b/DB/NEW_KT_DB/Exception/exception.py @@ -0,0 +1,59 @@ +class AlreadyExistsError(Exception): + """Raised when an object already exists.""" + pass + + +class 
DatabaseCreationError(Exception): + """Raised when there is an error creating the database.""" + pass + + +class ConnectionError(Exception): + """Raised when a connection to the database fails.""" + pass + + +class DatabaseNotFoundError(Exception): + """Raised when a database is not found.""" + pass + + +class DBInstanceNotFoundError(Exception): + """Raised when a database instance is not found.""" + pass + + +class MissingRequireParamError(Exception): + """Raised when a required parameter in a function is missing.""" + pass + + +class InvalidDBInstanceStateError(Exception): + """Raised when trying to perform an operation on DBInstaace when it is not in the appropriate status for this operation""" + pass + + +class ParamValidationError(Exception): + pass + + +class StartNonStoppedDBInstance(Exception): + """Raised when trying to start non stopped db instance""" + pass + + +class DatabaseCloneError(Exception): + """Custom exception for database cloning errors.""" + pass + +class DbSnapshotIdentifierNotFoundError(Exception): + """Raised when a db snapshot identifier is not found.""" + pass + +class InvalidQueryError(Exception): + """Raised when a query is not properly constructed or contains syntax errors.""" + pass + +class InvalidQueryError(Exception): + """Raised when a query is not properly constructed or contains syntax errors.""" + pass diff --git a/DB/NEW_KT_DB/Models/DBClusterModel.py b/DB/NEW_KT_DB/Models/DBClusterModel.py index 23170019..850c5439 100644 --- a/DB/NEW_KT_DB/Models/DBClusterModel.py +++ b/DB/NEW_KT_DB/Models/DBClusterModel.py @@ -1,75 +1,109 @@ from datetime import datetime from typing import Dict +import sys +import os +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../..'))) +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))) +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) from DataAccess import ObjectManager - +import json class Cluster: def 
__init__(self, **kwargs): self.db_cluster_identifier = kwargs['db_cluster_identifier'] self.engine = kwargs['engine'] - self.availability_zones = kwargs.get('availability_zones', None) + self.allocated_storage = kwargs['allocated_storage'] self.copy_tags_to_snapshot = kwargs.get('copy_tags_to_snapshot', False) + self.db_cluster_instance_class = kwargs.get('db_cluster_instance_class', False) self.database_name = kwargs.get('database_name', None) self.db_cluster_parameter_group_name = kwargs.get('db_cluster_parameter_group_name', None) self.db_subnet_group_name = kwargs.get('db_subnet_group_name', None) self.deletion_protection = kwargs.get('deletion_protection', False) - self.enable_cloudwatch_logs_exports = kwargs.get('enable_cloudwatch_logs_exports', None) - self.enable_global_write_forwarding = kwargs.get('enable_global_write_forwarding', False) - self.enable_http_endpoint = kwargs.get('enable_http_endpoint', False) - self.enable_limitless_database = kwargs.get('enable_limitless_database', False) - self.enable_local_write_forwarding = kwargs.get('enable_local_write_forwarding', False) self.engine_version = kwargs.get('engine_version', None) - self.global_cluster_identifier = kwargs.get('global_cluster_identifier', None) + self.master_username = kwargs.get('master_username',None) + self.master_user_password = kwargs.get('master_user_password',None) + self.manage_master_user_password = kwargs.get('manage_master_user_password',False) self.option_group_name = kwargs.get('option_group_name', None) self.port = kwargs.get('port', None) #handle defuelt values self.replication_source_identifier = kwargs.get('replication_source_identifier', None) - self.scaling_configuration = kwargs.get('scaling_configuration', None) self.storage_encrypted = kwargs.get('storage_encrypted', None) self.storage_type = kwargs.get('storage_type', 'aurora') self.tags = kwargs.get('tags', None) - self.created_at = datetime.now() + self.created_at = kwargs.get('created_at', datetime.now()) 
self.status = 'available' - self.instances = {} - self.primary_endpoint = None - # self.primary_writer_instance = create db instance - # self.reader_instances = replica of the instance in different azs - - self.pk_column = kwargs.get('pk_column', 'ClusterID') - self.pk_value = kwargs.get('pk_value', None) + self.primary_writer_instance = None + self.reader_instances = [] + self.cluster_endpoint = None + self.instances_endpoints = {} # Added attribute to store endpoints + self.pk_column = kwargs.get('pk_column', 'db_cluster_identifier') + self.pk_value = kwargs.get('pk_value', self.db_cluster_identifier) + self.table_schema = """ db_cluster_identifier TEXT PRIMARY KEY, + engine TEXT, + allocated_storage INTEGER, + copy_tags_to_snapshot BOOLEAN, + db_cluster_instance_class TEXT, + database_name TEXT, + db_cluster_parameter_group_name TEXT, + db_subnet_group_name TEXT, + deletion_protection BOOLEAN, + engine_version TEXT, + master_username TEXT, + master_user_password TEXT, + manage_master_user_password BOOLEAN, + option_group_name TEXT, + port INTEGER, + replication_source_identifier TEXT, + storage_encrypted BOOLEAN, + storage_type TEXT, + tags TEXT, + created_at TEXT, + status TEXT, + primary_writer_instance TEXT, + reader_instances TEXT, + cluster_endpoint TEXT, + instances_endpoints TEXT, + pk_column TEXT, + pk_value TEXT + """ def to_dict(self) -> Dict: '''Retrieve the data of the DB cluster as a dictionary.''' - return ObjectManager.convert_object_attributes_to_dictionary( - db_cluster_identifier=self.db_cluster_identifier, - engine=self.engine, - availability_zones=self.availability_zones, - copy_tags_to_snapshot=self.copy_tags_to_snapshot, - database_name=self.database_name, - db_cluster_parameter_group_name=self.db_cluster_parameter_group_name, - db_subnet_group_name=self.db_subnet_group_name, - deletion_protection=self.deletion_protection, - enable_cloudwatch_logs_exports=self.enable_cloudwatch_logs_exports, - 
enable_global_write_forwarding=self.enable_global_write_forwarding, - enable_http_endpoint=self.enable_http_endpoint, - enable_limitless_database=self.enable_limitless_database, - enable_local_write_forwarding=self.enable_local_write_forwarding, - engine_version=self.engine_version, - global_cluster_identifier=self.global_cluster_identifier, - option_group_name=self.option_group_name, - port=self.port, - replication_source_identifier=self.replication_source_identifier, - scaling_configuration=self.scaling_configuration, - storage_encrypted=self.storage_encrypted, - storage_type=self.storage_type, - tags=self.tags, - created_at=self.created_at, - status=self.status, - instances=self.instances, - # primary_writer_instance =self.primary_writer_instance, - # reader_instances =self.reader_instances + return ObjectManager.ObjectManager.convert_object_attributes_to_dictionary( + db_cluster_identifier=self.db_cluster_identifier, + engine=self.engine, + allocated_storage=self.allocated_storage, + copy_tags_to_snapshot=self.copy_tags_to_snapshot, + db_cluster_instance_class=self.db_cluster_instance_class, + database_name=self.database_name, + db_cluster_parameter_group_name=self.db_cluster_parameter_group_name, + db_subnet_group_name=self.db_subnet_group_name, + deletion_protection=self.deletion_protection, + engine_version=self.engine_version, + master_username = self.master_username, + master_user_password = self.master_user_password, + manage_master_user_password = self.manage_master_user_password, + option_group_name=self.option_group_name, + port=self.port, + replication_source_identifier=self.replication_source_identifier, + storage_encrypted=self.storage_encrypted, + storage_type=self.storage_type, + tags=self.tags, + created_at=self.created_at.isoformat(), + status=self.status, + primary_writer_instance=self.primary_writer_instance, + reader_instances=self.reader_instances, + cluster_endpoint = self.cluster_endpoint, + instances_endpoints=self.instances_endpoints, 
pk_column=self.pk_column, pk_value=self.pk_value - ) \ No newline at end of file + ) + + def to_sql(self): + # Convert the model instance to a dictionary + data_dict = self.to_dict() + values = '(' + ", ".join(f'\'{json.dumps(v)}\'' if isinstance(v, dict) or isinstance(v, list) else f'\'{v}\'' if isinstance(v, str) else f'\'{str(v)}\'' + for v in data_dict.values()) + ')' + return values \ No newline at end of file diff --git a/DB/NEW_KT_DB/Service/Classes/DBClusterService.py b/DB/NEW_KT_DB/Service/Classes/DBClusterService.py index 65acc86d..2fc08c7c 100644 --- a/DB/NEW_KT_DB/Service/Classes/DBClusterService.py +++ b/DB/NEW_KT_DB/Service/Classes/DBClusterService.py @@ -1,49 +1,227 @@ from typing import Dict, Optional -from DataAccess import ClusterManager + +import sys +import os +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../..'))) +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))) +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) + +from DataAccess import DBClusterManager from Models import DBClusterModel from Abc import DBO -from Validation import Validation +from Validation import DBClusterValiditions from DataAccess import DBClusterManager -class DBClusterService(DBO): - def __init__(self, dal: ClusterManager): + +# from DBInstanceService import DBInstanceService +import os +import json +from Validation.DBClusterValiditions import ( + validate_db_cluster_identifier, + validate_engine, + validate_database_name, + validate_db_cluster_parameter_group_name, + validate_db_subnet_group_name, + validate_port, + check_required_params, + validate_master_user_password, + validate_master_username +) +from Storage.NEW_KT_Storage.DataAccess.StorageManager import StorageManager + +class DBClusterService: + def __init__(self, dal: DBClusterManager, storage_manager: StorageManager, directory:str): self.dal = dal + self.directory = directory + self.storage_manager = 
storage_manager + if not self.storage_manager.is_directory_exist(directory): + self.storage_manager.create_directory(directory) - - # validations here - + def get_file_path(self, cluster_name: str): + return str(self.directory)+'\\'+str(cluster_name)+'.json' + + def is_cluster_exist(self, cluster_identifier: str): + cluster_path = self.get_file_path(cluster_identifier) + cluster_configurations_path = self.get_file_path(cluster_identifier+"_configurations") + if not self.storage_manager.is_file_exist(cluster_configurations_path) or not self.storage_manager.is_directory_exist(cluster_path): + return False + + if not self.dal.is_db_instance_exist(cluster_identifier): + return False + + return True + + + def create(self, instance_controller, **kwargs): - def create(self, **attributes): '''Create a new DBCluster.''' - # create object in code using DBClusterModel.init()- assign all **attributes - # create physical object as described in task - # save in memory using DBClusterManager.createInMemoryDBCluster() function - pass + # Validate required parameters + required_params = ['db_cluster_identifier', 'engine', 'db_subnet_group_name', 'allocated_storage'] + if not check_required_params(required_params, **kwargs): + raise ValueError("Missing required parameters") + + # Perform validations + if self.dal.is_db_instance_exist(kwargs.get('db_cluster_identifier')): + raise ValueError(f"Cluster {kwargs.get('db_cluster_identifier')} already exists") + + if not validate_db_cluster_identifier(kwargs.get('db_cluster_identifier')): + raise ValueError(f"Invalid DBClusterIdentifier: {kwargs.get('db_cluster_identifier')}") + + if not validate_engine(kwargs.get('engine', '')): + raise ValueError(f"Invalid engine: {kwargs.get('engine')}") + + if 'database_name' in kwargs and kwargs['database_name'] and not validate_database_name(kwargs['database_name']): + raise ValueError(f"Invalid DatabaseName: {kwargs['database_name']}") + + if 'db_cluster_parameter_group_name' in kwargs and 
kwargs['db_cluster_parameter_group_name'] and not validate_db_cluster_parameter_group_name(kwargs['db_cluster_parameter_group_name']): + raise ValueError(f"Invalid DBClusterParameterGroupName: {kwargs['db_cluster_parameter_group_name']}") + + if kwargs.get('db_subnet_group_name') and not validate_db_subnet_group_name(kwargs.get('db_subnet_group_name')): + raise ValueError(f"Invalid DBSubnetGroupName: {kwargs['db_subnet_group_name']}") + + if 'port' in kwargs and kwargs['port'] and not validate_port(kwargs['port']): + raise ValueError(f"Invalid port: {kwargs['port']}. Valid range is 1150-65535.") + + if 'master_username' in kwargs and not validate_master_username(kwargs['master_username']): + raise ValueError("Invalid master username") + + if 'master_user_password' in kwargs and not validate_master_user_password(kwargs['master_user_password'], kwargs.get('manage_master_user_password', False)): + raise ValueError("Invalid master user password") + + # Create the cluster object + cluster = DBClusterModel.Cluster(**kwargs) + + # Create physical folder structure + cluster_directory = str(self.directory)+'\\'+str(cluster.db_cluster_identifier) + self.storage_manager.create_directory(cluster_directory) + + # Set cluster endpoint + cluster.cluster_endpoint = cluster_directory + + primary_instance_name = f'{cluster.db_cluster_identifier}-primary' + primary_instance = instance_controller.create_db_instance( + db_instance_identifier=primary_instance_name, + # cluster_identifier=cluster.db_cluster_identifier, + allocated_storage=cluster.allocated_storage, + master_username=cluster.master_username, + master_user_password=cluster.master_user_password + ) + + # Retrieve primary instance details + primary_instance_json_string = primary_instance.get("DBInstance") + cluster.instances_endpoints["primary_instance"] = primary_instance_json_string.get("endpoint") + cluster.primary_writer_instance = primary_instance_json_string.get('db_instance_identifier') - def delete(self): + # # 
Create configuration file + configuration_file_path = cluster_directory+'\\'+cluster.db_cluster_identifier + "_configurations.json" + json_object = json.dumps(cluster.to_dict()) + self.storage_manager.create_file( + file_path=configuration_file_path, content=json_object) + + cluster_to_sql = cluster.to_sql() + return self.dal.createInMemoryDBCluster(cluster_to_sql) + + + + + def delete(self,instance_controller, cluster_identifier:str): '''Delete an existing DBCluster.''' - # assign None to code object - # delete physical object - # delete from memory using DBClusterManager.deleteInMemoryDBCluster() function- send criteria using self attributes - pass + + if not self.dal.is_db_instance_exist(cluster_identifier): + raise ValueError("Cluster does not exist!!") + + file_path = self.get_file_path(cluster_identifier+"_configurations") + self.storage_manager.delete_file(file_path=file_path) + + directory_path = str(self.directory)+'\\'+str(cluster_identifier) + self.storage_manager.delete_directory(directory_path) + + columns = ['db_cluster_identifier', 'engine', 'allocated_storage', 'copy_tags_to_snapshot', + 'db_cluster_instance_class', 'database_name', 'db_cluster_parameter_group_name', + 'db_subnet_group_name', 'deletion_protection', 'engine_version', 'master_username', + 'master_user_password', 'manage_master_user_password', 'option_group_name', 'port', + 'replication_source_identifier', 'storage_encrypted', 'storage_type', 'tags', + 'created_at', 'status', 'primary_writer_instance', 'reader_instances', 'cluster_endpoint', + 'instances_endpoints', 'pk_column', 'pk_value'] + + #update configurations + current_cluster = self.describe(cluster_identifier) + cluster_dict = dict(zip(columns, current_cluster[0])) + instance_controller.delete_db_instance(db_instance_identifier = cluster_dict['primary_writer_instance'],skip_final_snapshot = True) + if cluster_dict['reader_instances'] != '[]' : + for id in cluster_dict['reader_instances']: + 
instance_controller.delete_db_instance(db_instance_identifier = id, skip_final_snapshot = True) - def describe(self): + self.dal.deleteInMemoryDBCluster(cluster_identifier) + + + def describe(self, cluster_id): '''Describe the details of DBCluster.''' - # use DBClusterManager.describeDBCluster() function - pass + if not self.dal.is_db_instance_exist(cluster_id): + raise ValueError("Cluster does not exist!!") + + return self.dal.describeDBCluster(cluster_id) - def modify(self, **updates): + def modify(self, cluster_id: str, **kwargs): '''Modify an existing DBCluster.''' - # update object in code - # modify physical object - # update object in memory using DBClusterManager.modifyInMemoryDBCluster() function- send criteria using self attributes - pass + + if not self.dal.is_db_instance_exist(cluster_id): + raise ValueError("Cluster does not exist!!") + + if 'db_cluster_identifier' in kwargs and not validate_db_cluster_identifier(kwargs.get('db_cluster_identifier')): + raise ValueError(f"Invalid DBClusterIdentifier: {kwargs.get('db_cluster_identifier')}") + + if 'engine' in kwargs and not validate_engine(kwargs.get('engine', '')): + raise ValueError(f"Invalid engine: {kwargs.get('engine')}") + + if 'database_name' in kwargs and kwargs['database_name'] and not validate_database_name(kwargs['database_name']): + raise ValueError(f"Invalid DatabaseName: {kwargs['database_name']}") + + if 'db_cluster_parameter_group_name' in kwargs and kwargs['db_cluster_parameter_group_name'] and not validate_db_cluster_parameter_group_name(kwargs['db_cluster_parameter_group_name']): + raise ValueError(f"Invalid DBClusterParameterGroupName: {kwargs['db_cluster_parameter_group_name']}") + + if kwargs.get('db_subnet_group_name') and not validate_db_subnet_group_name(kwargs.get('db_subnet_group_name')): + raise ValueError(f"Invalid DBSubnetGroupName: {kwargs['db_subnet_group_name']}") + + if 'port' in kwargs and kwargs['port'] and not validate_port(kwargs['port']): + raise 
ValueError(f"Invalid port: {kwargs['port']}. Valid range is 1150-65535.") + + if 'master_username' in kwargs and not validate_master_username(kwargs['master_username']): + raise ValueError("Invalid master username") + + if 'master_user_password' in kwargs and not validate_master_user_password(kwargs['master_user_password'], kwargs.get('manage_master_user_password', False)): + raise ValueError("Invalid master user password") + + str_parts = ', '.join(f"{key} = '{value}'" for key, value in kwargs.items()) + + #update in memory + self.dal.modifyDBCluster(cluster_id,str_parts) + + columns = ['db_cluster_identifier', 'engine', 'allocated_storage', 'copy_tags_to_snapshot', + 'db_cluster_instance_class', 'database_name', 'db_cluster_parameter_group_name', + 'db_subnet_group_name', 'deletion_protection', 'engine_version', 'master_username', + 'master_user_password', 'manage_master_user_password', 'option_group_name', 'port', + 'replication_source_identifier', 'storage_encrypted', 'storage_type', 'tags', + 'created_at', 'status', 'primary_writer_instance', 'reader_instances', 'cluster_endpoint', + 'instances_endpoints', 'pk_column', 'pk_value'] + + #update configurations + current_cluster = self.describe(cluster_id) + cluster_dict = dict(zip(columns, current_cluster[0])) + cluster_string = json.dumps(cluster_dict, indent=4) + + file_path = self.get_file_path(cluster_id+'_configurations') + self.storage_manager.delete_file(file_path) + self.storage_manager.create_file(file_path, cluster_string) + + + def get_all_cluster(self): + return self.dal.get_all_clusters() + + - def get(self): - '''get code object.''' - # return real time object - pass + diff --git a/DB/NEW_KT_DB/Test/DBClusterTests.py b/DB/NEW_KT_DB/Test/DBClusterTests.py index a34dc75a..0f4b9635 100644 --- a/DB/NEW_KT_DB/Test/DBClusterTests.py +++ b/DB/NEW_KT_DB/Test/DBClusterTests.py @@ -1,10 +1,218 @@ +import os +import sys import pytest -from GeneralTests import test_file_exists -from Service import 
DBClusterService +import sqlite3 +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) + +from Storage.NEW_KT_Storage.DataAccess.StorageManager import StorageManager +from DB.NEW_KT_DB.Service.Classes.DBClusterService import DBClusterService +from DB.NEW_KT_DB.DataAccess.DBClusterManager import DBClusterManager +from Controller.DBClusterController import DBClusterController +from DataAccess.ObjectManager import ObjectManager +from Service.Classes.DBInstanceService import DBInstanceManager,DBInstanceService,AlreadyExistsError,ParamValidationError,DBInstanceNotFoundError +from Exception.exception import MissingRequireParamError +from Controller.DBInstanceController import DBInstanceController + +@pytest.fixture +def object_manager(): + return ObjectManager('Clusters/instances.db') + +@pytest.fixture +def db_instance_manager(object_manager): + return DBInstanceManager(object_manager) + +@pytest.fixture +def db_instance_service(db_instance_manager): + return DBInstanceService(db_instance_manager) + +# @pytest.fixture +# def snapshot_service(object_manager): +# return SnapShotService(SnapShotManager(object_manager)) + +@pytest.fixture +def db_instance_controller(db_instance_service): + return DBInstanceController(db_instance_service) + +@pytest.fixture +def db_cluster_manager(): + return DBClusterManager('Clusters/clusters.db') + +@pytest.fixture +def storage_manager(): + return StorageManager('Clusters') + +@pytest.fixture +def db_cluster_service(db_cluster_manager,storage_manager): + return DBClusterService(db_cluster_manager,storage_manager, 'Clusters') + +@pytest.fixture +def db_cluster_controller(db_cluster_service, db_instance_controller): + return DBClusterController(db_cluster_service, db_instance_controller) + +@pytest.fixture +def db_cluster_controller_with_cleanup(db_cluster_controller): + + # Return the controller, but ensure cleanup happens after the test + yield db_cluster_controller + + # Finalizer to clean up after 
the test + db_cluster_controller.delete_db_cluster('ClusterTest') + +def test_create_cluster_works(db_cluster_controller_with_cleanup): + cluster_data = { + 'db_cluster_identifier': 'ClusterTest', + 'engine': 'mysql', + 'allocated_storage':5, + 'db_subnet_group_name': 'my-subnet-group' + } + + res = db_cluster_controller_with_cleanup.create_db_cluster(**cluster_data) + # db_cluster_controller.delete_db_cluster('Cluster21') + assert res == None + +def test_create_cluster_missing_required_fields(db_cluster_controller): + cluster_data = { + 'db_cluster_identifier': 'ClusterTest', + 'allocated_storage':5, + 'db_subnet_group_name': 'my-subnet-group' + } + with pytest.raises(ValueError): + db_cluster_controller.create_db_cluster(**cluster_data) + +def test_create_cluster_invalide_identifier(db_cluster_controller): + cluster_data = { + 'db_cluster_identifier': 'Cluster__Test', + 'engine': 'mysql', + 'allocated_storage':5, + 'db_subnet_group_name': 'my-subnet-group' + } + with pytest.raises(ValueError): + db_cluster_controller.create_db_cluster(**cluster_data) + + +def test_create_cluster_already_exist(db_cluster_controller): + cluster_data = { + 'db_cluster_identifier': 'ClusterTest', + 'engine': 'mysql', + 'allocated_storage':5, + 'db_subnet_group_name': 'my-subnet-group' + } + db_cluster_controller.create_db_cluster(**cluster_data) + with pytest.raises(ValueError): + db_cluster_controller.create_db_cluster(**cluster_data) + + db_cluster_controller.delete_db_cluster('ClusterTest') + + + +def test_delete_cluster_works(db_cluster_controller): + cluster_data = { + 'db_cluster_identifier': 'ClusterTest', + 'engine': 'mysql', + 'allocated_storage':5, + 'db_subnet_group_name': 'my-subnet-group' + } + + db_cluster_controller.create_db_cluster(**cluster_data) + db_cluster_controller.delete_db_cluster('ClusterTest') + with pytest.raises(ValueError): + db_cluster_controller.delete_db_cluster('ClusterTest') + + +def test_delete_cluster_does_not_exist(db_cluster_controller): + with 
pytest.raises(ValueError): + db_cluster_controller.delete_db_cluster('ClusterTest') + + + +def test_describe_cluster_works(db_cluster_controller_with_cleanup): + cluster_data = { + 'db_cluster_identifier': 'ClusterTest', + 'engine': 'mysql', + 'allocated_storage':5, + 'db_subnet_group_name': 'my-subnet-group' + } + + db_cluster_controller_with_cleanup.create_db_cluster(**cluster_data) + res = db_cluster_controller_with_cleanup.describe_db_cluster('ClusterTest') + assert isinstance(res, list) + +def test_describe_cluster_does_not_exist(db_cluster_controller): + with pytest.raises(ValueError): + db_cluster_controller.describe_db_cluster('ClusterTest') + + +def test_modify_cluster_works(db_cluster_controller_with_cleanup): + cluster_data = { + 'db_cluster_identifier': 'ClusterTest', + 'engine': 'mysql', + 'allocated_storage':5, + 'db_subnet_group_name': 'my-subnet-group' + } + + db_cluster_controller_with_cleanup.create_db_cluster(**cluster_data) + update_data = { + 'engine': 'postgres', + } + db_cluster_controller_with_cleanup.modify_db_cluster('ClusterTest', **update_data) + res = db_cluster_controller_with_cleanup.describe_db_cluster('ClusterTest') + columns = ['db_cluster_identifier', 'engine', 'allocated_storage', 'copy_tags_to_snapshot', + 'db_cluster_instance_class', 'database_name', 'db_cluster_parameter_group_name', + 'db_subnet_group_name', 'deletion_protection', 'engine_version', 'master_username', + 'master_user_password', 'manage_master_user_password', 'option_group_name', 'port', + 'replication_source_identifier', 'storage_encrypted', 'storage_type', 'tags', + 'created_at', 'status', 'primary_writer_instance', 'reader_instances', 'cluster_endpoint', + 'instances_endpoints', 'pk_column', 'pk_value'] + + cluster_dict = dict(zip(columns, res[0])) + assert cluster_dict['engine'] == 'postgres' + +def test_modify_cluster_does_not_exist(db_cluster_controller): + update_data = { + 'engine': 'postgres', + } + with pytest.raises(ValueError): + 
db_cluster_controller.modify_db_cluster('ClusterTest',**update_data) + + +def test_modify_cluster_invalide_engine(db_cluster_controller_with_cleanup): + cluster_data = { + 'db_cluster_identifier': 'ClusterTest', + 'engine': 'mysql', + 'allocated_storage':5, + 'db_subnet_group_name': 'my-subnet-group' + } + + db_cluster_controller_with_cleanup.create_db_cluster(**cluster_data) + update_data = { + 'engine': 'invalid', + } + with pytest.raises(ValueError): + db_cluster_controller_with_cleanup.modify_db_cluster('ClusterTest',**update_data) + +def test_get_all_clusters(db_cluster_controller_with_cleanup): + cluster_data = { + 'db_cluster_identifier': 'ClusterTest', + 'engine': 'mysql', + 'allocated_storage':5, + 'db_subnet_group_name': 'my-subnet-group' + } + + db_cluster_controller_with_cleanup.create_db_cluster(**cluster_data) + res = db_cluster_controller_with_cleanup.get_all_db_clusters() + assert isinstance(res, list) + + + + + + + + + + + + + -def test_create(option_group, capsys): - """Test the create method.""" - DBClusterService.create("name"="DBClusterTest") - # ... 
- assert test_file_exists("DBClusterTest") \ No newline at end of file diff --git a/DB/NEW_KT_DB/Validation/DBClusterValiditions.py b/DB/NEW_KT_DB/Validation/DBClusterValiditions.py index fecf15d2..752119cd 100644 --- a/DB/NEW_KT_DB/Validation/DBClusterValiditions.py +++ b/DB/NEW_KT_DB/Validation/DBClusterValiditions.py @@ -1,6 +1,127 @@ import re -from GeneralValidations import +import sys +import os +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../..'))) +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))) +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) +from Validation import GeneralValidations +# def is_db_cluster_name_valid(cluster_name): +# return is_length_in_range(cluster_name, 5, 20) -def is_db_cluster_name_valid(cluster_name): - return is_length_in_range(cluster_name, 5, 20) +def validate_db_cluster_identifier(identifier: str) -> bool: + """ + Validates the DBClusterIdentifier based on the cluster type. + Returns True if valid, False otherwise. + """ + length_constraint = 52 + pattern = r'^[a-zA-Z][a-zA-Z0-9\-]{0,' + str(length_constraint - 1) + r'}(? bool: + # """ + # Validates the engine type. + # Returns True if valid, False otherwise. + # """ + # valid_engines = ['aurora-mysql', 'aurora-postgresql', 'mysql', 'postgres', 'neptune'] + # return engine in valid_engines + """ + Validates the engine type. + Reuses `string_in_dict` for engine validation. + """ + valid_engines = ['mysql', 'postgres'] + return GeneralValidations.string_in_dict(engine, dict.fromkeys(valid_engines, True)) + +def validate_database_name(database_name: str) -> bool: + """ + Validates the database name (if provided). + Returns True if valid, False otherwise. 
+ """ + # if not database_name: + # return True # No validation needed if not provided + # return re.match(r'^[a-zA-Z0-9]{1,64}$', database_name) is not None + if not database_name: + return True # No validation needed if not provided + return GeneralValidations.is_length_in_range(database_name, 1, 64) and GeneralValidations.is_string_matches_regex(database_name, r'^[a-zA-Z0-9]+$') + +def validate_db_cluster_parameter_group_name(group_name: str) -> bool: + """ + Validates the DBClusterParameterGroupName. + Returns True if valid, False otherwise. + """ + if not group_name: + return True # No validation needed if not provided + return re.match(r'^[a-zA-Z0-9\-]+$', group_name) is not None + +def validate_db_subnet_group_name(subnet_group_name: str) -> bool: + """ + Validates the DBSubnetGroupName. + Returns True if valid, False otherwise. + """ + if not subnet_group_name: + return True # No validation needed if not provided + return re.match(r'^[a-zA-Z0-9\-]+$', subnet_group_name) is not None + +def validate_port(port: int) -> bool: + """ + Validates the port number. + Returns True if valid, False otherwise. + """ + # return 1150 <= port <= 65535 + + return GeneralValidations.is_valid_number(port, 1150, 65535) +# from general_validations import ( +# is_length_in_range, +# is_string_matches_regex +# ) + +def validate_master_username(username: str) -> bool: + """ + Validates the MasterUsername. + Constraints: + - Must be 1 to 16 letters or numbers. + - First character must be a letter. + - Can't be a reserved word for the chosen database engine. 
+ """ + if not username: + return True # Not required, so no validation needed if not provided + if not GeneralValidations.is_length_in_range(username, 1, 16): + return False + if not username[0].isalpha(): # First character must be a letter + return False + # Optional: Add a list of reserved words for specific engines and check against that + reserved_words = [] # Define reserved words for the engine if applicable + if username.lower() in reserved_words: + return False + return GeneralValidations.is_string_matches_regex(username, r'^[a-zA-Z0-9]+$') # Letters and numbers only + +def validate_master_user_password(password: str, manage_master_user_password: bool) -> bool: + """ + Validates the MasterUserPassword. + Constraints: + - Must contain from 8 to 41 characters. + - Can contain any printable ASCII character except "/", "\"", or "@". + - Can't be specified if ManageMasterUserPassword is turned on. + """ + if not password: + return True # Not required, so no validation needed if not provided + if manage_master_user_password: + return False # Can't specify password if ManageMasterUserPassword is turned on + if not GeneralValidations.is_length_in_range(password, 8, 41): + return False + # Ensure password doesn't contain "/", "\"", or "@" + restricted_chars = ["/", "\\", "@"] + for char in restricted_chars: + if char in password: + return False + # Optionally, check for only printable ASCII characters (32 to 126 ASCII range) + if not all(32 <= ord(c) <= 126 for c in password): + return False + return True + +def check_required_params(required_params, **kwargs): + for param in required_params: + if param not in kwargs.keys(): + return False + return True diff --git a/Storage/KT_Storage/DataAccess/StorageManager.py b/Storage/KT_Storage/DataAccess/StorageManager.py index eee45534..94f7e5d2 100644 --- a/Storage/KT_Storage/DataAccess/StorageManager.py +++ b/Storage/KT_Storage/DataAccess/StorageManager.py @@ -3,9 +3,9 @@ import os import aiofiles import shutil -from Crypto.Cipher import 
AES -from Crypto.Util.Padding import pad -import base6import +from Cryptodome.Cipher import AES +from Cryptodome.Util.Padding import pad +# import base6import URL_SERVER = 's3/KT_cloud/Storage/server' @@ -48,7 +48,7 @@ def get(self, bucket, key , version_id) -> Dict[str, Any]: file_path = os.path.join(self.server_path, bucket, versioned_file_name) if not os.path.exists(file_path): - return {'error': 'File not found'} + raise FileNotFoundError(f"File '{key}' with version '{version_id}' not found in bucket '{bucket}'.") if os.path.isdir(file_path): # If the object is a directory, return its metadata and list of contents @@ -76,7 +76,7 @@ def get(self, bucket, key , version_id) -> Dict[str, Any]: def delete_by_name(self, bucket_name, version_id, key) -> None: """Delete a specified file or directory by name in a bucket and version.""" file_name, file_extension = os.path.splitext(key) - file_name_path = f"{file_name}{file_extension}" + file_name_path = f"{file_name}.v{version_id}{file_extension}" file_path = os.path.join(self.server_path, bucket_name, file_name_path) if os.path.exists(file_path): diff --git a/Storage/KT_Storage/DataAccess/VersionManager.py b/Storage/KT_Storage/DataAccess/VersionManager.py index dfccf931..d7aca55c 100644 --- a/Storage/KT_Storage/DataAccess/VersionManager.py +++ b/Storage/KT_Storage/DataAccess/VersionManager.py @@ -1,10 +1,16 @@ -from DataAccess import StorageManager +# from DataAccess import StorageManager from typing import Dict, Any import json import aiofiles import os -from Models.VesionModel import Version +import sys +import os +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))) +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '.'))) + +from ..Models.VesionModel import Version from .StorageManager import StorageManager class VersionManager: def __init__(self, metadata_file="s3 
project/KT_Cloud/Storage/server/metadata.json"): diff --git a/Storage/KT_Storage/Models/VesionModel.py b/Storage/KT_Storage/Models/VesionModel.py index 6bbf2ad5..5cdb8b00 100644 --- a/Storage/KT_Storage/Models/VesionModel.py +++ b/Storage/KT_Storage/Models/VesionModel.py @@ -1,7 +1,7 @@ from datetime import datetime import hashlib -from AclModel import Acl -from Tag import Tag +from .AclModel import Acl +from .Tag import Tag # domain/versioning.py