From e3e93a485f09cd3b0674d55c9f3490786a0d3d7e Mon Sep 17 00:00:00 2001 From: Balint David Date: Wed, 5 Jul 2023 12:40:40 +0200 Subject: [PATCH 1/7] delete redshift related source code --- .../api/Objects/Environment/queries.py | 10 - .../api/Objects/Environment/resolvers.py | 16 - .../api/Objects/RedshiftCluster/__init__.py | 9 - .../Objects/RedshiftCluster/input_types.py | 40 -- .../api/Objects/RedshiftCluster/mutations.py | 100 --- .../api/Objects/RedshiftCluster/queries.py | 64 -- .../api/Objects/RedshiftCluster/resolvers.py | 593 ----------------- .../api/Objects/RedshiftCluster/schema.py | 96 --- backend/dataall/api/Objects/__init__.py | 1 - backend/dataall/api/constants.py | 7 - backend/dataall/aws/handlers/glue.py | 32 - backend/dataall/aws/handlers/redshift.py | 618 ------------------ backend/dataall/cdkproxy/stacks/__init__.py | 1 - .../cdkproxy/stacks/policies/__init__.py | 4 +- .../cdkproxy/stacks/policies/redshift.py | 72 -- .../cdkproxy/stacks/redshift_cluster.py | 189 ------ backend/dataall/db/api/__init__.py | 1 - backend/dataall/db/api/environment.py | 51 +- backend/dataall/db/api/redshift_cluster.py | 583 ----------------- backend/dataall/db/models/RedshiftCluster.py | 41 -- .../db/models/RedshiftClusterDataset.py | 17 - .../db/models/RedshiftClusterDatasetTable.py | 20 - backend/dataall/db/models/__init__.py | 3 - 23 files changed, 3 insertions(+), 2565 deletions(-) delete mode 100644 backend/dataall/api/Objects/RedshiftCluster/__init__.py delete mode 100644 backend/dataall/api/Objects/RedshiftCluster/input_types.py delete mode 100644 backend/dataall/api/Objects/RedshiftCluster/mutations.py delete mode 100644 backend/dataall/api/Objects/RedshiftCluster/queries.py delete mode 100644 backend/dataall/api/Objects/RedshiftCluster/resolvers.py delete mode 100644 backend/dataall/api/Objects/RedshiftCluster/schema.py delete mode 100644 backend/dataall/aws/handlers/glue.py delete mode 100644 backend/dataall/aws/handlers/redshift.py delete mode 100644 backend/dataall/cdkproxy/stacks/policies/redshift.py delete mode 100644 backend/dataall/cdkproxy/stacks/redshift_cluster.py delete mode 100644 backend/dataall/db/api/redshift_cluster.py delete mode 100644 backend/dataall/db/models/RedshiftCluster.py delete mode 100644 backend/dataall/db/models/RedshiftClusterDataset.py delete mode 100644 backend/dataall/db/models/RedshiftClusterDatasetTable.py diff --git a/backend/dataall/api/Objects/Environment/queries.py b/backend/dataall/api/Objects/Environment/queries.py index 18f266c3f..acc59dca8 100644 --- a/backend/dataall/api/Objects/Environment/queries.py +++ b/backend/dataall/api/Objects/Environment/queries.py @@ -71,16 +71,6 @@ test_scope='Environment', ) -listEnvironmentRedshiftClusters = gql.QueryField( - name='listEnvironmentClusters', - type=gql.Ref('RedshiftClusterSearchResult'), - args=[ - gql.Argument(name='environmentUri', type=gql.NonNullableType(gql.String)), - gql.Argument(name='filter', type=gql.Ref('RedshiftClusterFilter')), - ], - resolver=list_environment_redshift_clusters, -) - listEnvironmentInvitedGroups = gql.QueryField( name='listEnvironmentInvitedGroups', diff --git a/backend/dataall/api/Objects/Environment/resolvers.py b/backend/dataall/api/Objects/Environment/resolvers.py index 903592cf2..4c3fb690e 100644 --- a/backend/dataall/api/Objects/Environment/resolvers.py +++ b/backend/dataall/api/Objects/Environment/resolvers.py @@ -549,22 +549,6 @@ def delete_environment( return True -def list_environment_redshift_clusters( - context: Context, source, environmentUri: str = 
None, filter: dict = None -): - if not filter: - filter = dict() - with context.engine.scoped_session() as session: - return Environment.paginated_environment_redshift_clusters( - session=session, - username=context.username, - groups=context.groups, - uri=environmentUri, - data=filter, - check_perm=True, - ) - - def enable_subscriptions( context: Context, source, environmentUri: str = None, input: dict = None ): diff --git a/backend/dataall/api/Objects/RedshiftCluster/__init__.py b/backend/dataall/api/Objects/RedshiftCluster/__init__.py deleted file mode 100644 index dfa46b264..000000000 --- a/backend/dataall/api/Objects/RedshiftCluster/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from . import ( - input_types, - mutations, - queries, - resolvers, - schema, -) - -__all__ = ['resolvers', 'schema', 'input_types', 'queries', 'mutations'] diff --git a/backend/dataall/api/Objects/RedshiftCluster/input_types.py b/backend/dataall/api/Objects/RedshiftCluster/input_types.py deleted file mode 100644 index 9a96b1740..000000000 --- a/backend/dataall/api/Objects/RedshiftCluster/input_types.py +++ /dev/null @@ -1,40 +0,0 @@ -from ... import gql - -NewClusterInput = gql.InputType( - name='NewClusterInput', - arguments=[ - gql.Argument(name='label', type=gql.NonNullableType(gql.String)), - gql.Argument(name='description', type=gql.String), - gql.Argument(name='nodeType', type=gql.NonNullableType(gql.String)), - gql.Argument(name='numberOfNodes', type=gql.NonNullableType(gql.Integer)), - gql.Argument(name='masterDatabaseName', type=gql.NonNullableType(gql.String)), - gql.Argument(name='masterUsername', type=gql.NonNullableType(gql.String)), - gql.Argument(name='databaseName', type=gql.String), - gql.Argument(name='vpc', type=gql.NonNullableType(gql.String)), - gql.Argument(name='subnetIds', type=gql.ArrayType(gql.String)), - gql.Argument(name='securityGroupIds', type=gql.ArrayType(gql.String)), - gql.Argument(name='tags', type=gql.ArrayType(gql.String)), - gql.Argument(name='SamlGroupName', type=gql.String), - ], -) - -ImportClusterInput = gql.InputType( - name='ImportClusterInput', - arguments=[ - gql.Argument(name='label', type=gql.NonNullableType(gql.String)), - gql.Argument(name='clusterIdentifier', type=gql.NonNullableType(gql.String)), - gql.Argument(name='description', type=gql.String), - gql.Argument(name='tags', type=gql.ArrayType(gql.String)), - gql.Argument(name='databaseName', type=gql.String), - gql.Argument(name='SamlGroupName', type=gql.String), - ], -) - -RedshiftClusterDatasetFilter = gql.InputType( - name='RedshiftClusterDatasetFilter', - arguments=[ - gql.Argument('term', gql.String), - gql.Argument('page', gql.Integer), - gql.Argument('pageSize', gql.Integer), - ], -) diff --git a/backend/dataall/api/Objects/RedshiftCluster/mutations.py b/backend/dataall/api/Objects/RedshiftCluster/mutations.py deleted file mode 100644 index e4586b9fe..000000000 --- a/backend/dataall/api/Objects/RedshiftCluster/mutations.py +++ /dev/null @@ -1,100 +0,0 @@ -from ... 
import gql -from .resolvers import * - -createRedshiftCluster = gql.MutationField( - name='createRedshiftCluster', - args=[ - gql.Argument(name='environmentUri', type=gql.String), - gql.Argument(name='clusterInput', type=gql.Ref('NewClusterInput')), - ], - type=gql.Ref('RedshiftCluster'), - resolver=create, -) - -deleteRedshiftCluster = gql.MutationField( - name='deleteRedshiftCluster', - args=[ - gql.Argument(name='clusterUri', type=gql.NonNullableType(gql.String)), - gql.Argument(name='deleteFromAWS', type=gql.Boolean), - ], - type=gql.Boolean, - resolver=delete, -) - -rebootRedshiftCluster = gql.MutationField( - name='rebootRedshiftCluster', - args=[gql.Argument(name='clusterUri', type=gql.NonNullableType(gql.String))], - type=gql.Boolean, - resolver=reboot_cluster, -) - -resumeRedshiftCluster = gql.MutationField( - name='resumeRedshiftCluster', - args=[gql.Argument(name='clusterUri', type=gql.NonNullableType(gql.String))], - type=gql.Boolean, - resolver=resume_cluster, -) - -pauseRedshiftCluster = gql.MutationField( - name='pauseRedshiftCluster', - args=[gql.Argument(name='clusterUri', type=gql.NonNullableType(gql.String))], - type=gql.Boolean, - resolver=pause_cluster, -) - -importRedshiftCluster = gql.MutationField( - name='importRedshiftCluster', - args=[ - gql.Argument(name='environmentUri', type=gql.NonNullableType(gql.String)), - gql.Argument( - name='clusterInput', type=gql.NonNullableType(gql.Ref('ImportClusterInput')) - ), - ], - type=gql.Ref('RedshiftCluster'), - resolver=import_cluster, -) - -addDatasetToRedshiftCluster = gql.MutationField( - name='addDatasetToRedshiftCluster', - args=[ - gql.Argument(name='clusterUri', type=gql.NonNullableType(gql.String)), - gql.Argument(name='datasetUri', type=gql.NonNullableType(gql.String)), - ], - type=gql.Boolean, - resolver=add_dataset_to_cluster, -) - - -removeDatasetFromRedshiftCluster = gql.MutationField( - name='removeDatasetFromRedshiftCluster', - args=[ - gql.Argument(name='clusterUri', type=gql.NonNullableType(gql.String)), - gql.Argument(name='datasetUri', type=gql.NonNullableType(gql.String)), - ], - type=gql.Boolean, - resolver=remove_dataset_from_cluster, -) - -enableRedshiftClusterDatasetTableCopy = gql.MutationField( - name='enableRedshiftClusterDatasetTableCopy', - args=[ - gql.Argument(name='clusterUri', type=gql.NonNullableType(gql.String)), - gql.Argument(name='datasetUri', type=gql.NonNullableType(gql.String)), - gql.Argument(name='tableUri', type=gql.NonNullableType(gql.String)), - gql.Argument(name='schema', type=gql.String), - gql.Argument(name='dataLocation', type=gql.String), - ], - type=gql.Boolean, - resolver=enable_dataset_table_copy, -) - -disableRedshiftClusterDatasetTableCopy = gql.MutationField( - name='disableRedshiftClusterDatasetTableCopy', - args=[ - gql.Argument(name='clusterUri', type=gql.NonNullableType(gql.String)), - gql.Argument(name='datasetUri', type=gql.NonNullableType(gql.String)), - gql.Argument(name='tableUri', type=gql.NonNullableType(gql.String)), - ], - type=gql.Boolean, - resolver=disable_dataset_table_copy, -) diff --git a/backend/dataall/api/Objects/RedshiftCluster/queries.py b/backend/dataall/api/Objects/RedshiftCluster/queries.py deleted file mode 100644 index 69cab7331..000000000 --- a/backend/dataall/api/Objects/RedshiftCluster/queries.py +++ /dev/null @@ -1,64 +0,0 @@ -from ... 
import gql -from .resolvers import * - -getRedshiftCluster = gql.QueryField( - name='getRedshiftCluster', - args=[gql.Argument(name='clusterUri', type=gql.NonNullableType(gql.String))], - type=gql.Ref('RedshiftCluster'), - resolver=get_cluster, -) - - -getRedshiftClusterConsoleAccess = gql.QueryField( - name='getRedshiftClusterConsoleAccess', - args=[gql.Argument(name='clusterUri', type=gql.NonNullableType(gql.String))], - type=gql.String, - resolver=get_console_access, -) - -listRedshiftClusterAvailableDatasets = gql.QueryField( - name='listRedshiftClusterAvailableDatasets', - args=[ - gql.Argument(name='clusterUri', type=gql.NonNullableType(gql.String)), - gql.Argument(name='filter', type=gql.Ref('RedshiftClusterDatasetFilter')), - ], - resolver=list_cluster_available_datasets, - type=gql.Ref('DatasetSearchResult'), -) - -listRedshiftClusterDatasets = gql.QueryField( - name='listRedshiftClusterDatasets', - args=[ - gql.Argument(name='clusterUri', type=gql.NonNullableType(gql.String)), - gql.Argument(name='filter', type=gql.Ref('RedshiftClusterDatasetFilter')), - ], - resolver=list_cluster_datasets, - type=gql.Ref('DatasetSearchResult'), -) - -listRedshiftClusterAvailableDatasetTables = gql.QueryField( - name='listRedshiftClusterAvailableDatasetTables', - type=gql.Ref('DatasetTableSearchResult'), - args=[ - gql.Argument(name='clusterUri', type=gql.NonNullableType(gql.String)), - gql.Argument(name='filter', type=gql.Ref('DatasetTableFilter')), - ], - resolver=list_available_cluster_dataset_tables, -) - -listRedshiftClusterCopiedDatasetTables = gql.QueryField( - name='listRedshiftClusterCopyEnabledTables', - type=gql.Ref('DatasetTableSearchResult'), - args=[ - gql.Argument(name='clusterUri', type=gql.NonNullableType(gql.String)), - gql.Argument(name='filter', type=gql.Ref('DatasetTableFilter')), - ], - resolver=list_copy_enabled_dataset_tables, -) - -getRedshiftClusterDatabaseCredentials = gql.QueryField( - name='getRedshiftClusterDatabaseCredentials', - args=[gql.Argument(name='clusterUri', type=gql.NonNullableType(gql.String))], - resolver=get_datahubdb_credentials, - type=gql.Ref('RedshiftClusterCredentials'), -) diff --git a/backend/dataall/api/Objects/RedshiftCluster/resolvers.py b/backend/dataall/api/Objects/RedshiftCluster/resolvers.py deleted file mode 100644 index 0fa854532..000000000 --- a/backend/dataall/api/Objects/RedshiftCluster/resolvers.py +++ /dev/null @@ -1,593 +0,0 @@ -import json -import logging - -from botocore.exceptions import ClientError - -from .... 
import db -from ...constants import RedshiftClusterRole -from ..Stack import stack_helper -from ....api.context import Context -from ....aws.handlers.redshift import Redshift -from ....aws.handlers.service_handlers import Worker -from ....aws.handlers.sts import SessionHelper -from ....db import permissions, models -from ....db.api import ResourcePolicy, KeyValueTag, Stack - -log = logging.getLogger(__name__) - - -def create( - context: Context, source, environmentUri: str = None, clusterInput: dict = None -): - - with context.engine.scoped_session() as session: - - cluster = db.api.RedshiftCluster.create( - session=session, - username=context.username, - groups=context.groups, - uri=environmentUri, - data=clusterInput, - check_perm=True, - ) - - log.debug(f'Create Redshift Cluster Stack: {cluster}') - - stack = Stack.create_stack( - session=session, - environment_uri=cluster.environmentUri, - target_type='redshift', - target_uri=cluster.clusterUri, - target_label=cluster.label, - ) - cluster.CFNStackName = stack.name if stack else None - - stack_helper.deploy_stack(targetUri=cluster.clusterUri) - cluster.userRoleForCluster = RedshiftClusterRole.Creator.value - return cluster - - -def import_cluster(context: Context, source, environmentUri: str, clusterInput: dict): - - with context.engine.scoped_session() as session: - - ResourcePolicy.check_user_resource_permission( - session=session, - username=context.username, - groups=context.groups, - resource_uri=environmentUri, - permission_name=permissions.CREATE_REDSHIFT_CLUSTER, - ) - db.api.Environment.check_group_environment_permission( - session=session, - username=context.username, - groups=context.groups, - uri=environmentUri, - group=clusterInput['SamlGroupName'], - permission_name=permissions.CREATE_REDSHIFT_CLUSTER, - ) - environment = db.api.Environment.get_environment_by_uri(session, environmentUri) - - aws_cluster_details = Redshift.describe_clusters( - **{ - 'accountid': environment.AwsAccountId, - 'region': environment.region, - 'cluster_id': clusterInput['clusterIdentifier'], - } - ) - - if not aws_cluster_details: - raise db.exceptions.AWSResourceNotFound( - action='IMPORT_REDSHIFT_CLUSTER', - message=f"{clusterInput['clusterIdentifier']} " - f'not found on AWS {environment.AwsAccountId}//{environment.region}', - ) - - cluster = models.RedshiftCluster( - environmentUri=environment.environmentUri, - organizationUri=environment.organizationUri, - owner=context.username, - label=clusterInput['label'], - description=clusterInput.get('description'), - tags=clusterInput.get('tags'), - region=environment.region, - AwsAccountId=environment.AwsAccountId, - imported=True, - SamlGroupName=clusterInput.get('SamlGroupName', environment.SamlGroupName), - ) - cluster = map_aws_details_to_model( - aws_cluster_details=aws_cluster_details, cluster=cluster - ) - session.add(cluster) - session.commit() - - stack = models.Stack( - targetUri=cluster.clusterUri, - accountid=cluster.AwsAccountId, - region=cluster.region, - stack='redshift', - ) - session.add(stack) - cluster.CFNStackName = f'stack-{stack.stackUri}' if stack else None - session.commit() - - redshift_assign_role_task = models.Task( - targetUri=cluster.clusterUri, - action='redshift.iam_roles.update', - ) - session.add(redshift_assign_role_task) - session.commit() - - log.info('Updating imported cluster iam_roles') - Worker.queue(engine=context.engine, task_ids=[redshift_assign_role_task.taskUri]) - - stack_helper.deploy_stack(targetUri=cluster.clusterUri) - - return cluster - - -def 
get_cluster(context: Context, source, clusterUri: str = None): - with context.engine.scoped_session() as session: - return db.api.RedshiftCluster.get_cluster( - session=session, - username=context.username, - groups=context.groups, - uri=clusterUri, - data=None, - check_perm=True, - ) - - -def resolve_user_role(context: Context, source: models.RedshiftCluster): - if not source: - return None - if context.username and source.owner == context.username: - return RedshiftClusterRole.Creator.value - elif context.groups and source.SamlGroupName in context.groups: - return RedshiftClusterRole.Admin.value - return RedshiftClusterRole.NoPermission.value - - -def get_cluster_status(context: Context, source: models.RedshiftCluster): - if not source: - return None - with context.engine.scoped_session() as session: - try: - aws_cluster = Redshift.describe_clusters( - **{ - 'accountid': source.AwsAccountId, - 'region': source.region, - 'cluster_id': source.name, - } - ) - if aws_cluster: - map_aws_details_to_model(aws_cluster, source) - if not source.external_schema_created: - task_init_db = models.Task( - targetUri=source.clusterUri, - action='redshift.cluster.init_database', - ) - session.add(task_init_db) - session.commit() - Worker.queue(engine=context.engine, task_ids=[task_init_db.taskUri]) - - return source.status - except ClientError as e: - log.error(f'Failed to retrieve cluster status due to: {e}') - - -def map_aws_details_to_model(aws_cluster_details, cluster): - cluster.name = aws_cluster_details.get('ClusterIdentifier') - cluster.status = aws_cluster_details.get('ClusterStatus') - cluster.numberOfNodes = aws_cluster_details.get('NumberOfNodes') - cluster.masterUsername = aws_cluster_details.get('MasterUsername') - cluster.masterDatabaseName = aws_cluster_details.get('DBName') - cluster.endpoint = ( - aws_cluster_details.get('Endpoint').get('Address') - if aws_cluster_details.get('Endpoint') - else None - ) - cluster.port = ( - aws_cluster_details.get('Endpoint').get('Port') - if aws_cluster_details.get('Endpoint') - else None - ) - cluster.subnetGroupName = aws_cluster_details.get('ClusterSubnetGroupName') - cluster.IAMRoles = ( - [role.get('IamRoleArn') for role in aws_cluster_details.get('IamRoles')] - if aws_cluster_details.get('IamRoles') - else None - ) - cluster.nodeType = aws_cluster_details.get('NodeType') - cluster.securityGroupIds = ( - [ - vpc.get('VpcSecurityGroupId') - for vpc in aws_cluster_details.get('VpcSecurityGroups') - ] - if aws_cluster_details.get('VpcSecurityGroups') - else None - ) - cluster.vpc = aws_cluster_details.get('VpcId') - cluster.tags = ( - [{tag.get('Key'), tag.get('Value')} for tag in aws_cluster_details.get('tags')] - if aws_cluster_details.get('tags') - else None - ) - return cluster - - -def get_cluster_organization(context: Context, source: models.RedshiftCluster): - if not source: - return None - with context.engine.scoped_session() as session: - org = session.query(models.Organization).get(source.organizationUri) - return org - - -def get_cluster_environment(context: Context, source: models.RedshiftCluster): - if not source: - return None - with context.engine.scoped_session() as session: - return db.api.Environment.get_environment_by_uri(session, source.environmentUri) - - -def delete( - context: Context, source, clusterUri: str = None, deleteFromAWS: bool = False -): - with context.engine.scoped_session() as session: - ResourcePolicy.check_user_resource_permission( - session=session, - resource_uri=clusterUri, - username=context.username, - 
groups=context.groups, - permission_name=permissions.DELETE_REDSHIFT_CLUSTER, - ) - cluster = db.api.RedshiftCluster.get_redshift_cluster_by_uri( - session, clusterUri - ) - env: models.Environment = db.api.Environment.get_environment_by_uri( - session, cluster.environmentUri - ) - db.api.RedshiftCluster.delete_all_cluster_linked_objects(session, clusterUri) - - KeyValueTag.delete_key_value_tags(session, cluster.clusterUri, 'redshift') - - session.delete(cluster) - - ResourcePolicy.delete_resource_policy( - session=session, - resource_uri=clusterUri, - group=cluster.SamlGroupName, - ) - - if deleteFromAWS: - stack_helper.delete_stack( - target_uri=clusterUri, - accountid=env.AwsAccountId, - cdk_role_arn=env.CDKRoleArn, - region=env.region - ) - - return True - - -def pause_cluster(context: Context, source, clusterUri: str = None): - with context.engine.scoped_session() as session: - ResourcePolicy.check_user_resource_permission( - session=session, - resource_uri=clusterUri, - username=context.username, - groups=context.groups, - permission_name=permissions.PAUSE_REDSHIFT_CLUSTER, - ) - cluster = db.api.RedshiftCluster.get_redshift_cluster_by_uri( - session, clusterUri - ) - Redshift.pause_cluster( - **{ - 'accountid': cluster.AwsAccountId, - 'region': cluster.region, - 'cluster_id': cluster.name, - } - ) - return True - - -def resume_cluster(context: Context, source, clusterUri: str = None): - with context.engine.scoped_session() as session: - ResourcePolicy.check_user_resource_permission( - session=session, - resource_uri=clusterUri, - username=context.username, - groups=context.groups, - permission_name=permissions.RESUME_REDSHIFT_CLUSTER, - ) - cluster = db.api.RedshiftCluster.get_redshift_cluster_by_uri( - session, clusterUri - ) - Redshift.resume_cluster( - **{ - 'accountid': cluster.AwsAccountId, - 'region': cluster.region, - 'cluster_id': cluster.name, - } - ) - return True - - -def reboot_cluster(context: Context, source, clusterUri: str = None): - with context.engine.scoped_session() as session: - ResourcePolicy.check_user_resource_permission( - session=session, - resource_uri=clusterUri, - username=context.username, - groups=context.groups, - permission_name=permissions.REBOOT_REDSHIFT_CLUSTER, - ) - cluster = db.api.RedshiftCluster.get_redshift_cluster_by_uri( - session, clusterUri - ) - Redshift.reboot_cluster( - **{ - 'accountid': cluster.AwsAccountId, - 'region': cluster.region, - 'cluster_id': cluster.name, - } - ) - return True - - -def get_console_access(context: Context, source, clusterUri: str = None): - with context.engine.scoped_session() as session: - ResourcePolicy.check_user_resource_permission( - session=session, - resource_uri=clusterUri, - username=context.username, - groups=context.groups, - permission_name=permissions.GET_REDSHIFT_CLUSTER_CREDENTIALS, - ) - cluster = db.api.RedshiftCluster.get_redshift_cluster_by_uri( - session, clusterUri - ) - environment = db.api.Environment.get_environment_by_uri( - session, cluster.environmentUri - ) - pivot_session = SessionHelper.remote_session(environment.AwsAccountId) - aws_session = SessionHelper.get_session( - base_session=pivot_session, - role_arn=environment.EnvironmentDefaultIAMRoleArn, - ) - url = SessionHelper.get_console_access_url( - aws_session, region=cluster.region, redshiftcluster=cluster.name - ) - return url - - -def add_dataset_to_cluster( - context: Context, source, clusterUri: str = None, datasetUri: str = None -): - with context.engine.scoped_session() as session: - cluster = 
db.api.RedshiftCluster.get_redshift_cluster_by_uri( - session, clusterUri - ) - aws_cluster = Redshift.describe_clusters( - **{ - 'accountid': cluster.AwsAccountId, - 'region': cluster.region, - 'cluster_id': cluster.name, - } - ) - if aws_cluster: - map_aws_details_to_model(aws_cluster, cluster) - cluster, dataset = db.api.RedshiftCluster.add_dataset( - session=session, - username=context.username, - groups=context.groups, - uri=clusterUri, - data={'datasetUri': datasetUri}, - check_perm=True, - ) - task = models.Task( - targetUri=cluster.clusterUri, - action='redshift.cluster.create_external_schema', - ) - session.add(task) - session.commit() - - Worker.queue(context.engine, [task.taskUri]) - return True - - -def remove_dataset_from_cluster( - context: Context, source, clusterUri: str = None, datasetUri: str = None -): - with context.engine.scoped_session() as session: - cluster, dataset = db.api.RedshiftCluster.remove_dataset_from_cluster( - session=session, - username=context.username, - groups=context.groups, - uri=clusterUri, - data={'datasetUri': datasetUri}, - check_perm=True, - ) - if dataset.environmentUri != cluster.environmentUri: - database = f'{dataset.GlueDatabaseName}shared' - else: - database = dataset.GlueDatabaseName - task = models.Task( - targetUri=cluster.clusterUri, - action='redshift.cluster.drop_external_schema', - payload={'database': database}, - ) - session.add(task) - session.commit() - - Worker.queue(context.engine, [task.taskUri]) - return True - - -def list_cluster_available_datasets( - context: Context, source, clusterUri: str = None, filter: dict = None -): - if not filter: - filter = {} - with context.engine.scoped_session() as session: - return db.api.RedshiftCluster.list_available_datasets( - session, - username=context.username, - groups=context.groups, - uri=clusterUri, - data=filter, - check_perm=True, - ) - - -def list_cluster_datasets( - context: Context, source, clusterUri: str = None, filter: dict = None -): - if not filter: - filter = {} - with context.engine.scoped_session() as session: - return db.api.RedshiftCluster.list_cluster_datasets( - session=session, - username=context.username, - groups=context.groups, - uri=clusterUri, - data=filter, - check_perm=True, - ) - - -def list_available_cluster_dataset_tables( - context: Context, source, clusterUri: str = None, filter: dict = None -): - if not filter: - filter = {} - with context.engine.scoped_session() as session: - return db.api.RedshiftCluster.list_available_cluster_tables( - session, - username=context.username, - groups=context.groups, - uri=clusterUri, - data=filter, - check_perm=True, - ) - - -def list_copy_enabled_dataset_tables( - context: Context, source, clusterUri: str = None, filter: dict = None -): - if not filter: - filter = {} - with context.engine.scoped_session() as session: - return db.api.RedshiftCluster.list_copy_enabled_tables( - session, - username=context.username, - groups=context.groups, - uri=clusterUri, - data=filter, - check_perm=True, - ) - - -def get_datahubdb_credentials(context: Context, source, clusterUri: str = None): - with context.engine.scoped_session() as session: - ResourcePolicy.check_user_resource_permission( - session=session, - resource_uri=clusterUri, - username=context.username, - groups=context.groups, - permission_name=permissions.GET_REDSHIFT_CLUSTER_CREDENTIALS, - ) - cluster = db.api.RedshiftCluster.get_redshift_cluster_by_uri( - session, clusterUri - ) - creds = Redshift.get_cluster_credentials( - **{ - 'accountid': cluster.AwsAccountId, - 
'region': cluster.region, - 'cluster_id': cluster.name, - 'secret_name': cluster.datahubSecret, - } - ) - return { - 'clusterUri': clusterUri, - 'endpoint': cluster.endpoint, - 'port': cluster.port, - 'database': cluster.databaseName, - 'user': cluster.databaseUser, - 'password': creds, - } - - -def resolve_stack(context: Context, source: models.RedshiftCluster, **kwargs): - if not source: - return None - return stack_helper.get_stack_with_cfn_resources( - targetUri=source.clusterUri, - environmentUri=source.environmentUri, - ) - - -def enable_dataset_table_copy( - context: Context, - source, - clusterUri: str = None, - datasetUri: str = None, - tableUri: str = None, - schema: str = None, - dataLocation: str = None, -): - with context.engine.scoped_session() as session: - cluster = db.api.RedshiftCluster.get_redshift_cluster_by_uri( - session, clusterUri - ) - db.api.RedshiftCluster.enable_copy_table( - session, - username=context.username, - groups=context.groups, - uri=clusterUri, - data={ - 'datasetUri': datasetUri, - 'tableUri': tableUri, - 'schema': schema, - 'dataLocation': dataLocation, - }, - check_perm=True, - ) - log.info( - f'Redshift copy tableUri {tableUri} starting for cluster' - f'{cluster.name} in account {cluster.AwsAccountId}' - ) - task = models.Task( - action='redshift.subscriptions.copy', - targetUri=cluster.environmentUri, - payload={ - 'datasetUri': datasetUri, - 'message': json.dumps({'clusterUri': clusterUri}), - 'tableUri': tableUri, - }, - ) - session.add(task) - session.commit() - - Worker.queue(context.engine, [task.taskUri]) - return True - - -def disable_dataset_table_copy( - context: Context, - source, - clusterUri: str = None, - datasetUri: str = None, - tableUri: str = None, -): - with context.engine.scoped_session() as session: - return db.api.RedshiftCluster.disable_copy_table( - session, - username=context.username, - groups=context.groups, - uri=clusterUri, - data={'datasetUri': datasetUri, 'tableUri': tableUri}, - check_perm=True, - ) diff --git a/backend/dataall/api/Objects/RedshiftCluster/schema.py b/backend/dataall/api/Objects/RedshiftCluster/schema.py deleted file mode 100644 index 4852caa88..000000000 --- a/backend/dataall/api/Objects/RedshiftCluster/schema.py +++ /dev/null @@ -1,96 +0,0 @@ -from ... 
import gql -from .resolvers import * -from ....api.constants import RedshiftClusterRole - -RedshiftCluster = gql.ObjectType( - name='RedshiftCluster', - fields=[ - gql.Field(name='clusterUri', type=gql.ID), - gql.Field(name='environmentUri', type=gql.String), - gql.Field(name='name', type=gql.String), - gql.Field(name='label', type=gql.String), - gql.Field(name='description', type=gql.String), - gql.Field(name='tags', type=gql.ArrayType(gql.String)), - gql.Field(name='owner', type=gql.String), - gql.Field(name='created', type=gql.String), - gql.Field(name='updated', type=gql.String), - gql.Field(name='AwsAccountId', type=gql.String), - gql.Field(name='region', type=gql.String), - gql.Field(name='clusterArn', type=gql.String), - gql.Field(name='clusterName', type=gql.String), - gql.Field(name='databaseName', type=gql.String), - gql.Field(name='databaseUser', type=gql.String), - gql.Field(name='datahubSecret', type=gql.String), - gql.Field(name='masterUsername', type=gql.String), - gql.Field(name='masterDatabaseName', type=gql.String), - gql.Field(name='masterSecret', type=gql.String), - gql.Field(name='nodeType', type=gql.String), - gql.Field(name='numberOfNodes', type=gql.Integer), - gql.Field(name='kmsAlias', type=gql.String), - gql.Field(name='subnetGroupName', type=gql.String), - gql.Field(name='CFNStackName', type=gql.String), - gql.Field(name='CFNStackStatus', type=gql.String), - gql.Field(name='CFNStackArn', type=gql.String), - gql.Field(name='port', type=gql.String), - gql.Field(name='endpoint', type=gql.String), - gql.Field(name='SamlGroupName', type=gql.String), - gql.Field(name='imported', type=gql.Boolean), - gql.Field(name='IAMRoles', type=gql.ArrayType(gql.String)), - gql.Field(name='vpc', type=gql.String), - gql.Field(name='subnetIds', type=gql.ArrayType(gql.String)), - gql.Field(name='securityGroupIds', type=gql.ArrayType(gql.String)), - gql.Field( - name='userRoleForCluster', - type=RedshiftClusterRole.toGraphQLEnum(), - resolver=resolve_user_role, - ), - gql.Field( - name='userRoleInEnvironment', type=RedshiftClusterRole.toGraphQLEnum() - ), - gql.Field( - 'organization', - type=gql.Ref('Organization'), - resolver=get_cluster_organization, - ), - gql.Field( - 'environment', type=gql.Ref('Environment'), resolver=get_cluster_environment - ), - gql.Field('status', type=gql.String, resolver=get_cluster_status), - gql.Field(name='stack', type=gql.Ref('Stack'), resolver=resolve_stack), - ], -) - - -RedshiftClusterSearchResult = gql.ObjectType( - name='RedshiftClusterSearchResult', - fields=[ - gql.Field(name='count', type=gql.Integer), - gql.Field(name='page', type=gql.Integer), - gql.Field(name='pages', type=gql.Integer), - gql.Field(name='hasNext', type=gql.Boolean), - gql.Field(name='hasPrevious', type=gql.Boolean), - gql.Field(name='nodes', type=gql.ArrayType(RedshiftCluster)), - ], -) - -RedshiftClusterFilter = gql.InputType( - name='RedshiftClusterFilter', - arguments=[ - gql.Argument('term', gql.String), - gql.Argument('roles', gql.ArrayType(gql.Ref('RedshiftClusterRole'))), - gql.Argument(name='page', type=gql.Integer), - gql.Argument(name='pageSize', type=gql.Integer), - ], -) - -RedshiftClusterCredentials = gql.ObjectType( - name='RedshiftClusterCredentials', - fields=[ - gql.Field(name='clusterUri', type=gql.ID), - gql.Field('endpoint', gql.String), - gql.Field('database', gql.String), - gql.Field('port', gql.Integer), - gql.Field('password', gql.String), - gql.Field('user', gql.String), - ], -) diff --git a/backend/dataall/api/Objects/__init__.py 
b/backend/dataall/api/Objects/__init__.py index 57fbb8129..161dc3562 100644 --- a/backend/dataall/api/Objects/__init__.py +++ b/backend/dataall/api/Objects/__init__.py @@ -21,7 +21,6 @@ Organization, Stack, Test, - RedshiftCluster, Glossary, Feed, Notification, diff --git a/backend/dataall/api/constants.py b/backend/dataall/api/constants.py index a8218e8b6..848f7491a 100644 --- a/backend/dataall/api/constants.py +++ b/backend/dataall/api/constants.py @@ -73,13 +73,6 @@ class GlossaryRole(GraphQLEnumMapper): NoPermission = '000' -class RedshiftClusterRole(GraphQLEnumMapper): - Creator = '950' - Admin = '900' - Shared = '300' - NoPermission = '000' - - class ScheduledQueryRole(GraphQLEnumMapper): Creator = '950' Admin = '900' diff --git a/backend/dataall/aws/handlers/glue.py b/backend/dataall/aws/handlers/glue.py deleted file mode 100644 index 06c1c8bc4..000000000 --- a/backend/dataall/aws/handlers/glue.py +++ /dev/null @@ -1,32 +0,0 @@ -import logging - -from botocore.exceptions import ClientError - -from .sts import SessionHelper - -log = logging.getLogger('aws:glue') - - -class Glue: - def __init__(self): - pass - - @staticmethod - def table_exists(**data): - accountid = data['accountid'] - region = data.get('region', 'eu-west-1') - database = data.get('database', 'UndefinedDatabaseName') - table_name = data.get('tablename', 'UndefinedTableName') - try: - table = ( - SessionHelper.remote_session(accountid) - .client('glue', region_name=region) - .get_table( - CatalogId=data['accountid'], DatabaseName=database, Name=table_name - ) - ) - log.info(f'Glue table found: {data}') - return table - except ClientError: - log.info(f'Glue table not found: {data}') - return None diff --git a/backend/dataall/aws/handlers/redshift.py b/backend/dataall/aws/handlers/redshift.py deleted file mode 100644 index 0810cbc09..000000000 --- a/backend/dataall/aws/handlers/redshift.py +++ /dev/null @@ -1,618 +0,0 @@ -import json -import logging -from datetime import datetime, timedelta - -from botocore.exceptions import ClientError - -from .glue import Glue -from .service_handlers import Worker -from .sts import SessionHelper -from ... 
import db -from ...db import models -from dataall.modules.datasets_base.db.models import DatasetTable, Dataset -from ...modules.datasets_base.db.dataset_repository import DatasetRepository - -log = logging.getLogger(__name__) - - -class Redshift: - def __init__(self): - pass - - @staticmethod - def get_cluster_from_task(engine, task: models.Task): - with engine.scoped_session() as session: - cluster: models.RedshiftCluster = ( - db.api.RedshiftCluster.get_redshift_cluster_by_uri( - session=session, uri=task.targetUri - ) - ) - return cluster - - @staticmethod - def describe_clusters(**data): - accountid = data['accountid'] - region = data.get('region', 'eu-west-1') - session = SessionHelper.remote_session(accountid) - client_redshift = session.client('redshift', region_name=region) - if data.get('cluster_id'): - try: - response = client_redshift.describe_clusters( - ClusterIdentifier=data.get('cluster_id'), MaxRecords=21 - ) - return response.get('Clusters')[0] - except ClientError as e: - log.error(e, exc_info=True) - raise e - else: - clusters = [] - try: - marker = None - is_pagination_available = True - while is_pagination_available: - paginator = client_redshift.get_paginator('describe_clusters') - response_iterator = paginator.paginate( - PaginationConfig={'PageSize': 50, 'StartingToken': marker} - ) - for page in response_iterator: - if 'Clusters' in page.keys(): - for cluster in page.get('Clusters'): - clusters.append(cluster) - try: - marker = page['Marker'] - except KeyError: - is_pagination_available = False - return clusters - except ClientError as e: - log.error(e, exc_info=True) - raise e - - @staticmethod - def pause_cluster(**data): - accountid = data['accountid'] - region = data.get('region', 'eu-west-1') - session = SessionHelper.remote_session(accountid) - client_redshift = session.client('redshift', region_name=region) - try: - response = client_redshift.pause_cluster( - ClusterIdentifier=data['cluster_id'] - ) - return response - except ClientError as e: - log.error(e, exc_info=True) - raise e - - @staticmethod - def reboot_cluster(**data): - accountid = data['accountid'] - region = data.get('region', 'eu-west-1') - session = SessionHelper.remote_session(accountid) - client_redshift = session.client('redshift', region_name=region) - try: - response = client_redshift.reboot_cluster( - ClusterIdentifier=data['cluster_id'] - ) - return response - except ClientError as e: - log.error(e, exc_info=True) - raise e - - @staticmethod - def resume_cluster(**data): - accountid = data['accountid'] - region = data.get('region', 'eu-west-1') - session = SessionHelper.remote_session(accountid) - client_redshift = session.client('redshift', region_name=region) - try: - response = client_redshift.resume_cluster( - ClusterIdentifier=data['cluster_id'] - ) - return response - except ClientError as e: - log.error(e, exc_info=True) - raise e - - @staticmethod - def get_cluster_credentials(**data): - try: - secretsmanager = SessionHelper.remote_session(data['accountid']).client( - 'secretsmanager', region_name=data['region'] - ) - dh_secret = secretsmanager.get_secret_value(SecretId=data['secret_name']) - credentials = json.loads(dh_secret['SecretString']) - password = credentials['password'] - return password - except ClientError as e: - log.error(e, exc_info=True) - raise e - - @staticmethod - def run_query(**data): - - log.info(f"Starting query run: {data.get('sql_query')}") - - accountid = data['accountid'] - region = data.get('region', 'eu-west-1') - session = 
SessionHelper.remote_session(accountid) - client_redshift = session.client('redshift', region_name=region) - client_redshift_data = session.client('redshift-data', region_name=region) - try: - response = client_redshift.describe_clusters( - ClusterIdentifier=data['cluster_id'], MaxRecords=100 - ) - cluster = response.get('Clusters')[0] - database = data.get('database', cluster.get('DBName')) - statement = dict( - ClusterIdentifier=data['cluster_id'], - Database=database, - Sql=data.get('sql_query'), - WithEvent=data.get('with_event', False), - ) - if data.get('dbuser'): - statement['DbUser'] = data.get('dbuser') - else: - statement['SecretArn'] = data['secret_arn'] - response = client_redshift_data.execute_statement(**statement) - log.info(f'Ran query successfully {response}') - except ClientError as e: - log.error(e, exc_info=True) - raise e - - @staticmethod - @Worker.handler(path='redshift.cluster.init_database') - def init_datahub_db(engine, task: models.Task): - with engine.scoped_session() as session: - cluster: models.RedshiftCluster = ( - db.api.RedshiftCluster.get_redshift_cluster_by_uri( - session=session, uri=task.targetUri - ) - ) - secretsmanager = SessionHelper.remote_session(cluster.AwsAccountId).client( - 'secretsmanager', region_name=cluster.region - ) - dh_secret = Redshift.get_secret(cluster, secretsmanager) - credentials = json.loads(dh_secret['SecretString']) - password = credentials['password'] - log.info(f'Starting {cluster.databaseName} name creation... ') - Redshift._init_database( - cluster, cluster.databaseName, cluster.databaseUser, password - ) - session.commit() - - @staticmethod - def _init_database(cluster, database_name, database_user, password): - queries = list() - queries.append(f'DROP DATABASE IF EXISTS {database_name}') - queries.append(f'DROP USER IF EXISTS {database_user}') - queries.append(f'create user {database_user} password disable') - queries.append(f'create database {database_name} with owner {database_user}') - queries.append( - f'GRANT ALL PRIVILEGES ON database {database_name} TO {database_user}' - ) - queries.append( - f"ALTER USER {database_user} WITH PASSWORD '{password}' " - f"VALID UNTIL '{(datetime.now() + timedelta(days=365)).strftime('%Y-%m-%d %H:%M')}'" - ) - queries.append( - f'GRANT ALL PRIVILEGES ON database {database_name} TO GROUP PUBLIC' - ) - log.info(f'Queries for database {cluster.databaseName} init: {queries} ') - for query in queries: - Redshift.run_query( - **{ - 'accountid': cluster.AwsAccountId, - 'region': cluster.region, - 'cluster_id': cluster.name, - 'database': cluster.masterDatabaseName, - 'dbuser': cluster.masterUsername, - 'sql_query': query, - } - ) - cluster.external_schema_created = True - - @staticmethod - @Worker.retry(exception=ClientError, tries=4, delay=3, backoff=2, logger=log) - def get_secret(cluster, secretsmanager): - try: - dh_secret = secretsmanager.get_secret_value(SecretId=cluster.datahubSecret) - return dh_secret - except ClientError as e: - log.warning(f'Failed to get secret {cluster.datahubSecret}') - raise e - - @staticmethod - def set_cluster_secrets(secretsmanager, cluster: models.RedshiftCluster): - cluster_secrets = secretsmanager.list_secrets( - MaxResults=3, - Filters=[{'Key': 'tag-value', 'Values': [f'{cluster.CFNStackName}']}], - ) - for s in cluster_secrets['SecretList']: - if f'{cluster.name}-redshift-dhuser' in s['Name']: - cluster.datahubSecret = s['Name'] - if f'{cluster.name}-redshift-masteruser' in s['Name']: - cluster.masterSecret = s['Name'] - return cluster - - 
@staticmethod - @Worker.handler(path='redshift.cluster.create_external_schema') - def create_external_schemas(engine, task): - with engine.scoped_session() as session: - catalog_databases, cluster, env = Redshift.get_cluster_catalog_databases( - session, task - ) - Redshift.run_query( - **{ - 'accountid': cluster.AwsAccountId, - 'region': cluster.region, - 'cluster_id': cluster.name, - 'database': cluster.databaseName, - 'dbuser': cluster.masterUsername, - 'sql_query': f'CREATE SCHEMA dataall_{cluster.clusterUri.lower()}', - } - ) - Redshift.run_query( - **{ - 'accountid': cluster.AwsAccountId, - 'region': cluster.region, - 'cluster_id': cluster.name, - 'database': cluster.databaseName, - 'dbuser': cluster.masterUsername, - 'sql_query': f'GRANT ALL ON SCHEMA dataall_{cluster.clusterUri.lower()} TO {cluster.databaseUser} ', - } - ) - for database in catalog_databases: - Redshift.run_query( - **{ - 'accountid': cluster.AwsAccountId, - 'region': cluster.region, - 'cluster_id': cluster.name, - 'database': cluster.databaseName, - 'dbuser': cluster.masterUsername, - 'sql_query': f'drop schema if exists {database}', - } - ) - Redshift.run_query( - **{ - 'accountid': cluster.AwsAccountId, - 'region': cluster.region, - 'cluster_id': cluster.name, - 'database': cluster.databaseName, - 'dbuser': cluster.masterUsername, - 'sql_query': f'create external schema {database} ' - f"from data catalog database '{database}' iam_role " - f"'{env.EnvironmentDefaultIAMRoleArn}' ", - } - ) - Redshift.run_query( - **{ - 'accountid': cluster.AwsAccountId, - 'region': cluster.region, - 'cluster_id': cluster.name, - 'database': cluster.databaseName, - 'dbuser': cluster.masterUsername, - 'sql_query': f'GRANT ALL ON SCHEMA {database} TO {cluster.databaseUser} ', - } - ) - Redshift.run_query( - **{ - 'accountid': cluster.AwsAccountId, - 'region': cluster.region, - 'cluster_id': cluster.name, - 'database': cluster.databaseName, - 'dbuser': cluster.masterUsername, - 'sql_query': f'GRANT ALL ON SCHEMA {database} TO GROUP PUBLIC ', - } - ) - return True - - @staticmethod - @Worker.handler(path='redshift.cluster.drop_external_schema') - def drop_external_schemas(engine, task): - with engine.scoped_session() as session: - catalog_databases, cluster, env = Redshift.get_cluster_catalog_databases( - session, task - ) - database = task.payload['database'] - kill_sessionsquery = ( - f'SELECT pg_terminate_backend(pg_stat_activity.procpid) ' - f'FROM pg_stat_activity ' - f"WHERE pg_stat_activity.datname = '{database}' " - f"AND pg_stat_activity.usename = '{cluster.databaseUser}' " - f'AND procpid <> pg_backend_pid();' - ) - Redshift.run_query( - **{ - 'accountid': cluster.AwsAccountId, - 'region': cluster.region, - 'cluster_id': cluster.name, - 'database': cluster.databaseName, - 'dbuser': cluster.masterUsername, - 'sql_query': kill_sessionsquery, - } - ) - Redshift.run_query( - **{ - 'accountid': cluster.AwsAccountId, - 'region': cluster.region, - 'cluster_id': cluster.name, - 'database': cluster.databaseName, - 'dbuser': cluster.masterUsername, - 'sql_query': f'REVOKE ALL ON SCHEMA {database} TO {cluster.databaseUser} ', - } - ) - Redshift.run_query( - **{ - 'accountid': cluster.AwsAccountId, - 'region': cluster.region, - 'cluster_id': cluster.name, - 'database': cluster.databaseName, - 'dbuser': cluster.masterUsername, - 'sql_query': f'drop schema {database}', - } - ) - return True - - @staticmethod - def get_cluster_catalog_databases(session, task): - try: - cluster = db.api.RedshiftCluster.get_redshift_cluster_by_uri( - session, 
task.targetUri - ) - env = db.api.Environment.get_environment_by_uri( - session, cluster.environmentUri - ) - cluster_datasets = db.api.RedshiftCluster.list_all_cluster_datasets( - session, cluster.clusterUri - ) - secretsmanager = SessionHelper.remote_session(cluster.AwsAccountId).client( - 'secretsmanager', region_name=cluster.region - ) - Redshift.set_cluster_secrets(secretsmanager, cluster) - catalog_databases = [] - for d in cluster_datasets: - dataset = DatasetRepository.get_dataset_by_uri(session, d.datasetUri) - if dataset.environmentUri != cluster.environmentUri: - catalog_databases.append(f'{dataset.GlueDatabaseName}shared') - else: - catalog_databases.append(f'{dataset.GlueDatabaseName}') - - log.info(f'Found Schemas to create with Spectrum {catalog_databases}') - except ClientError as e: - log.error(e, exc_info=True) - raise e - return catalog_databases, cluster, env - - @staticmethod - @Worker.handler(path='redshift.cluster.tag') - def tag_cluster(engine, task): - with engine.scoped_session() as session: - cluster = db.api.RedshiftCluster.get_redshift_cluster_by_uri( - session, task.targetUri - ) - try: - accountid = cluster.AwsAccountId - region = cluster.region - session = SessionHelper.remote_session(accountid) - client_redshift = session.client('redshift', region_name=region) - client_redshift.create_tags( - ResourceName=f'arn:aws:redshift:{region}:{accountid}:cluster:{cluster.name}', - Tags=[{'Key': 'dataall', 'Value': 'true'}], - ) - except ClientError as e: - log.error(e, exc_info=True) - raise e - - @staticmethod - @Worker.handler(path='redshift.iam_roles.update') - def update_cluster_roles(engine, task: models.Task): - with engine.scoped_session() as session: - cluster = db.api.RedshiftCluster.get_redshift_cluster_by_uri( - session, task.targetUri - ) - environment: models.Environment = session.query(models.Environment).get( - cluster.environmentUri - ) - log.info( - f'Updating cluster {cluster.name}|{environment.AwsAccountId} ' - f'with environment role {environment.EnvironmentDefaultIAMRoleArn}' - ) - try: - accountid = cluster.AwsAccountId - region = cluster.region - aws_session = SessionHelper.remote_session(accountid) - client_redshift = aws_session.client('redshift', region_name=region) - client_redshift.modify_cluster_iam_roles( - ClusterIdentifier=cluster.name, - AddIamRoles=[ - environment.EnvironmentDefaultIAMRoleArn, - ], - ) - log.info( - f'Successfully Updated cluster {cluster.name}|{environment.AwsAccountId} ' - f'with environment role {environment.EnvironmentDefaultIAMRoleArn}' - ) - except ClientError as e: - log.error(e, exc_info=True) - raise e - - @staticmethod - @Worker.handler(path='redshift.subscriptions.copy') - def copy_data(engine, task: models.Task): - # TODO should be migrated in the redshift module - from dataall.modules.datasets.db.dataset_table_repository import DatasetTableRepository - - with engine.scoped_session() as session: - - environment: models.Environment = session.query(models.Environment).get( - task.targetUri - ) - - dataset: Dataset = DatasetRepository.get_dataset_by_uri( - session, task.payload['datasetUri'] - ) - - table: DatasetTable = DatasetTableRepository.get_dataset_table_by_uri( - session, task.payload['tableUri'] - ) - - env_clusters = ( - session.query(models.RedshiftCluster) - .filter( - models.RedshiftCluster.environmentUri == environment.environmentUri, - ) - .all() - ) - - log.info(f"Received Message {task.payload['message']}") - - message = task.payload['message'] - - if not message: - raise Exception('Task 
message can not be found') - - glue_table = Glue.table_exists( - **{ - 'accountid': table.AWSAccountId, - 'region': table.region, - 'database': table.GlueDatabaseName, - 'tablename': table.GlueTableName, - } - ) - columns = ( - glue_table.get('Table').get('StorageDescriptor', {}).get('Columns') - ) - log.info(f'Glue table columns: {columns}') - - ddl_columns = ','.join( - [ - f"{col['Name']} {Redshift.convert_to_redshift_types(col['Type'])}" - for col in columns - ] - ) - log.info(f'DDL Columns: {ddl_columns}') - - for cluster in env_clusters: - cluster_dataset_table = ( - db.api.RedshiftCluster.get_cluster_dataset_table( - session, cluster.clusterUri, dataset.datasetUri, table.tableUri - ) - ) - if cluster_dataset_table: - log.info( - f'Cluster {cluster}|{environment.AwsAccountId} ' - f'copy from {dataset.name} for table {table.GlueTableName} is enabled' - ) - queries = list() - queries.append( - f'CREATE SCHEMA IF NOT EXISTS {cluster_dataset_table.schema}' - ) - queries.append( - f'GRANT ALL ON SCHEMA {cluster_dataset_table.schema} TO {cluster.databaseUser}' - ) - queries.append( - f'GRANT ALL ON SCHEMA {cluster_dataset_table.schema} TO GROUP PUBLIC' - ) - queries.append( - Redshift.get_create_table_statement( - cluster_dataset_table.schema, - table.GlueTableName, - ddl_columns, - ) - ) - queries.append( - f'GRANT ALL ON TABLE {cluster_dataset_table.schema}.{table.GlueTableName} TO {cluster.databaseUser}' - ) - queries.append( - f'GRANT ALL ON TABLE {cluster_dataset_table.schema}.{table.GlueTableName} TO GROUP PUBLIC' - ) - data_prefix = Redshift.get_data_prefix(cluster_dataset_table) - queries.extend( - Redshift.get_merge_table_statements( - cluster_dataset_table.schema, - table.GlueTableName, - data_prefix, - environment.EnvironmentDefaultIAMRoleArn, - ddl_columns, - ) - ) - for query in queries: - Redshift.run_query( - **{ - 'accountid': cluster.AwsAccountId, - 'region': cluster.region, - 'cluster_id': cluster.name, - 'database': cluster.databaseName, - 'dbuser': cluster.databaseUser, - 'sql_query': query, - } - ) - return True - - @staticmethod - def get_data_prefix(table: models.RedshiftClusterDatasetTable): - data_prefix = ( - table.dataLocation - if '/packages.delta' not in table.dataLocation - else table.dataLocation.replace('/packages.delta', '') - ) - data_prefix = ( - data_prefix - if '/_symlink_format_manifest' not in data_prefix - else data_prefix.replace('/_symlink_format_manifest', '') - ) - return data_prefix - - @staticmethod - def get_create_table_statement(schema, table_name, columns): - return f'CREATE TABLE IF NOT EXISTS {schema}.{table_name}({columns})' - - @staticmethod - def get_copy_table_statement(schema, table_name, data_prefix, iam_role_arn): - return ( - f'COPY {schema}.{table_name} ' - f"FROM '{data_prefix}' " - f"iam_role '{iam_role_arn}' " - ) - - @staticmethod - def convert_to_redshift_types(dtypes): - redshift_sql_map = { - 'long': 'bigint', - 'double': 'bigint', - 'string': 'varchar(max)', - } - return ( - redshift_sql_map[dtypes.lower()] - if redshift_sql_map.get(dtypes.lower()) - else dtypes - ) - - @staticmethod - def get_merge_table_statements( - schema, table_name, data_prefix, iam_role_arn, columns - ): - statements = list() - statements.append( - f"""CREATE TABLE "{schema}"."{table_name}_stage"({columns});""" - ) - statements.append( - f"""COPY "{schema}"."{table_name}_stage" FROM '{data_prefix}' iam_role '{iam_role_arn}' format as parquet;""" - ) - statements.append( - f"""CREATE TABLE "{schema}"."{table_name}_stage"({columns};""" - ) - 
statements.append( - f""" - -- Start a new transaction - begin transaction; - - drop table if exists "{schema}"."{table_name}"; - - -- Insert all the rows from the staging table into the target table - alter table "{schema}"."{table_name}_stage" rename to "{table_name}"; - - -- End transaction and commit - end transaction; - """ - ) - return statements diff --git a/backend/dataall/cdkproxy/stacks/__init__.py b/backend/dataall/cdkproxy/stacks/__init__.py index 2f5b55a96..85c1874b3 100644 --- a/backend/dataall/cdkproxy/stacks/__init__.py +++ b/backend/dataall/cdkproxy/stacks/__init__.py @@ -1,6 +1,5 @@ from .environment import EnvironmentSetup from .manager import stack, instanciate_stack, StackManager -from .redshift_cluster import RedshiftStack __all__ = [ 'EnvironmentSetup', diff --git a/backend/dataall/cdkproxy/stacks/policies/__init__.py b/backend/dataall/cdkproxy/stacks/policies/__init__.py index 82537086e..54d85952f 100644 --- a/backend/dataall/cdkproxy/stacks/policies/__init__.py +++ b/backend/dataall/cdkproxy/stacks/policies/__init__.py @@ -1,7 +1,7 @@ """Contains the code for creating environment policies""" from dataall.cdkproxy.stacks.policies import ( - cloudformation, redshift, data_policy, service_policy + cloudformation, data_policy, service_policy ) -__all__ = ["cloudformation", "redshift", "data_policy", "service_policy"] +__all__ = ["cloudformation", "data_policy", "service_policy"] diff --git a/backend/dataall/cdkproxy/stacks/policies/redshift.py b/backend/dataall/cdkproxy/stacks/policies/redshift.py deleted file mode 100644 index 5b3684ad4..000000000 --- a/backend/dataall/cdkproxy/stacks/policies/redshift.py +++ /dev/null @@ -1,72 +0,0 @@ -from aws_cdk import aws_iam as iam - -from dataall.db import permissions -from .service_policy import ServicePolicy - - -class Redshift(ServicePolicy): - def get_statements(self, group_permissions, **kwargs): - if permissions.CREATE_REDSHIFT_CLUSTER not in group_permissions: - return [] - - return [ - iam.PolicyStatement( - actions=[ - 'redshift:List*', - 'redshift:ModifySavedQuery', - 'redshift:CreateSavedQuery', - 'redshift:FetchResults', - 'redshift:ViewQueriesFromConsole', - 'redshift:CancelQuery', - 'redshift:Describe*', - 'redshift:ExecuteQuery', - 'redshift:DeleteSavedQueries', - 'redshift-data:ListTables', - 'redshift-data:ListTables', - 'redshift-data:GetStatementResult', - 'redshift-data:CancelStatement', - 'redshift-data:ListSchemas', - 'redshift-data:ExecuteStatement', - 'redshift-data:ListStatements', - 'redshift-data:ListDatabases', - 'redshift-data:DescribeStatement', - ], - resources=['*'], - effect=iam.Effect.ALLOW, - ), - iam.PolicyStatement( - actions=[ - 'redshift:DeleteCluster', - 'redshift:RejectDataShare', - 'redshift:CancelResize', - 'redshift:ModifyClusterIamRoles', - 'redshift:PauseCluster', - 'redshift:ResumeCluster', - 'redshift:CreateEventSubscription', - 'redshift:RebootCluster', - 'redshift:CreateClusterSnapshot', - 'redshift:DeleteClusterSnapshot', - 'redshift:AuthorizeDataShare', - 'redshift:CopyClusterSnapshot', - 'redshift:CreateCluster', - 'redshift:GetClusterCredentials', - 'redshift:JoinGroup', - 'redshift:ModifyCluster', - 'redshift:AssociateDataShareConsumer', - 'redshift:DeleteEventSubscription', - 'redshift:DeauthorizeDataShare', - 'redshift:ModifyEventSubscription', - 'redshift:DisassociateDataShareConsumer', - ], - resources=[ - f'arn:aws:redshift:{self.region}:{self.account}:dbgroup:{self.resource_prefix}*/*', - 
f'arn:aws:redshift:{self.region}:{self.account}:datashare:{self.resource_prefix}*/*', - f'arn:aws:redshift:{self.region}:{self.account}:dbuser:{self.resource_prefix}*/*', - f'arn:aws:redshift:{self.region}:{self.account}:snapshot:{self.resource_prefix}*/*', - f'arn:aws:redshift:{self.region}:{self.account}:cluster:{self.resource_prefix}*', - f'arn:aws:redshift:{self.region}:{self.account}:eventsubscription:{self.resource_prefix}*', - f'arn:aws:redshift:{self.region}:{self.account}:dbname:{self.resource_prefix}*/*', - ], - effect=iam.Effect.ALLOW, - ), - ] diff --git a/backend/dataall/cdkproxy/stacks/redshift_cluster.py b/backend/dataall/cdkproxy/stacks/redshift_cluster.py deleted file mode 100644 index f546786b6..000000000 --- a/backend/dataall/cdkproxy/stacks/redshift_cluster.py +++ /dev/null @@ -1,189 +0,0 @@ -import json -import logging -import os - -from aws_cdk import ( - aws_ec2 as ec2, - aws_redshift_alpha as redshift, - aws_ec2, - aws_kms, - aws_secretsmanager, - aws_iam, - aws_s3, - RemovalPolicy, - Duration, - Stack, -) -from aws_cdk.aws_secretsmanager import SecretStringGenerator - -from .manager import stack -from ... import db -from ...db import models -from ...db.api import Environment -from ...utils.cdk_nag_utils import CDKNagUtil -from ...utils.runtime_stacks_tagging import TagsUtil - -logger = logging.getLogger(__name__) - - -@stack(stack='redshift') -class RedshiftStack(Stack): - module_name = __file__ - - def get_engine(self) -> db.Engine: - return db.get_engine(envname=os.environ.get('envname', 'local')) - - def get_target(self, target_uri): - engine = self.get_engine() - with engine.scoped_session() as session: - cluster: models.RedshiftCluster = session.query(models.RedshiftCluster).get( - target_uri - ) - environment: models.Environment = session.query(models.Environment).get( - cluster.environmentUri - ) - return cluster, environment - - def get_env_group(self, cluster: models.RedshiftCluster) -> models.EnvironmentGroup: - engine = self.get_engine() - with engine.scoped_session() as session: - env = Environment.get_environment_group( - session, cluster.SamlGroupName, cluster.environmentUri - ) - return env - - def __init__(self, scope, id: str, target_uri: str = None, **kwargs) -> None: - super().__init__(scope, - id, - description="Cloud formation stack of REDSHIFT CLUSTER: {}; URI: {}; DESCRIPTION: {}".format( - self.get_target(target_uri=target_uri)[0].label, - target_uri, - self.get_target(target_uri=target_uri)[0].description, - )[:1024], - **kwargs) - - # Required for dynamic stack tagging - self.target_uri = target_uri - - cluster, environment = self.get_target(target_uri=target_uri) - - env_group = self.get_env_group(cluster) - - if not cluster.imported: - vpc = aws_ec2.Vpc.from_lookup( - self, 'vpcRedshiftcluster', vpc_id=cluster.vpc - ) - - security_group = aws_ec2.SecurityGroup( - self, - f'sg{cluster.name}', - vpc=vpc, - allow_all_outbound=True, - security_group_name=cluster.name, - ) - - key = aws_kms.Key( - self, - f'key{cluster.name}', - removal_policy=RemovalPolicy.RETAIN, - alias=f'{cluster.name}', - enable_key_rotation=True, - ) - - cluster_parameter_group = redshift.ClusterParameterGroup( - self, - 'RedshiftClusterParameterGroup', - description=f'{cluster.name} parameter group', - parameters={ - 'enable_user_activity_logging': 'true', - 'require_ssl': 'true', - }, - ) - - cluster_subnet_group = redshift.ClusterSubnetGroup( - self, - cluster.name, - description=f'Redshift Cluster {cluster.name} subnet group', - vpc=vpc, - 
removal_policy=RemovalPolicy.DESTROY, - ) - - master_secret = redshift.DatabaseSecret( - self, - f'{environment.resourcePrefix}-msredshift-{cluster.clusterUri}'[:23], - username=cluster.masterUsername, - ) - master_secret.add_rotation_schedule( - id='msRot', - automatically_after=Duration.days(90), - hosted_rotation=aws_secretsmanager.HostedRotation.redshift_single_user(), - ) - redshift_login = redshift.Login( - master_username=master_secret.secret_value_from_json( - 'username' - ).to_string(), - master_password=master_secret.secret_value_from_json('password'), - ) - redshift_role = aws_iam.Role.from_role_arn( - self, 'RedshiftRole', role_arn=env_group.environmentIAMRoleArn - ) - redshift_cluster = redshift.Cluster( - self, - 'RedshiftCluster', - cluster_name=cluster.name, - master_user=redshift_login, - vpc=vpc, - default_database_name=cluster.masterDatabaseName, - cluster_type=redshift.ClusterType.SINGLE_NODE - if cluster.numberOfNodes == 1 - else redshift.ClusterType.MULTI_NODE, - number_of_nodes=None - if cluster.numberOfNodes == 1 - else cluster.numberOfNodes, - node_type=redshift.NodeType(cluster.nodeType.replace('.', '_').upper()), - port=cluster.port, - roles=[redshift_role], - publicly_accessible=False, - encrypted=True, - encryption_key=key, - parameter_group=cluster_parameter_group, - security_groups=[ - security_group, - ], - subnet_group=cluster_subnet_group, - logging_bucket=aws_s3.Bucket.from_bucket_name( - self, - 'EnvLoggingBucket', - f'{environment.EnvironmentDefaultBucketName}', - ), - logging_key_prefix=f'redshift_logs/{cluster.name}/', - ) - - else: - redshift.Cluster.from_cluster_attributes( - self, - 'ImportedRedshiftCluster', - cluster_name=cluster.name, - cluster_endpoint_address=cluster.endpoint, - cluster_endpoint_port=cluster.port, - ) - - dh_user_secret = aws_secretsmanager.Secret( - self, - 'UserSecret', - secret_name=cluster.datahubSecret, - generate_secret_string=SecretStringGenerator( - secret_string_template=json.dumps({'username': cluster.databaseUser}), - generate_string_key='password', - exclude_punctuation=True, - ), - ) - dh_user_secret.add_rotation_schedule( - id='rt', - automatically_after=Duration.days(90), - hosted_rotation=aws_secretsmanager.HostedRotation.redshift_single_user(), - ) - - TagsUtil.add_tags(stack=self, model=models.RedshiftCluster, target_type="redshift") - - CDKNagUtil.check_rules(self) diff --git a/backend/dataall/db/api/__init__.py b/backend/dataall/db/api/__init__.py index 62de5dde1..fd2bd069f 100644 --- a/backend/dataall/db/api/__init__.py +++ b/backend/dataall/db/api/__init__.py @@ -11,5 +11,4 @@ from .glossary import Glossary from .vote import Vote from .notification import Notification -from .redshift_cluster import RedshiftCluster from .vpc import Vpc diff --git a/backend/dataall/db/api/environment.py b/backend/dataall/db/api/environment.py index c65d8cab8..5edc5ecb0 100644 --- a/backend/dataall/db/api/environment.py +++ b/backend/dataall/db/api/environment.py @@ -277,9 +277,6 @@ def invite_group( @staticmethod def validate_permissions(session, uri, g_permissions, group): - if permissions.CREATE_REDSHIFT_CLUSTER in g_permissions: - g_permissions.append(permissions.LIST_ENVIRONMENT_REDSHIFT_CLUSTERS) - if permissions.INVITE_ENVIRONMENT_GROUP in g_permissions: g_permissions.append(permissions.LIST_ENVIRONMENT_GROUPS) g_permissions.append(permissions.REMOVE_ENVIRONMENT_GROUP) @@ -293,7 +290,6 @@ def validate_permissions(session, uri, g_permissions, group): g_permissions.append(permissions.GET_ENVIRONMENT) 
g_permissions.append(permissions.LIST_ENVIRONMENT_GROUPS) g_permissions.append(permissions.LIST_ENVIRONMENT_GROUP_PERMISSIONS) - g_permissions.append(permissions.LIST_ENVIRONMENT_REDSHIFT_CLUSTERS) g_permissions.append(permissions.LIST_ENVIRONMENT_NETWORKS) g_permissions.append(permissions.CREDENTIALS_ENVIRONMENT) @@ -333,25 +329,7 @@ def remove_group(session, username, groups, uri, data=None, check_perm=None): message=f'Team: {group} is the owner of the environment {environment.name}', ) - group_env_objects_count = ( - session.query(models.Environment) - .outerjoin( - models.RedshiftCluster, - models.RedshiftCluster.environmentUri - == models.Environment.environmentUri, - ) - .filter( - and_( - models.Environment.environmentUri == environment.environmentUri, - or_( - models.RedshiftCluster.SamlGroupName == group, - ), - ) - ) - .count() - ) - - group_env_objects_count += EnvironmentResourceManager.count_group_resources( + group_env_objects_count = EnvironmentResourceManager.count_group_resources( session=session, environment=environment, group_uri=group @@ -882,33 +860,6 @@ def list_all_active_environments(session) -> [models.Environment]: ) return environments - @staticmethod - def list_environment_redshift_clusters_query(session, environment_uri, filter): - q = session.query(models.RedshiftCluster).filter( - models.RedshiftCluster.environmentUri == environment_uri - ) - term = filter.get('term', None) - if term: - q = q.filter( - or_( - models.RedshiftCluster.label.ilike('%' + term + '%'), - models.RedshiftCluster.description.ilike('%' + term + '%'), - ) - ) - return q - - @staticmethod - @has_resource_perm(permissions.LIST_ENVIRONMENT_REDSHIFT_CLUSTERS) - def paginated_environment_redshift_clusters( - session, username, groups, uri, data=None, check_perm=None - ): - query = Environment.list_environment_redshift_clusters_query(session, uri, data) - return paginate( - query=query, - page_size=data.get('pageSize', 10), - page=data.get('page', 1), - ).to_dict() - @staticmethod @has_resource_perm(permissions.GET_ENVIRONMENT) def get_stack( diff --git a/backend/dataall/db/api/redshift_cluster.py b/backend/dataall/db/api/redshift_cluster.py deleted file mode 100644 index 9f0e31fab..000000000 --- a/backend/dataall/db/api/redshift_cluster.py +++ /dev/null @@ -1,583 +0,0 @@ -import logging - -from sqlalchemy import and_, or_, literal - -from .. import models, exceptions, paginate, permissions -from . 
import has_resource_perm, ResourcePolicy, Environment -from dataall.modules.datasets_base.db.models import DatasetTable, Dataset -from dataall.utils.naming_convention import ( - NamingConventionService, - NamingConventionPattern, -) -from dataall.utils.slugify import slugify - -log = logging.getLogger(__name__) - - -class RedshiftCluster: - def __init__(self): - pass - - @staticmethod - @has_resource_perm(permissions.CREATE_REDSHIFT_CLUSTER) - def create(session, username, groups, uri: str, data: dict = None, check_perm=None): - - RedshiftCluster.__validate_cluster_data(data, uri) - - Environment.check_group_environment_permission( - session=session, - username=username, - groups=groups, - uri=uri, - group=data['SamlGroupName'], - permission_name=permissions.CREATE_REDSHIFT_CLUSTER, - ) - - environment = Environment.get_environment_by_uri(session, uri) - - if not environment.warehousesEnabled: - raise exceptions.UnauthorizedOperation( - action=permissions.CREATE_REDSHIFT_CLUSTER, - message=f'Warehouses feature is disabled for the environment {environment.label}', - ) - - data['clusterName'] = slugify(data['label'], separator='') - - RedshiftCluster.validate_none_existing_cluster( - session, data['clusterName'], environment - ) - redshift_cluster = RedshiftCluster.create_redshift_cluster( - session, username, data, environment - ) - return redshift_cluster - - @staticmethod - def create_redshift_cluster( - session, username, cluster_input, environment: models.Environment - ): - redshift_cluster = models.RedshiftCluster( - environmentUri=environment.environmentUri, - organizationUri=environment.organizationUri, - owner=cluster_input.get('owner', username), - label=cluster_input['label'], - description=cluster_input.get('description'), - masterDatabaseName=cluster_input['masterDatabaseName'], - masterUsername=cluster_input['masterUsername'], - databaseName=cluster_input.get('databaseName', 'datahubdb'), - nodeType=cluster_input['nodeType'], - numberOfNodes=cluster_input['numberOfNodes'], - port=cluster_input.get('port') or 5432, - region=environment.region, - AwsAccountId=environment.AwsAccountId, - status='CREATING', - vpc=cluster_input['vpc'], - subnetIds=cluster_input.get('subnetIds'), - securityGroupIds=cluster_input.get('securityGroupIds'), - IAMRoles=[environment.EnvironmentDefaultIAMRoleArn], - tags=cluster_input.get('tags', []), - SamlGroupName=cluster_input['SamlGroupName'], - imported=False, - ) - session.add(redshift_cluster) - session.commit() - - name = NamingConventionService( - target_uri=redshift_cluster.clusterUri, - target_label=redshift_cluster.label, - pattern=NamingConventionPattern.DEFAULT, - resource_prefix=environment.resourcePrefix, - ).build_compliant_name() - - redshift_cluster.name = name - redshift_cluster.clusterName = name - redshift_cluster.CFNStackName = f'{name}-stack' - redshift_cluster.CFNStackStatus = 'CREATING' - redshift_cluster.kmsAlias = redshift_cluster.clusterName - redshift_cluster.datahubSecret = f'{redshift_cluster.name}-redshift-dhuser' - redshift_cluster.masterSecret = f'{redshift_cluster.name}-redshift-masteruser' - - activity = models.Activity( - action='redshiftcluster:user:create', - label='redshiftcluster:user:create', - owner=username, - summary=f'{username} ' - f'Created Redshift cluster {redshift_cluster.name} ' - f'on Environment {environment.name}|{environment.AwsAccountId}', - targetUri=redshift_cluster.clusterUri, - targetType='redshiftcluster', - ) - session.add(activity) - session.commit() - - ResourcePolicy.attach_resource_policy( 
- session=session, - group=redshift_cluster.SamlGroupName, - resource_uri=redshift_cluster.clusterUri, - permissions=permissions.REDSHIFT_CLUSTER_ALL, - resource_type=models.RedshiftCluster.__name__, - ) - if environment.SamlGroupName != redshift_cluster.SamlGroupName: - ResourcePolicy.attach_resource_policy( - session=session, - group=environment.SamlGroupName, - permissions=permissions.REDSHIFT_CLUSTER_ALL, - resource_uri=redshift_cluster.clusterUri, - resource_type=Dataset.__name__, - ) - return redshift_cluster - - @staticmethod - def __validate_cluster_data(data, uri): - if not data: - raise exceptions.RequiredParameter('input') - if not data.get('SamlGroupName'): - raise exceptions.RequiredParameter('SamlGroupName') - if not uri: - raise exceptions.RequiredParameter('environmentUri') - if not data.get('label'): - raise exceptions.RequiredParameter('name') - - @staticmethod - def validate_none_existing_cluster(session, cluster_name, environment): - existing_cluster = ( - session.query(models.RedshiftCluster) - .filter( - and_( - models.RedshiftCluster.environmentUri == environment.environmentUri, - models.RedshiftCluster.clusterName == cluster_name, - ) - ) - .first() - ) - if existing_cluster: - raise exceptions.ResourceAlreadyExists( - 'Create Redshift cluster', - f'Redshift Cluster {cluster_name} ' - f'is already assigned to this environment {environment.name}', - ) - - @staticmethod - def update(session, context, cluster_input, clusterUri): - cluster = session.query(models.RedshiftCluster).get(clusterUri) - if not cluster: - raise exceptions.ObjectNotFound('RedshiftCluster', clusterUri) - if 'name' in cluster_input.keys(): - cluster.name = cluster_input.get('name') - if 'description' in cluster_input.keys(): - cluster.description = cluster_input.get('description') - return cluster - - @staticmethod - def get_redshift_cluster_by_uri(session, uri) -> models.RedshiftCluster: - if not uri: - raise exceptions.RequiredParameter('ClusterUri') - cluster = session.query(models.RedshiftCluster).get(uri) - if not cluster: - raise exceptions.ObjectNotFound('RedshiftCluster', uri) - return cluster - - @staticmethod - @has_resource_perm(permissions.LIST_REDSHIFT_CLUSTER_DATASETS) - def list_available_datasets( - session, username, groups, uri: str, data: dict = None, check_perm=None - ): - - # TODO deal with it in redshift module - from dataall.modules.dataset_sharing.db.models import ShareObject, ShareObjectItem - from dataall.modules.dataset_sharing.db.share_object_repository import ShareItemSM - - cluster: models.RedshiftCluster = RedshiftCluster.get_redshift_cluster_by_uri( - session, uri - ) - share_item_shared_states = ShareItemSM.get_share_item_shared_states() - - shared = ( - session.query( - ShareObject.datasetUri.label('datasetUri'), - literal(cluster.clusterUri).label('clusterUri'), - ) - .join( - models.RedshiftCluster, - models.RedshiftCluster.environmentUri - == ShareObject.environmentUri, - ) - .filter( - and_( - models.RedshiftCluster.clusterUri == cluster.clusterUri, - ShareObjectItem.status.in_(share_item_shared_states), - or_( - ShareObject.owner == username, - ShareObject.principalId.in_(groups), - ), - ) - ) - .group_by(ShareObject.datasetUri, models.RedshiftCluster.clusterUri) - ) - created = ( - session.query( - Dataset.datasetUri.label('datasetUri'), - models.RedshiftCluster.clusterUri.label('clusterUri'), - ) - .filter( - and_( - or_( - Dataset.owner == username, - Dataset.SamlAdminGroupName.in_(groups), - ), - RedshiftCluster.clusterUri == cluster.clusterUri, - 
Dataset.environmentUri - == models.RedshiftCluster.environmentUri, - ) - ) - .group_by(Dataset.datasetUri, models.RedshiftCluster.clusterUri) - ) - all_group_datasets_sub_query = shared.union(created).subquery( - 'all_group_datasets_sub_query' - ) - query = ( - session.query(Dataset) - .join( - all_group_datasets_sub_query, - Dataset.datasetUri == all_group_datasets_sub_query.c.datasetUri, - ) - .outerjoin( - models.RedshiftClusterDataset, - and_( - models.RedshiftClusterDataset.datasetUri == Dataset.datasetUri, - models.RedshiftClusterDataset.clusterUri == cluster.clusterUri, - ), - ) - .filter( - and_( - all_group_datasets_sub_query.c.clusterUri == cluster.clusterUri, - models.RedshiftClusterDataset.datasetUri.is_(None), - Dataset.deleted.is_(None), - ) - ) - ) - if data.get('term'): - term = data.get('term') - query = query.filter( - or_( - Dataset.label.ilike('%' + term + '%'), - Dataset.tags.any(term), - Dataset.topics.any(term), - ) - ) - return paginate( - query, page=data.get('page', 1), page_size=data.get('pageSize', 10) - ).to_dict() - - @staticmethod - @has_resource_perm(permissions.LIST_REDSHIFT_CLUSTER_DATASETS) - def list_cluster_datasets( - session, username, groups, uri: str, data: dict = None, check_perm=None - ): - query = ( - session.query(Dataset) - .join( - models.RedshiftClusterDataset, - Dataset.datasetUri == models.RedshiftClusterDataset.datasetUri, - ) - .filter( - models.RedshiftClusterDataset.clusterUri == uri, - ) - ) - if data.get('term'): - term = data.get('term') - query = query.filter( - or_( - Dataset.label.ilike('%' + term + '%'), - Dataset.tags.any(term), - Dataset.topics.any(term), - ) - ) - return paginate( - query, page=data.get('page', 1), page_size=data.get('pageSize', 10) - ).to_dict() - - @staticmethod - @has_resource_perm(permissions.LIST_REDSHIFT_CLUSTER_DATASETS) - def list_available_cluster_tables( - session, username, groups, uri: str, data: dict = None, check_perm=None - ): - - # TODO deal with it in redshift module - from dataall.modules.dataset_sharing.db.models import ShareObject, ShareObjectItem - from dataall.modules.dataset_sharing.db.share_object_repository import ShareItemSM - - cluster: models.RedshiftCluster = RedshiftCluster.get_redshift_cluster_by_uri( - session, uri - ) - - share_item_shared_states = ShareItemSM.get_share_item_shared_states() - - shared = ( - session.query( - ShareObject.datasetUri.label('datasetUri'), - ShareObjectItem.itemUri.label('tableUri'), - literal(cluster.clusterUri).label('clusterUri'), - ) - .join( - ShareObject, - ShareObject.shareUri == ShareObjectItem.shareUri, - ) - .join( - models.RedshiftCluster, - models.RedshiftCluster.environmentUri - == ShareObject.environmentUri, - ) - .filter( - and_( - models.RedshiftCluster.clusterUri == cluster.clusterUri, - ShareObjectItem.status.in_(share_item_shared_states), - or_( - ShareObject.owner == username, - ShareObject.principalId.in_(groups), - ), - ) - ) - .group_by( - ShareObject.datasetUri, - ShareObjectItem.itemUri, - models.RedshiftCluster.clusterUri, - ) - ) - created = ( - session.query( - DatasetTable.datasetUri.label('datasetUri'), - DatasetTable.tableUri.label('tableUri'), - models.RedshiftCluster.clusterUri.label('clusterUri'), - ) - .join( - Dataset, - DatasetTable.datasetUri == Dataset.datasetUri, - ) - .filter( - and_( - or_( - Dataset.owner == username, - Dataset.SamlAdminGroupName.in_(groups), - ), - models.RedshiftCluster.clusterUri == cluster.clusterUri, - Dataset.environmentUri - == models.RedshiftCluster.environmentUri, - ) - ) - 
.group_by( - DatasetTable.datasetUri, - DatasetTable.tableUri, - models.RedshiftCluster.clusterUri, - ) - ) - all_group_tables_sub_query = shared.union(created).subquery( - 'all_group_tables_sub_query' - ) - query = ( - session.query(DatasetTable) - .join( - all_group_tables_sub_query, - all_group_tables_sub_query.c.tableUri == DatasetTable.tableUri, - ) - .filter( - models.RedshiftCluster.clusterUri == cluster.clusterUri, - ) - ) - return paginate( - query, page=data.get('page', 1), page_size=data.get('pageSize', 20) - ).to_dict() - - @staticmethod - @has_resource_perm(permissions.GET_REDSHIFT_CLUSTER) - def get_cluster(session, username, groups, uri, data=None, check_perm=True): - cluster = RedshiftCluster.get_redshift_cluster_by_uri(session, uri) - return cluster - - @staticmethod - @has_resource_perm(permissions.ADD_DATASET_TO_REDSHIFT_CLUSTER) - def add_dataset(session, username, groups, uri, data=None, check_perm=True): - cluster = RedshiftCluster.get_redshift_cluster_by_uri(session, uri) - - if cluster.status != 'available': - raise exceptions.AWSResourceNotAvailable( - action='ADD DATASET TO REDSHIFT CLUSTER', - message=f'Cluster {cluster.name} is not on available state ({cluster.status})', - ) - - from dataall.modules.datasets_base.db.dataset_repository import DatasetRepository - dataset = DatasetRepository.get_dataset_by_uri(session, dataset_uri=data['datasetUri']) - - exists = session.query(models.RedshiftClusterDataset).get( - (uri, data['datasetUri']) - ) - if exists: - raise exceptions.ResourceAlreadyExists( - action='ADD DATASET TO REDSHIFT CLUSTER', - message=f'Dataset {dataset.name} is already loaded to cluster {cluster.name}', - ) - - linked_dataset = models.RedshiftClusterDataset( - clusterUri=uri, datasetUri=data['datasetUri'] - ) - session.add(linked_dataset) - - return cluster, dataset - - @staticmethod - @has_resource_perm(permissions.REMOVE_DATASET_FROM_REDSHIFT_CLUSTER) - def remove_dataset_from_cluster( - session, username, groups, uri, data=None, check_perm=True - ): - cluster = RedshiftCluster.get_redshift_cluster_by_uri(session, uri) - session.query(models.RedshiftClusterDatasetTable).filter( - and_( - models.RedshiftClusterDatasetTable.clusterUri == uri, - models.RedshiftClusterDatasetTable.datasetUri == data['datasetUri'], - ) - ).delete() - session.commit() - - dataset = None - exists = session.query(models.RedshiftClusterDataset).get( - (uri, data['datasetUri']) - ) - if exists: - session.delete(exists) - dataset = session.query(Dataset).get(data['datasetUri']) - if not dataset: - raise exceptions.ObjectNotFound('Dataset', data['datasetUri']) - - return cluster, dataset - - @staticmethod - def list_all_cluster_datasets(session, clusterUri): - cluster_datasets = ( - session.query(models.RedshiftClusterDataset) - .filter( - models.RedshiftClusterDataset.datasetUri.isnot(None), - models.RedshiftClusterDataset.clusterUri == clusterUri, - ) - .all() - ) - return cluster_datasets - - @staticmethod - def get_cluster_dataset( - session, clusterUri, datasetUri - ) -> models.RedshiftClusterDataset: - cluster_dataset = ( - session.query(models.RedshiftClusterDataset) - .filter( - and_( - models.RedshiftClusterDataset.clusterUri == clusterUri, - models.RedshiftClusterDataset.datasetUri == datasetUri, - ) - ) - .first() - ) - if not cluster_dataset: - raise Exception( - f'Cluster {clusterUri} is not associated to dataset {datasetUri}' - ) - return cluster_dataset - - @staticmethod - def get_cluster_dataset_table( - session, clusterUri, datasetUri, tableUri - ) -> 
models.RedshiftClusterDatasetTable: - cluster_dataset_table = ( - session.query(models.RedshiftClusterDatasetTable) - .filter( - and_( - models.RedshiftClusterDatasetTable.clusterUri == clusterUri, - models.RedshiftClusterDatasetTable.datasetUri == datasetUri, - models.RedshiftClusterDatasetTable.tableUri == tableUri, - ) - ) - .first() - ) - if not cluster_dataset_table: - log.error(f'Table {tableUri} copy is not enabled on cluster') - return cluster_dataset_table - - @staticmethod - @has_resource_perm(permissions.ENABLE_REDSHIFT_TABLE_COPY) - def enable_copy_table( - session, username, groups, uri, data=None, check_perm=True - ) -> models.RedshiftClusterDatasetTable: - cluster = RedshiftCluster.get_redshift_cluster_by_uri(session, uri) - - # TODO this dirty hack should be removed in the redshift module or after pipeline migration (circular import) - from dataall.modules.datasets.db.dataset_table_repository import DatasetTableRepository - table = DatasetTableRepository.get_dataset_table_by_uri( - session, data['tableUri'] - ) - table = models.RedshiftClusterDatasetTable( - clusterUri=uri, - datasetUri=data['datasetUri'], - tableUri=data['tableUri'], - enabled=True, - schema=data['schema'] or f'datahub_{cluster.clusterUri}', - databaseName=cluster.databaseName, - dataLocation=f's3://{table.S3BucketName}/{data.get("dataLocation")}' - if data.get('dataLocation') - else table.S3Prefix, - ) - session.add(table) - session.commit() - return table - - @staticmethod - @has_resource_perm(permissions.DISABLE_REDSHIFT_TABLE_COPY) - def disable_copy_table( - session, username, groups, uri, data=None, check_perm=True - ) -> bool: - table = ( - session.query(models.RedshiftClusterDatasetTable) - .filter( - and_( - models.RedshiftClusterDatasetTable.clusterUri == uri, - models.RedshiftClusterDatasetTable.datasetUri == data['datasetUri'], - models.RedshiftClusterDatasetTable.tableUri == data['tableUri'], - ) - ) - .first() - ) - session.delete(table) - session.commit() - return True - - @staticmethod - @has_resource_perm(permissions.LIST_REDSHIFT_CLUSTER_DATASETS) - def list_copy_enabled_tables( - session, username, groups, uri, data=None, check_perm=True - ) -> [models.RedshiftClusterDatasetTable]: - q = ( - session.query(DatasetTable) - .join( - models.RedshiftClusterDatasetTable, - models.RedshiftClusterDatasetTable.tableUri - == DatasetTable.tableUri, - ) - .filter(models.RedshiftClusterDatasetTable.clusterUri == uri) - ) - if data.get('term'): - term = data.get('term') - q = q.filter( - DatasetTable.label.ilike('%' + term + '%'), - ) - return paginate( - q, page=data.get('page', 1), page_size=data.get('pageSize', 20) - ).to_dict() - - @staticmethod - def delete_all_cluster_linked_objects(session, clusterUri): - session.query(models.RedshiftClusterDatasetTable).filter( - and_( - models.RedshiftClusterDatasetTable.clusterUri == clusterUri, - ) - ).delete() - session.query(models.RedshiftClusterDataset).filter( - models.RedshiftClusterDataset.clusterUri == clusterUri, - ).delete() - return True diff --git a/backend/dataall/db/models/RedshiftCluster.py b/backend/dataall/db/models/RedshiftCluster.py deleted file mode 100644 index 2997da416..000000000 --- a/backend/dataall/db/models/RedshiftCluster.py +++ /dev/null @@ -1,41 +0,0 @@ -from sqlalchemy import Column, String, ARRAY, Integer, Boolean, ForeignKey -from sqlalchemy.orm import query_expression - -from .. 
import utils, Resource, Base - - -class RedshiftCluster(Resource, Base): - __tablename__ = 'redshiftcluster' - environmentUri = Column(String, ForeignKey("environment.environmentUri"), nullable=False) - organizationUri = Column(String, nullable=False) - clusterUri = Column(String, primary_key=True, default=utils.uuid('cluster')) - clusterArn = Column(String) - clusterName = Column(String) - description = Column(String) - databaseName = Column(String, default='datahubdb') - databaseUser = Column(String, default='datahubuser') - masterUsername = Column(String) - masterDatabaseName = Column(String) - nodeType = Column(String) - numberOfNodes = Column(Integer) - region = Column(String, default='eu-west-1') - AwsAccountId = Column(String) - kmsAlias = Column(String) - status = Column(String, default='CREATING') - vpc = Column(String) - subnetGroupName = Column(String) - subnetIds = Column(ARRAY(String), default=[]) - securityGroupIds = Column(ARRAY(String), default=[]) - CFNStackName = Column(String) - CFNStackStatus = Column(String) - CFNStackArn = Column(String) - IAMRoles = Column(ARRAY(String), default=[]) - endpoint = Column(String) - port = Column(Integer) - datahubSecret = Column(String) - masterSecret = Column(String) - external_schema_created = Column(Boolean, default=False) - SamlGroupName = Column(String) - imported = Column(Boolean, default=False) - userRoleForCluster = query_expression() - userRoleInEnvironment = query_expression() diff --git a/backend/dataall/db/models/RedshiftClusterDataset.py b/backend/dataall/db/models/RedshiftClusterDataset.py deleted file mode 100644 index cfed208a8..000000000 --- a/backend/dataall/db/models/RedshiftClusterDataset.py +++ /dev/null @@ -1,17 +0,0 @@ -import datetime - -from sqlalchemy import Column, DateTime, String, Boolean -from sqlalchemy.orm import query_expression - -from .. import Base - - -class RedshiftClusterDataset(Base): - __tablename__ = 'redshiftcluster_dataset' - clusterUri = Column(String, nullable=False, primary_key=True) - datasetUri = Column(String, nullable=False, primary_key=True) - datasetCopyEnabled = Column(Boolean, default=True) - created = Column(DateTime, default=datetime.datetime.now) - updated = Column(DateTime, onupdate=datetime.datetime.now) - deleted = Column(DateTime) - userRoleForDataset = query_expression() diff --git a/backend/dataall/db/models/RedshiftClusterDatasetTable.py b/backend/dataall/db/models/RedshiftClusterDatasetTable.py deleted file mode 100644 index 1dcd8dc5a..000000000 --- a/backend/dataall/db/models/RedshiftClusterDatasetTable.py +++ /dev/null @@ -1,20 +0,0 @@ -import datetime - -from sqlalchemy import Column, DateTime, String, Boolean - -from .. 
import Base - - -class RedshiftClusterDatasetTable(Base): - __tablename__ = 'redshiftcluster_datasettable' - clusterUri = Column(String, nullable=False, primary_key=True) - datasetUri = Column(String, nullable=False, primary_key=True) - tableUri = Column(String, nullable=False, primary_key=True) - shareUri = Column(String) - enabled = Column(Boolean, default=False) - schema = Column(String, nullable=False) - databaseName = Column(String, nullable=False) - dataLocation = Column(String, nullable=True) - created = Column(DateTime, default=datetime.datetime.now) - updated = Column(DateTime, onupdate=datetime.datetime.now) - deleted = Column(DateTime) diff --git a/backend/dataall/db/models/__init__.py b/backend/dataall/db/models/__init__.py index 22ecda1fe..86f8dca73 100644 --- a/backend/dataall/db/models/__init__.py +++ b/backend/dataall/db/models/__init__.py @@ -12,9 +12,6 @@ from .Organization import Organization from .OrganizationGroup import OrganizationGroup from .Permission import Permission, PermissionType -from .RedshiftCluster import RedshiftCluster -from .RedshiftClusterDataset import RedshiftClusterDataset -from .RedshiftClusterDatasetTable import RedshiftClusterDatasetTable from .ResourcePolicy import ResourcePolicy from .ResourcePolicyPermission import ResourcePolicyPermission from .Stack import Stack From bc7e5d1aa580a37d09c8701ac5a0f27f67b91b55 Mon Sep 17 00:00:00 2001 From: Balint David Date: Wed, 5 Jul 2023 14:08:18 +0200 Subject: [PATCH 2/7] remove redshift related source code part 2 --- backend/dataall/aws/handlers/sts.py | 7 +- backend/dataall/cdkproxy/requirements.txt | 1 - .../dataall/cdkproxy/stacks/environment.py | 13 - backend/dataall/cdkproxy/stacks/pivot_role.py | 84 --- backend/dataall/db/api/target_type.py | 1 - backend/dataall/db/models/Enums.py | 7 - backend/dataall/db/permissions.py | 42 -- .../modules/datasets/api/dataset/resolvers.py | 10 - .../modules/datasets/api/dataset/schema.py | 16 - .../modules/datasets/api/table/resolvers.py | 19 - .../modules/datasets/api/table/schema.py | 20 +- .../datasets/services/dataset_service.py | 9 - .../tasks/dataset_subscription_task.py | 34 -- .../datasets_base/db/dataset_repository.py | 13 +- .../modules/datasets_base/db/models.py | 3 - ...fc49baecea4_add_enviromental_parameters.py | 44 +- deploy/pivot_role/pivotRole.yaml | 71 --- documentation/userguide/docs/environments.md | 7 +- documentation/userguide/docs/redshift.md | 71 --- documentation/userguide/mkdocs.yml | 2 - tests/api/conftest.py | 51 -- tests/api/test_redshift_cluster.py | 496 ------------------ tests/api/test_stack.py | 4 - tests/cdkproxy/conftest.py | 28 - tests/cdkproxy/test_redshift_cluster_stack.py | 55 -- 25 files changed, 41 insertions(+), 1067 deletions(-) delete mode 100644 documentation/userguide/docs/redshift.md delete mode 100644 tests/api/test_redshift_cluster.py delete mode 100644 tests/cdkproxy/test_redshift_cluster_stack.py diff --git a/backend/dataall/aws/handlers/sts.py b/backend/dataall/aws/handlers/sts.py index d55e23a06..1925a3426 100644 --- a/backend/dataall/aws/handlers/sts.py +++ b/backend/dataall/aws/handlers/sts.py @@ -106,7 +106,7 @@ def get_delegation_role_name(cls): return SessionHelper.get_secret(secret_name=f'dataall-pivot-role-name-{os.getenv("envname", "local")}') @classmethod - def get_console_access_url(cls, boto3_session, region='eu-west-1', bucket=None, redshiftcluster=None): + def get_console_access_url(cls, boto3_session, region='eu-west-1', bucket=None): """Returns an AWS Console access url for the boto3 session Args: 
boto3_session(object): a boto3 session @@ -134,11 +134,6 @@ def get_console_access_url(cls, boto3_session, region='eu-west-1', bucket=None, request_parameters += '&Destination=' + quote_plus( 'https://{}.console.aws.amazon.com/s3/buckets/{}/'.format(region, bucket) ) - - elif redshiftcluster: - request_parameters += '&Destination=' + quote_plus( - f'https://{region}.console.aws.amazon.com/redshiftv2/' f'home?region={region}#query-editor:' - ) else: request_parameters += '&Destination=' + urllib.parse.quote_plus(f'https://{region}.console.aws.amazon.com/') request_parameters += '&SigninToken=' + signin_token['SigninToken'] diff --git a/backend/dataall/cdkproxy/requirements.txt b/backend/dataall/cdkproxy/requirements.txt index 5aca30653..5552143fb 100644 --- a/backend/dataall/cdkproxy/requirements.txt +++ b/backend/dataall/cdkproxy/requirements.txt @@ -1,5 +1,4 @@ aws-cdk-lib==2.83.1 -aws_cdk.aws_redshift_alpha==2.14.0a0 boto3==1.24.85 boto3-stubs==1.24.85 botocore==1.27.85 diff --git a/backend/dataall/cdkproxy/stacks/environment.py b/backend/dataall/cdkproxy/stacks/environment.py index 52b89c049..34edea620 100644 --- a/backend/dataall/cdkproxy/stacks/environment.py +++ b/backend/dataall/cdkproxy/stacks/environment.py @@ -164,17 +164,6 @@ def __init__(self, scope, id, target_uri: str = None, **kwargs): enforce_ssl=True, ) self.default_environment_bucket = default_environment_bucket - default_environment_bucket.add_to_resource_policy( - iam.PolicyStatement( - sid='RedshiftLogging', - actions=['s3:PutObject', 's3:GetBucketAcl'], - resources=[ - f'{default_environment_bucket.bucket_arn}/*', - default_environment_bucket.bucket_arn, - ], - principals=[iam.ServicePrincipal('redshift.amazonaws.com')], - ) - ) default_environment_bucket.add_to_resource_policy( iam.PolicyStatement( @@ -639,7 +628,6 @@ def create_or_import_environment_default_role(self): iam.ServicePrincipal('athena.amazonaws.com'), iam.ServicePrincipal('states.amazonaws.com'), iam.ServicePrincipal('sagemaker.amazonaws.com'), - iam.ServicePrincipal('redshift.amazonaws.com'), iam.ServicePrincipal('databrew.amazonaws.com'), iam.AccountPrincipal(self._environment.AwsAccountId), ), @@ -708,7 +696,6 @@ def create_group_environment_role(self, group): iam.ServicePrincipal('athena.amazonaws.com'), iam.ServicePrincipal('states.amazonaws.com'), iam.ServicePrincipal('sagemaker.amazonaws.com'), - iam.ServicePrincipal('redshift.amazonaws.com'), iam.AccountPrincipal(self._environment.AwsAccountId), ), ) diff --git a/backend/dataall/cdkproxy/stacks/pivot_role.py b/backend/dataall/cdkproxy/stacks/pivot_role.py index b4c340d31..1ce8804cf 100644 --- a/backend/dataall/cdkproxy/stacks/pivot_role.py +++ b/backend/dataall/cdkproxy/stacks/pivot_role.py @@ -346,62 +346,6 @@ def _create_dataall_policy1(self, env_resource_prefix: str) -> iam.ManagedPolicy 'PivotRolePolicy1', managed_policy_name=f'{env_resource_prefix}-pivotrole-cdk-policy-1', statements=[ - # Redshift - iam.PolicyStatement( - sid='Redshift', - effect=iam.Effect.ALLOW, - actions=[ - 'redshift:DeleteTags', - 'redshift:ModifyClusterIamRoles', - 'redshift:DescribeClusterSecurityGroups', - 'redshift:DescribeClusterSubnetGroups', - 'redshift:pauseCluster', - 'redshift:resumeCluster', - ], - resources=['*'], - conditions={'StringEquals': {'aws:ResourceTag/dataall': 'true'}}, - ), - iam.PolicyStatement( - sid='RedshiftRead', - effect=iam.Effect.ALLOW, - actions=[ - 'redshift:DescribeClusters', - 'redshift:CreateTags', - 'redshift:DescribeClusterSubnetGroups', - ], - resources=['*'], - ), - 
iam.PolicyStatement( - sid='RedshiftCreds', - effect=iam.Effect.ALLOW, - actions=['redshift:GetClusterCredentials'], - resources=[ - f'arn:aws:redshift:*:{self.account}:dbgroup:*/*', - f'arn:aws:redshift:*:{self.account}:dbname:*/*', - f'arn:aws:redshift:*:{self.account}:dbuser:*/*', - ], - ), - iam.PolicyStatement( - sid='AllowRedshiftSubnet', - effect=iam.Effect.ALLOW, - actions=['redshift:CreateClusterSubnetGroup'], - resources=['*'], - ), - iam.PolicyStatement( - sid='AllowRedshiftDataApi', - effect=iam.Effect.ALLOW, - actions=[ - 'redshift-data:ListTables', - 'redshift-data:GetStatementResult', - 'redshift-data:CancelStatement', - 'redshift-data:ListSchemas', - 'redshift-data:ExecuteStatement', - 'redshift-data:ListStatements', - 'redshift-data:ListDatabases', - 'redshift-data:DescribeStatement', - ], - resources=['*'], - ), # EC2 iam.PolicyStatement( sid='EC2SG', @@ -536,34 +480,6 @@ def _create_dataall_policy1(self, env_resource_prefix: str) -> iam.ManagedPolicy resources=[f'arn:aws:ec2:*:{self.account}:security-group/*'], conditions={'StringEquals': {'aws:RequestTag/dataall': 'true'}}, ), - iam.PolicyStatement( - sid='SGandRedshift', - effect=iam.Effect.ALLOW, - actions=[ - 'ec2:DeleteTags', - 'ec2:DeleteSecurityGroup', - 'redshift:DeleteClusterSubnetGroup' - ], - resources=['*'], - conditions={'ForAnyValue:StringEqualsIfExists': {'aws:ResourceTag/dataall': 'true'}}, - ), - # Redshift - iam.PolicyStatement( - sid='RedshiftDataApi', - effect=iam.Effect.ALLOW, - actions=[ - 'redshift-data:ListTables', - 'redshift-data:GetStatementResult', - 'redshift-data:CancelStatement', - 'redshift-data:ListSchemas', - 'redshift-data:ExecuteStatement', - 'redshift-data:ListStatements', - 'redshift-data:ListDatabases', - 'redshift-data:DescribeStatement', - ], - resources=['*'], - conditions={'StringEqualsIfExists': {'aws:ResourceTag/dataall': 'true'}}, - ), # Dev Tools iam.PolicyStatement( sid='DevTools0', diff --git a/backend/dataall/db/api/target_type.py b/backend/dataall/db/api/target_type.py index 99e22c5d5..49b343139 100644 --- a/backend/dataall/db/api/target_type.py +++ b/backend/dataall/db/api/target_type.py @@ -37,4 +37,3 @@ def is_supported_target_type(target_type): TargetType("environment", permissions.GET_ENVIRONMENT, permissions.UPDATE_ENVIRONMENT) -TargetType("redshift", permissions.GET_REDSHIFT_CLUSTER, permissions.GET_REDSHIFT_CLUSTER) diff --git a/backend/dataall/db/models/Enums.py b/backend/dataall/db/models/Enums.py index 3e11b6489..3e6b3949e 100644 --- a/backend/dataall/db/models/Enums.py +++ b/backend/dataall/db/models/Enums.py @@ -36,13 +36,6 @@ class ProjectMemberRole(Enum): NotContributor = '000' -class RedshiftClusterRole(Enum): - Creator = '950' - Admin = '900' - Shared = '300' - NoPermission = '000' - - class ScheduledQueryRole(Enum): Creator = '950' Admin = '900' diff --git a/backend/dataall/db/permissions.py b/backend/dataall/db/permissions.py index ad6145ec1..8f94cde3b 100644 --- a/backend/dataall/db/permissions.py +++ b/backend/dataall/db/permissions.py @@ -22,7 +22,6 @@ """ TENANT PERMISSIONS """ -MANAGE_REDSHIFT_CLUSTERS = 'MANAGE_REDSHIFT_CLUSTERS' MANAGE_GROUPS = 'MANAGE_GROUPS' MANAGE_ENVIRONMENT = 'MANAGE_ENVIRONMENT' MANAGE_GLOSSARIES = 'MANAGE_GLOSSARIES' @@ -46,8 +45,6 @@ CREDENTIALS_ENVIRONMENT = 'CREDENTIALS_ENVIRONMENT' ENABLE_ENVIRONMENT_SUBSCRIPTIONS = 'ENABLE_ENVIRONMENT_SUBSCRIPTIONS' DISABLE_ENVIRONMENT_SUBSCRIPTIONS = 'DISABLE_ENVIRONMENT_SUBSCRIPTIONS' -CREATE_REDSHIFT_CLUSTER = 'CREATE_REDSHIFT_CLUSTER' -LIST_ENVIRONMENT_REDSHIFT_CLUSTERS = 
'LIST_ENVIRONMENT_REDSHIFT_CLUSTERS' CREATE_NETWORK = 'CREATE_NETWORK' LIST_ENVIRONMENT_NETWORKS = 'LIST_ENVIRONMENT_NETWORKS' @@ -57,8 +54,6 @@ GET_ENVIRONMENT, LIST_ENVIRONMENT_GROUPS, LIST_ENVIRONMENT_CONSUMPTION_ROLES, - CREATE_REDSHIFT_CLUSTER, - LIST_ENVIRONMENT_REDSHIFT_CLUSTERS, INVITE_ENVIRONMENT_GROUP, ADD_ENVIRONMENT_CONSUMPTION_ROLES, CREATE_NETWORK, @@ -67,7 +62,6 @@ ENVIRONMENT_INVITATION_REQUEST = [ INVITE_ENVIRONMENT_GROUP, ADD_ENVIRONMENT_CONSUMPTION_ROLES, - CREATE_REDSHIFT_CLUSTER, CREATE_NETWORK, ] ENVIRONMENT_ALL = [ @@ -84,8 +78,6 @@ CREDENTIALS_ENVIRONMENT, ENABLE_ENVIRONMENT_SUBSCRIPTIONS, DISABLE_ENVIRONMENT_SUBSCRIPTIONS, - CREATE_REDSHIFT_CLUSTER, - LIST_ENVIRONMENT_REDSHIFT_CLUSTERS, CREATE_NETWORK, LIST_ENVIRONMENT_NETWORKS, ] @@ -121,7 +113,6 @@ """ TENANT_ALL = [ - MANAGE_REDSHIFT_CLUSTERS, MANAGE_GLOSSARIES, MANAGE_GROUPS, MANAGE_ENVIRONMENTS, @@ -130,43 +121,12 @@ ] TENANT_ALL_WITH_DESC = {k: k for k in TENANT_ALL} -TENANT_ALL_WITH_DESC[MANAGE_REDSHIFT_CLUSTERS] = 'Manage Redshift clusters' TENANT_ALL_WITH_DESC[MANAGE_GLOSSARIES] = 'Manage glossaries' TENANT_ALL_WITH_DESC[MANAGE_ENVIRONMENTS] = 'Manage environments' TENANT_ALL_WITH_DESC[MANAGE_GROUPS] = 'Manage teams' TENANT_ALL_WITH_DESC[MANAGE_ORGANIZATIONS] = 'Manage organizations' TENANT_ALL_WITH_DESC[MANAGE_SGMSTUDIO_NOTEBOOKS] = 'Manage ML studio notebooks' -""" -REDSHIFT CLUSTER -""" -GET_REDSHIFT_CLUSTER = 'GET_REDSHIFT_CLUSTER' -SHARE_REDSHIFT_CLUSTER = 'SHARE_REDSHIFT_CLUSTER' -DELETE_REDSHIFT_CLUSTER = 'DELETE_REDSHIFT_CLUSTER' -REBOOT_REDSHIFT_CLUSTER = 'REBOOT_REDSHIFT_CLUSTER' -RESUME_REDSHIFT_CLUSTER = 'RESUME_REDSHIFT_CLUSTER' -PAUSE_REDSHIFT_CLUSTER = 'PAUSE_REDSHIFT_CLUSTER' -ADD_DATASET_TO_REDSHIFT_CLUSTER = 'ADD_DATASET_TO_REDSHIFT_CLUSTER' -LIST_REDSHIFT_CLUSTER_DATASETS = 'LIST_REDSHIFT_CLUSTER_DATASETS' -REMOVE_DATASET_FROM_REDSHIFT_CLUSTER = 'REMOVE_DATASET_FROM_REDSHIFT_CLUSTER' -ENABLE_REDSHIFT_TABLE_COPY = 'ENABLE_REDSHIFT_TABLE_COPY' -DISABLE_REDSHIFT_TABLE_COPY = 'DISABLE_REDSHIFT_TABLE_COPY' -GET_REDSHIFT_CLUSTER_CREDENTIALS = 'GET_REDSHIFT_CLUSTER_CREDENTIALS' -REDSHIFT_CLUSTER_ALL = [ - GET_REDSHIFT_CLUSTER, - SHARE_REDSHIFT_CLUSTER, - DELETE_REDSHIFT_CLUSTER, - REBOOT_REDSHIFT_CLUSTER, - RESUME_REDSHIFT_CLUSTER, - PAUSE_REDSHIFT_CLUSTER, - ADD_DATASET_TO_REDSHIFT_CLUSTER, - LIST_REDSHIFT_CLUSTER_DATASETS, - REMOVE_DATASET_FROM_REDSHIFT_CLUSTER, - ENABLE_REDSHIFT_TABLE_COPY, - DISABLE_REDSHIFT_TABLE_COPY, - GET_REDSHIFT_CLUSTER_CREDENTIALS, -] - """ NETWORKS """ @@ -182,13 +142,11 @@ ORGANIZATION_ALL + ENVIRONMENT_ALL + CONSUMPTION_ROLE_ALL - + REDSHIFT_CLUSTER_ALL + GLOSSARY_ALL + NETWORK_ALL ) RESOURCES_ALL_WITH_DESC = {k: k for k in RESOURCES_ALL} -RESOURCES_ALL_WITH_DESC[CREATE_REDSHIFT_CLUSTER] = 'Create Redshift clusters on this environment' RESOURCES_ALL_WITH_DESC[INVITE_ENVIRONMENT_GROUP] = 'Invite other teams to this environment' RESOURCES_ALL_WITH_DESC[ADD_ENVIRONMENT_CONSUMPTION_ROLES] = 'Add IAM consumption roles to this environment' RESOURCES_ALL_WITH_DESC[CREATE_NETWORK] = 'Create networks on this environment' diff --git a/backend/dataall/modules/datasets/api/dataset/resolvers.py b/backend/dataall/modules/datasets/api/dataset/resolvers.py index 93f394468..8219b0af8 100644 --- a/backend/dataall/modules/datasets/api/dataset/resolvers.py +++ b/backend/dataall/modules/datasets/api/dataset/resolvers.py @@ -1,7 +1,6 @@ import logging from dataall.api.Objects.Stack import stack_helper -from dataall import db from dataall.api.context import Context from dataall.db 
import paginate, models from dataall.db.api import Environment @@ -172,15 +171,6 @@ def get_dataset_glossary_terms(context: Context, source: Dataset, **kwargs): return paginate(terms, page_size=100, page=1).to_dict() -def resolve_redshift_copy_enabled(context, source: Dataset, clusterUri: str): - if not source: - return None - with context.engine.scoped_session() as session: - return db.api.RedshiftCluster.get_cluster_dataset( - session, clusterUri, source.datasetUri - ).datasetCopyEnabled - - def list_datasets_created_in_environment( context: Context, source, environmentUri: str = None, filter: dict = None ): diff --git a/backend/dataall/modules/datasets/api/dataset/schema.py b/backend/dataall/modules/datasets/api/dataset/schema.py index a329ebf58..acaf4da78 100644 --- a/backend/dataall/modules/datasets/api/dataset/schema.py +++ b/backend/dataall/modules/datasets/api/dataset/schema.py @@ -11,7 +11,6 @@ get_dataset_statistics, list_dataset_share_objects, get_dataset_glossary_terms, - resolve_redshift_copy_enabled, get_dataset_stack ) from dataall.api.constants import EnvironmentPermission @@ -140,21 +139,6 @@ ], type=gql.Ref('DatasetRole'), ), - gql.Field( - name='redshiftClusterPermission', - args=[ - gql.Argument(name='clusterUri', type=gql.NonNullableType(gql.String)) - ], - type=gql.Ref('DatasetRole'), - ), - gql.Field( - name='redshiftDataCopyEnabled', - args=[ - gql.Argument(name='clusterUri', type=gql.NonNullableType(gql.String)) - ], - type=gql.Boolean, - resolver=resolve_redshift_copy_enabled, - ), gql.Field( name='isPublishedInEnvironment', args=[ diff --git a/backend/dataall/modules/datasets/api/table/resolvers.py b/backend/dataall/modules/datasets/api/table/resolvers.py index c69a13c72..cff62d228 100644 --- a/backend/dataall/modules/datasets/api/table/resolvers.py +++ b/backend/dataall/modules/datasets/api/table/resolvers.py @@ -1,6 +1,5 @@ import logging -from dataall import db from dataall.modules.datasets.api.dataset.resolvers import get_dataset from dataall.api.context import Context from dataall.db.api import Glossary @@ -59,23 +58,5 @@ def resolve_glossary_terms(context: Context, source: DatasetTable, **kwargs): ) -def resolve_redshift_copy_schema(context, source: DatasetTable, clusterUri: str): - if not source: - return None - with context.engine.scoped_session() as session: - return db.api.RedshiftCluster.get_cluster_dataset_table( - session, clusterUri, source.datasetUri, source.tableUri - ).schema - - -def resolve_redshift_copy_location( - context, source: DatasetTable, clusterUri: str -): - with context.engine.scoped_session() as session: - return db.api.RedshiftCluster.get_cluster_dataset_table( - session, clusterUri, source.datasetUri, source.tableUri - ).dataLocation - - def list_shared_tables_by_env_dataset(context: Context, source, datasetUri: str, envUri: str): return DatasetTableService.list_shared_tables_by_env_dataset(datasetUri, envUri) diff --git a/backend/dataall/modules/datasets/api/table/schema.py b/backend/dataall/modules/datasets/api/table/schema.py index 666bf7e35..8a6ab4f9d 100644 --- a/backend/dataall/modules/datasets/api/table/schema.py +++ b/backend/dataall/modules/datasets/api/table/schema.py @@ -3,9 +3,7 @@ from dataall.modules.datasets.api.table.resolvers import ( resolve_dataset, get_glue_table_properties, - resolve_redshift_copy_location, - resolve_glossary_terms, - resolve_redshift_copy_schema + resolve_glossary_terms ) from dataall.api.constants import GraphQLEnumMapper @@ -59,22 +57,6 @@ resolver=list_table_columns, 
type=gql.Ref('DatasetTableColumnSearchResult'), ), - gql.Field( - name='RedshiftSchema', - args=[ - gql.Argument(name='clusterUri', type=gql.NonNullableType(gql.String)) - ], - type=gql.String, - resolver=resolve_redshift_copy_schema, - ), - gql.Field( - name='RedshiftCopyDataLocation', - args=[ - gql.Argument(name='clusterUri', type=gql.NonNullableType(gql.String)) - ], - type=gql.String, - resolver=resolve_redshift_copy_location, - ), gql.Field( name='terms', type=gql.Ref('TermSearchResult'), diff --git a/backend/dataall/modules/datasets/services/dataset_service.py b/backend/dataall/modules/datasets/services/dataset_service.py index 0f93d4d64..03bea5444 100644 --- a/backend/dataall/modules/datasets/services/dataset_service.py +++ b/backend/dataall/modules/datasets/services/dataset_service.py @@ -326,15 +326,6 @@ def delete_dataset(uri: str, delete_from_aws: bool = False): message=f'Dataset {dataset.name} is shared with other teams. ' 'Revoke all dataset shares before deletion.', ) - redshift_datasets = DatasetRepository.list_dataset_redshift_clusters( - session, uri - ) - if redshift_datasets: - raise UnauthorizedOperation( - action=DELETE_DATASET, - message='Dataset is used by Redshift clusters. ' - 'Remove clusters associations first.', - ) tables = [t.tableUri for t in DatasetRepository.get_dataset_tables(session, uri)] for uri in tables: diff --git a/backend/dataall/modules/datasets/tasks/dataset_subscription_task.py b/backend/dataall/modules/datasets/tasks/dataset_subscription_task.py index 85b02a2ae..6e308ba25 100644 --- a/backend/dataall/modules/datasets/tasks/dataset_subscription_task.py +++ b/backend/dataall/modules/datasets/tasks/dataset_subscription_task.py @@ -137,10 +137,6 @@ def publish_sns_message( f'Producer message before notifications: {message}' ) - self.redshift_copy( - session, message, dataset, environment, table - ) - message = { 'location': prefix, 'owner': dataset.owner, @@ -177,36 +173,6 @@ def sns_call(message, environment): ) return response - # TODO redshift related code - def redshift_copy( - self, - session, - message, - dataset: Dataset, - environment: models.Environment, - table: DatasetTable, - ): - log.info( - f'Redshift copy starting ' - f'{environment.environmentUri}|{dataset.datasetUri}' - f'|{json_utils.to_json(message)}' - ) - - task = models.Task( - action='redshift.subscriptions.copy', - targetUri=environment.environmentUri, - payload={ - 'datasetUri': dataset.datasetUri, - 'message': json_utils.to_json(message), - 'tableUri': table.tableUri, - }, - ) - session.add(task) - session.commit() - - response = Worker.queue(self.engine, [task.taskUri]) - return response - if __name__ == '__main__': ENVNAME = os.environ.get('envname', 'local') diff --git a/backend/dataall/modules/datasets_base/db/dataset_repository.py b/backend/dataall/modules/datasets_base/db/dataset_repository.py index 878ac9a48..0601b0eae 100644 --- a/backend/dataall/modules/datasets_base/db/dataset_repository.py +++ b/backend/dataall/modules/datasets_base/db/dataset_repository.py @@ -7,7 +7,7 @@ Environment, ) from dataall.db.api import Organization -from dataall.db import models, exceptions, paginate +from dataall.db import models, paginate from dataall.db.exceptions import ObjectNotFound from dataall.db.models.Enums import Language from dataall.modules.datasets_base.db.enums import ConfidentialityClassification @@ -245,17 +245,6 @@ def get_dataset_tables(session, dataset_uri): .all() ) - @staticmethod - def list_dataset_redshift_clusters( - session, dataset_uri - ) -> 
[models.RedshiftClusterDataset]: - """return the dataset clusters""" - return ( - session.query(models.RedshiftClusterDataset) - .filter(models.RedshiftClusterDataset.datasetUri == dataset_uri) - .all() - ) - @staticmethod def delete_dataset(session, dataset) -> bool: session.delete(dataset) diff --git a/backend/dataall/modules/datasets_base/db/models.py b/backend/dataall/modules/datasets_base/db/models.py index 4f41f4919..7fe46c6a2 100644 --- a/backend/dataall/modules/datasets_base/db/models.py +++ b/backend/dataall/modules/datasets_base/db/models.py @@ -76,7 +76,6 @@ class DatasetTable(Resource, Base): confidentiality = Column(String, nullable=True) userRoleForTable = query_expression() projectPermission = query_expression() - redshiftClusterPermission = query_expression() stage = Column(String, default='RAW') topics = Column(ARRAY(String), nullable=True) confidentiality = Column(String, nullable=False, default='C1') @@ -133,8 +132,6 @@ class Dataset(Resource, Base): SamlAdminGroupName = Column(String, nullable=True) - redshiftClusterPermission = query_expression() - importedS3Bucket = Column(Boolean, default=False) importedGlueDatabase = Column(Boolean, default=False) importedKmsKey = Column(Boolean, default=False) diff --git a/backend/migrations/versions/5fc49baecea4_add_enviromental_parameters.py b/backend/migrations/versions/5fc49baecea4_add_enviromental_parameters.py index 871a533e9..c08e8baf7 100644 --- a/backend/migrations/versions/5fc49baecea4_add_enviromental_parameters.py +++ b/backend/migrations/versions/5fc49baecea4_add_enviromental_parameters.py @@ -14,7 +14,7 @@ from sqlalchemy.ext.declarative import declarative_base from dataall.db import Resource, models from dataall.db.api import ResourcePolicy, Permission -from dataall.db.models import EnvironmentGroup, PermissionType, ResourcePolicyPermission +from dataall.db.models import EnvironmentGroup, PermissionType, ResourcePolicyPermission, TenantPolicyPermission from dataall.modules.datasets.services.dataset_permissions import LIST_ENVIRONMENT_DATASETS, CREATE_DATASET # revision identifiers, used by Alembic. 
@@ -25,9 +25,19 @@ Base = declarative_base() -UNUSED_PERMISSIONS = ['LIST_DATASETS', 'LIST_DATASET_TABLES', 'LIST_DATASET_SHARES', 'SUMMARY_DATASET', - 'IMPORT_DATASET', 'UPLOAD_DATASET', 'URL_DATASET', 'STACK_DATASET', 'SUBSCRIPTIONS_DATASET', - 'CREATE_DATASET_TABLE', 'LIST_PIPELINES', 'DASHBOARD_URL'] +UNUSED_RESOURCE_PERMISSIONS = [ + 'LIST_DATASETS', 'LIST_DATASET_TABLES', 'LIST_DATASET_SHARES', 'SUMMARY_DATASET', + 'IMPORT_DATASET', 'UPLOAD_DATASET', 'URL_DATASET', 'STACK_DATASET', 'SUBSCRIPTIONS_DATASET', + 'CREATE_DATASET_TABLE', 'LIST_PIPELINES', 'DASHBOARD_URL', 'GET_REDSHIFT_CLUSTER', + 'SHARE_REDSHIFT_CLUSTER', 'DELETE_REDSHIFT_CLUSTER', 'REBOOT_REDSHIFT_CLUSTER', 'RESUME_REDSHIFT_CLUSTER', + 'PAUSE_REDSHIFT_CLUSTER', 'ADD_DATASET_TO_REDSHIFT_CLUSTER', 'LIST_REDSHIFT_CLUSTER_DATASETS', + 'REMOVE_DATASET_FROM_REDSHIFT_CLUSTER', 'ENABLE_REDSHIFT_TABLE_COPY', 'DISABLE_REDSHIFT_TABLE_COPY', + 'GET_REDSHIFT_CLUSTER_CREDENTIALS', 'CREATE_REDSHIFT_CLUSTER', 'LIST_ENVIRONMENT_REDSHIFT_CLUSTERS' +] + +UNUSED_TENANT_PERMISSIONS = [ + 'MANAGE_REDSHIFT_CLUSTERS' +] class Environment(Resource, Base): @@ -108,7 +118,7 @@ def upgrade(): session.commit() migrate_groups_permissions(session) - delete_unused_resource_permissions(session) + delete_unused_permissions(session) except Exception as ex: print(f"Failed to execute the migration script due to: {ex}") @@ -145,8 +155,7 @@ def downgrade(): dashboardsEnabled=params["dashboardsEnabled"] == "true" )) - for name in UNUSED_PERMISSIONS: - Permission.save_permission(session, name, name, PermissionType.RESOURCE.value) + save_deleted_permissions(session) session.add_all(envs) print("Dropping environment_parameter table...") @@ -205,8 +214,8 @@ def migrate_groups_permissions(session): ) -def delete_unused_resource_permissions(session): - for name in UNUSED_PERMISSIONS: +def delete_unused_permissions(session): + for name in UNUSED_RESOURCE_PERMISSIONS: perm = Permission.get_permission_by_name(session, name, PermissionType.RESOURCE.value) ( session.query(ResourcePolicyPermission) @@ -214,3 +223,20 @@ def delete_unused_resource_permissions(session): .delete() ) session.delete(perm) + + for name in UNUSED_TENANT_PERMISSIONS: + perm = Permission.get_permission_by_name(session, name, PermissionType.TENANT.value) + ( + session.query(TenantPolicyPermission) + .filter(TenantPolicyPermission.permissionUri == perm.permissionUri) + .delete() + ) + session.delete(perm) + + +def save_deleted_permissions(session): + for name in UNUSED_RESOURCE_PERMISSIONS: + Permission.save_permission(session, name, name, PermissionType.RESOURCE.value) + + for name in UNUSED_TENANT_PERMISSIONS: + Permission.save_permission(session, name, name, PermissionType.TENANT.value) diff --git a/deploy/pivot_role/pivotRole.yaml b/deploy/pivot_role/pivotRole.yaml index dca0e7009..7d4a19d61 100644 --- a/deploy/pivot_role/pivotRole.yaml +++ b/deploy/pivot_role/pivotRole.yaml @@ -256,51 +256,6 @@ Resources: PolicyDocument: Version: 2012-10-17 Statement: - - Sid: Redshift - Effect: Allow - Action: - - 'redshift:DeleteTags' - - 'redshift:ModifyClusterIamRoles' - - 'redshift:DescribeClusterSecurityGroups' - - 'redshift:DescribeClusterSubnetGroups' - - 'redshift:pauseCluster' - - 'redshift:resumeCluster' - Resource: '*' - Condition: - StringEquals: - 'aws:ResourceTag/dataall': 'true' - - Sid: RedshiftRead - Effect: Allow - Action: - - 'redshift:DescribeClusters' - - 'redshift:CreateTags' - - 'redshift:DescribeClusterSubnetGroups' - Resource: '*' - - Sid: RedshiftCreds - Effect: Allow - Action: - - 
'redshift:GetClusterCredentials'
-          Resource:
-            - !Sub 'arn:aws:redshift:*:${AWS::AccountId}:dbgroup:*/*'
-            - !Sub 'arn:aws:redshift:*:${AWS::AccountId}:dbname:*/*'
-            - !Sub 'arn:aws:redshift:*:${AWS::AccountId}:dbuser:*/*'
-        - Sid: AllowRedshiftSubnet
-          Effect: Allow
-          Action:
-            - 'redshift:CreateClusterSubnetGroup'
-          Resource: '*'
-        - Sid: AllowRedshiftDataApi
-          Effect: Allow
-          Action:
-            - 'redshift-data:ListTables'
-            - 'redshift-data:GetStatementResult'
-            - 'redshift-data:CancelStatement'
-            - 'redshift-data:ListSchemas'
-            - 'redshift-data:ExecuteStatement'
-            - 'redshift-data:ListStatements'
-            - 'redshift-data:ListDatabases'
-            - 'redshift-data:DescribeStatement'
-          Resource: '*'
         - Sid: EC2SG
           Effect: Allow
           Action:
@@ -418,32 +373,6 @@ Resources:
           Condition:
             StringEquals:
               'aws:RequestTag/dataall': 'true'
-        - Sid: SGandRedshift
-          Effect: Allow
-          Action:
-            - 'ec2:DeleteTags'
-            - 'ec2:DeleteSecurityGroup'
-            - 'redshift:DeleteClusterSubnetGroup'
-          Resource:
-            - '*'
-          Condition:
-            'ForAnyValue:StringEqualsIfExists':
-              'aws:ResourceTag/dataall': 'true'
-        - Sid: RedshiftDataApi
-          Effect: Allow
-          Action:
-            - 'redshift-data:ListTables'
-            - 'redshift-data:GetStatementResult'
-            - 'redshift-data:CancelStatement'
-            - 'redshift-data:ListSchemas'
-            - 'redshift-data:ExecuteStatement'
-            - 'redshift-data:ListStatements'
-            - 'redshift-data:ListDatabases'
-            - 'redshift-data:DescribeStatement'
-          Resource: '*'
-          Condition:
-            StringEqualsIfExists:
-              'aws:ResourceTag/dataall': 'true'
         - Sid: DevTools0
           Effect: Allow
           Action: 'cloudformation:ValidateTemplate'
diff --git a/documentation/userguide/docs/environments.md b/documentation/userguide/docs/environments.md
index 63084ef37..70ff0c66e 100644
--- a/documentation/userguide/docs/environments.md
+++ b/documentation/userguide/docs/environments.md
@@ -186,7 +186,6 @@ the environment organization. There are several tabs just below the environment
 - Teams: list of all teams onboarded to this environment.
 - Datasets: list of all datasets owned and shared with for this environment
 - Networks: VPCs created and owned by the environment
-- Warehouses: Redshift clusters imported or created in this environment
 - Subscriptions: SNS topic subscriptions enabled or disabled in the environment
 - Tags: editable key-value tags
 - Stack: CloudFormation stack details and logs
@@ -266,9 +265,9 @@ disabled as appears in the following picture.
 ![](pictures/environments/env_teams_2.png#zoom#shadow)
 
 When the invitation is saved, the environment CloudFormation stack gets automatically updated and creates a
-new IAM role for the new team. The IAM role policies mapped to the permissions granted to the invited team
-(e.g., a team invited without "Create Redshift clusters" permission will not have
-redshift permissions on the associated IAM role).To remove a group, in the *Actions* column select the minus icon.
+new IAM role for the new team. The IAM role policies are mapped to the permissions granted to the invited team
+(e.g., a team invited without "Create ML Studio" permission will not have
+SageMaker permissions on the associated IAM role). To remove a group, in the *Actions* column select the minus icon.
 
 !!! warning "Automated permission assignment"
warning "Automated permission assignment" diff --git a/documentation/userguide/docs/redshift.md b/documentation/userguide/docs/redshift.md deleted file mode 100644 index a1b03a96e..000000000 --- a/documentation/userguide/docs/redshift.md +++ /dev/null @@ -1,71 +0,0 @@ -# **Integrations** - -## **Data Warehouse** -Datahub natively supports Amazon Redshift, -which allows you to integrate seamlessly your Redshift cluster with your Datahub environment. - -### **Create Redshift cluster** - -To create an Amazon Redshift cluster: - -1. On left pane under **Play!** choose **Warehouses** then **Create** -2. The **creation form** opens. -3. Choose the environment where the cluster will - be created. -4. Fill in the form with the cluster properties (The AWS VPC must have private subnets) -5. Save the form -![create_cluster](pictures/integrations/create_cluster.png#zoom#shadow) - -!!! success - **You created a new Amazon Redshift cluster!** - -### **Import Redshift cluster** - -If you already have data stored on Amazon S3 buckets, Datahub got you covered with the import feature. - -To import a dataset: - -1. On left pane choose **Contribute** then **Import** -2. The **dataset form** opens. -3. Choose the environment where the dataset will - be created. -4. In **Dataset label**, enter a name for your dataset. -5. Grab your Amazon S3 bucket name and put it on bucket name field. - -![import_dataset](pictures/integrations/import_cluster.png#zoom#shadow) -!!! success - **You imported an existing Redshift cluster to Datahub!** - -### 📥 **Load datasets to your cluster with Spectrum** - -Datahub offers natively an integration with Redshift Spectrum -to load your data from Amazon S3 to your cluster. -To load a dataset: - -1. Select your Redshift cluster -2. Go to **Datasets** tab. -3. Click on **Load Datasets** and choose the dataset you want to load. - ![load_dataset](pictures/integrations/load_dataset.png#zoom#shadow) -4. Use the connection details on the connection tab to access your cluster database - ![connection](pictures/integrations/connection.png#zoom#shadow) - ![connect_redshift](pictures/integrations/connect_redshift.png#zoom#shadow) -5. Query you dataset on Redshift. - ![query_loaded_dataset](pictures/integrations/query_loaded_dataset.png#zoom#shadow) - -### 🖨️ **Copy dataset table to your cluster with COPY command** -As data subscriber, Datahub can automate copying data from S3 to your Redshift cluster, -when data producers publish an update. - -🧙 Load the dataset first, then manage its tables copy subscriptions. - -To manage data copy: -1. Select your Redshift cluster -2. Go to **Tables** tab. - ![enable_copy](pictures/integrations/enable_copy.png#zoom#shadow) -3. Click on **Subscribe** and choose the table you want to copy on the cluster and the target schema where - the table will be created. 
-!!!abstract "COPY confirmed" - Now your table will have the latest snapshot of data from the producers **at each update.** - -The latest table data snapshot is created on the assigned schema -![copy_table.png](pictures/integrations/copy_table.png#zoom#shadow) diff --git a/documentation/userguide/mkdocs.yml b/documentation/userguide/mkdocs.yml index 13373a05a..222764107 100644 --- a/documentation/userguide/mkdocs.yml +++ b/documentation/userguide/mkdocs.yml @@ -16,7 +16,6 @@ nav: - ML Studio: mlstudio.md - Pipelines: pipelines.md - Dashboards: dashboards.md - # - Warehouses: redshift.md - Security: security.md - Monitoring: monitoring.md - Labs: @@ -26,7 +25,6 @@ nav: #- Exploration with Notebooks and Worksheets: lab_template.md #- Creating and sharing dashboards: lab_template.md #- Using ML Studio: lab_template.md - #- Using Redshift clusters: lab_template.md use_directory_urls: false diff --git a/tests/api/conftest.py b/tests/api/conftest.py index ba31b3a87..92bb41885 100644 --- a/tests/api/conftest.py +++ b/tests/api/conftest.py @@ -322,54 +322,3 @@ def env_fixture(env, org_fixture, user, group, tenant, module_mocker): ) env1 = env(org_fixture, 'dev', 'alice', 'testadmins', '111111111111', 'eu-west-1') yield env1 - - -@pytest.fixture(scope='module') -def cluster(env_fixture, org_fixture, client, group): - ouri = org_fixture.organizationUri - euri = env_fixture.environmentUri - group_name = group.name - res = client.query( - """ - mutation createRedshiftCluster { - createRedshiftCluster( - environmentUri:"%(euri)s", - clusterInput:{ - label : "mycluster", - description:"a test cluster", - vpc: "vpc-12345", - databaseName: "mydb", - masterDatabaseName: "masterDatabaseName", - masterUsername:"masterUsername", - nodeType: "multi-node", - numberOfNodes: 2, - subnetIds: ["subnet-1","subnet-2"], - securityGroupIds: ["sg-1","sg-2"], - tags:["test"], - SamlGroupName: "%(group_name)s" - } - ){ - clusterUri - label - description - tags - databaseName - masterDatabaseName - masterUsername - nodeType - numberOfNodes - subnetIds - securityGroupIds - userRoleForCluster - userRoleInEnvironment - owner - - } - } - """ - % vars(), - 'alice', - groups=[group_name], - ) - print(res) - yield res.data.createRedshiftCluster diff --git a/tests/api/test_redshift_cluster.py b/tests/api/test_redshift_cluster.py deleted file mode 100644 index 1fe8c7d08..000000000 --- a/tests/api/test_redshift_cluster.py +++ /dev/null @@ -1,496 +0,0 @@ -import typing - - -import pytest -import dataall -from dataall.api.constants import RedshiftClusterRole - -from tests.modules.datasets.conftest import dataset, table - -from dataall.modules.datasets_base.db.dataset_repository import DatasetRepository -from dataall.modules.datasets_base.db.models import Dataset - - -@pytest.fixture(scope='module', autouse=True) -def patch_check_dataset(module_mocker): - module_mocker.patch( - 'dataall.modules.datasets.services.dataset_service.DatasetService.check_dataset_account', return_value=True - ) - - -@pytest.fixture(scope='module', autouse=True) -def org1(org, user, group, tenant): - org1 = org('testorg', user.userName, group.name) - yield org1 - - -@pytest.fixture(scope='module', autouse=True) -def env1(env, org1, user, group, tenant): - env1 = env(org1, 'dev', user.userName, group.name, '111111111111', 'eu-west-1') - yield env1 - - -@pytest.fixture(scope='module') -def dataset1(db, user, env1, org1, dataset, group, group3) -> Dataset: - with db.scoped_session() as session: - data = dict( - label='label', - owner=user.userName, - 
SamlAdminGroupName=group.name, - businessOwnerDelegationEmails=['foo@amazon.com'], - businessOwnerEmail=['bar@amazon.com'], - name='name', - S3BucketName='S3BucketName', - GlueDatabaseName='GlueDatabaseName', - KmsAlias='kmsalias', - AwsAccountId='123456789012', - region='eu-west-1', - IAMDatasetAdminUserArn=f'arn:aws:iam::123456789012:user/dataset', - IAMDatasetAdminRoleArn=f'arn:aws:iam::123456789012:role/dataset', - stewards=group3.name, - ) - dataset = DatasetRepository.create_dataset( - session=session, - username=user.userName, - uri=env1.environmentUri, - data=data, - ) - yield dataset - - -@pytest.fixture(scope='module', autouse=True) -def table1(table, dataset1): - yield table(dataset1, name='table1', username=dataset1.owner) - - -@pytest.fixture(scope='module') -def org2(org: typing.Callable, user2, group2, tenant) -> dataall.db.models.Organization: - yield org('org2', user2.userName, group2.name) - - -@pytest.fixture(scope='module') -def env2( - env: typing.Callable, org2: dataall.db.models.Organization, user2, group2, tenant -) -> dataall.db.models.Environment: - yield env(org2, 'dev', user2.userName, group2.name, '2' * 12, 'eu-west-1') - - -@pytest.fixture(scope='module') -def dataset2(env2, org2, dataset, group2, user2) -> Dataset: - yield dataset( - org=org2, - env=env2, - name=user2.userName, - owner=env2.owner, - group=group2.name, - ) - - -@pytest.fixture(scope='module', autouse=True) -def table2(table, dataset2): - yield table(dataset2, name='table2', username=dataset2.owner) - - -@pytest.fixture(scope='module') -def cluster(env1, org1, client, group): - ouri = org1.organizationUri - euri = env1.environmentUri - group_name = group.name - res = client.query( - """ - mutation createRedshiftCluster { - createRedshiftCluster( - environmentUri:"%(euri)s", - clusterInput:{ - label : "mycluster", - description:"a test cluster", - vpc: "vpc-12345", - databaseName: "mydb", - masterDatabaseName: "masterDatabaseName", - masterUsername:"masterUsername", - nodeType: "multi-node", - numberOfNodes: 2, - subnetIds: ["subnet-1","subnet-2"], - securityGroupIds: ["sg-1","sg-2"], - tags:["test"], - SamlGroupName: "%(group_name)s" - } - ){ - clusterUri - label - description - tags - databaseName - masterDatabaseName - masterUsername - nodeType - numberOfNodes - subnetIds - securityGroupIds - userRoleForCluster - userRoleInEnvironment - owner - - } - } - """ - % vars(), - 'alice', - groups=[group_name], - ) - print(res) - yield res.data.createRedshiftCluster - - -def test_create(cluster): - assert cluster.clusterUri is not None - assert cluster.label == 'mycluster' - assert cluster.description == 'a test cluster' - assert cluster.tags[0] == 'test' - assert cluster.databaseName == 'mydb' - assert cluster.masterDatabaseName == 'masterDatabaseName' - assert cluster.masterUsername == 'masterUsername' - assert cluster.nodeType == 'multi-node' - assert cluster.numberOfNodes == 2 - assert cluster.subnetIds[0] == 'subnet-1' - assert cluster.securityGroupIds[0] == 'sg-1' - assert cluster.userRoleForCluster == RedshiftClusterRole.Creator.name - - -def test_get_cluster_as_owner(cluster, client, group): - duri = cluster.clusterUri - res = client.query( - """ - query getRedshiftCluster{ - getRedshiftCluster(clusterUri:"%(duri)s"){ - clusterUri - owner - label - description - tags - masterDatabaseName - masterUsername - nodeType - numberOfNodes - subnetIds - securityGroupIds - userRoleForCluster - userRoleInEnvironment - } - } - """ - % vars(), - username='alice', - groups=[group.name], - ) - print(res) - 
assert res.data.getRedshiftCluster.clusterUri == duri - - -def test_get_cluster_anonymous(cluster, client): - print(' [¨] ' * 10) - duri = cluster.clusterUri - res = client.query( - """ - query getRedshiftCluster{ - getRedshiftCluster(clusterUri:"%(duri)s"){ - clusterUri - label - description - tags - masterDatabaseName - masterUsername - nodeType - numberOfNodes - subnetIds - securityGroupIds - userRoleForCluster - userRoleInEnvironment - } - } - """ - % vars(), - username='bob', - ) - print(res) - assert not res.data.getRedshiftCluster - - -def test_list_env_clusters_no_filter(env1, cluster, client, group): - euri = env1.environmentUri - res = client.query( - """ - query listEnvironmentClusters{ - listEnvironmentClusters(environmentUri:"%(euri)s"){ - count - nodes{ - clusterUri - label - userRoleForCluster - } - } - } - """ - % vars(), - username='alice', - groups=[group.name], - ) - print(res) - assert res.data.listEnvironmentClusters.count == 1 - - -def test_list_env_clusters_filter_term(env1, cluster, client, group): - euri = env1.environmentUri - res = client.query( - """ - query listEnvironmentClusters{ - listEnvironmentClusters(environmentUri:"%(euri)s", - filter:{ - term : "mycluster" - } - ){ - count - nodes{ - clusterUri - label - userRoleForCluster - } - } - } - """ - % vars(), - username='alice', - groups=[group.name], - ) - assert res.data.listEnvironmentClusters.count == 1 - - -# def test_list_cluster_available_datasets(env1, cluster, dataset1, client, group): -# res = client.query( -# """ -# query ListRedshiftClusterAvailableDatasets($clusterUri:String!,$filter:RedshiftClusterDatasetFilter){ -# listRedshiftClusterAvailableDatasets(clusterUri:$clusterUri,filter:$filter){ -# count -# page -# pages -# hasNext -# hasPrevious -# nodes{ -# datasetUri -# name -# label -# region -# tags -# userRoleForDataset -# redshiftClusterPermission(clusterUri:$clusterUri) -# description -# organization{ -# name -# organizationUri -# label -# } -# statistics{ -# tables -# locations -# } -# environment{ -# environmentUri -# name -# AwsAccountId -# SamlGroupName -# region -# } -# -# } -# } -# }""", -# clusterUri=cluster.clusterUri, -# username='alice', -# groups=[group.name], -# ) -# print(res) -# assert res.data.listRedshiftClusterAvailableDatasets.count == 2 -# - -# def test_add_dataset_to_cluster(env1, cluster, dataset1, client, db, group): -# with db.scoped_session() as session: -# cluster = session.query(dataall.db.models.RedshiftCluster).get( -# cluster.clusterUri -# ) -# cluster.status = 'available' -# session.commit() -# res = client.query( -# """ -# mutation addDatasetToRedshiftCluster( -# $clusterUri:String, -# $datasetUri:String, -# ){ -# addDatasetToRedshiftCluster( -# clusterUri:$clusterUri, -# datasetUri:$datasetUri -# ) -# } -# """, -# clusterUri=cluster.clusterUri, -# datasetUri=dataset1.datasetUri, -# username='alice', -# groups=[group.name], -# ) -# print(res) -# -# -# def test_cluster_tables_copy(env1, cluster, dataset1, env2, client, db, group): -# res = client.query( -# """ -# query listRedshiftClusterAvailableDatasetTables($clusterUri:String!,$filter:DatasetTableFilter){ -# listRedshiftClusterAvailableDatasetTables(clusterUri:$clusterUri,filter:$filter){ -# count -# page -# pages -# hasNext -# hasPrevious -# count -# nodes{ -# tableUri -# name -# label -# GlueDatabaseName -# GlueTableName -# S3Prefix -# } -# } -# }""", -# clusterUri=cluster.clusterUri, -# username='alice', -# groups=[group.name], -# ) -# print(res) -# assert 
res.data.listRedshiftClusterAvailableDatasetTables.count == 2 -# -# table = res.data.listRedshiftClusterAvailableDatasetTables.nodes[0] -# -# res = client.query( -# """ -# mutation enableRedshiftClusterDatasetTableCopy( -# $clusterUri:String!, -# $datasetUri:String!, -# $tableUri:String!, -# $schema: String!, -# $dataLocation: String! -# ){ -# enableRedshiftClusterDatasetTableCopy( -# clusterUri:$clusterUri, -# datasetUri:$datasetUri, -# tableUri:$tableUri, -# schema:$schema, -# dataLocation:$dataLocation -# ) -# } -# """, -# clusterUri=cluster.clusterUri, -# datasetUri=dataset1.datasetUri, -# tableUri=table.tableUri, -# schema='myschema', -# username='alice', -# groups=[group.name], -# dataLocation='yes', -# ) -# print(res) -# assert res.data.enableRedshiftClusterDatasetTableCopy -# -# res = client.query( -# """ -# query listRedshiftClusterCopyEnabledTables($clusterUri:String!,$filter:DatasetTableFilter){ -# listRedshiftClusterCopyEnabledTables(clusterUri:$clusterUri,filter:$filter){ -# count -# page -# pages -# hasNext -# hasPrevious -# count -# nodes{ -# tableUri -# name -# label -# GlueDatabaseName -# GlueTableName -# S3Prefix -# RedshiftSchema(clusterUri:$clusterUri) -# RedshiftCopyDataLocation(clusterUri:$clusterUri) -# } -# } -# }""", -# clusterUri=cluster.clusterUri, -# username='alice', -# groups=[group.name], -# ) -# print(res) -# assert res.data.listRedshiftClusterCopyEnabledTables.count == 1 -# -# res = client.query( -# """ -# mutation disableRedshiftClusterDatasetTableCopy( -# $clusterUri:String!, -# $datasetUri:String!, -# $tableUri:String! -# ){ -# disableRedshiftClusterDatasetTableCopy( -# clusterUri:$clusterUri, -# datasetUri:$datasetUri, -# tableUri:$tableUri -# ) -# } -# """, -# clusterUri=cluster.clusterUri, -# datasetUri=dataset1.datasetUri, -# tableUri=table.tableUri, -# username='alice', -# groups=[group.name], -# ) -# print(res) -# assert res.data.disableRedshiftClusterDatasetTableCopy -# -# res = client.query( -# """ -# query listRedshiftClusterCopyEnabledTables($clusterUri:String!,$filter:DatasetTableFilter){ -# listRedshiftClusterCopyEnabledTables(clusterUri:$clusterUri,filter:$filter){ -# count -# page -# pages -# hasNext -# hasPrevious -# count -# nodes{ -# tableUri -# name -# label -# GlueDatabaseName -# GlueTableName -# S3Prefix -# } -# } -# }""", -# clusterUri=cluster.clusterUri, -# username='alice', -# groups=[group.name], -# ) -# print(res) -# assert res.data.listRedshiftClusterCopyEnabledTables.count == 0 -# - -def test_delete_cluster(client, cluster, env1, org1, db, module_mocker, group, user): - module_mocker.patch( - 'dataall.aws.handlers.service_handlers.Worker.queue', return_value=True - ) - response = client.query( - """ - mutation deleteRedshiftCluster($clusterUri:String!,$deleteFromAWS:Boolean){ - deleteRedshiftCluster(clusterUri:$clusterUri, deleteFromAWS:$deleteFromAWS) - } - """, - clusterUri=cluster.clusterUri, - deleteFromAWS=True, - username=user.userName, - groups=[group.name], - ) - assert response.data.deleteRedshiftCluster diff --git a/tests/api/test_stack.py b/tests/api/test_stack.py index 329bb9874..c9c3e7b21 100644 --- a/tests/api/test_stack.py +++ b/tests/api/test_stack.py @@ -3,16 +3,12 @@ def test_update_stack( tenant, group, env_fixture, - cluster, ): response = update_stack_query( client, env_fixture.environmentUri, 'environment', group.name ) assert response.data.updateStack.targetUri == env_fixture.environmentUri - response = update_stack_query(client, cluster.clusterUri, 'redshift', group.name) - assert 
response.data.updateStack.targetUri == cluster.clusterUri - def update_stack_query(client, target_uri, target_type, group): response = client.query( diff --git a/tests/cdkproxy/conftest.py b/tests/cdkproxy/conftest.py index e7ec05637..03093dd54 100644 --- a/tests/cdkproxy/conftest.py +++ b/tests/cdkproxy/conftest.py @@ -58,34 +58,6 @@ def env(db, org: models.Organization) -> models.Environment: yield env -@pytest.fixture(scope='module', autouse=True) -def redshift_cluster(db, env: models.Environment) -> models.RedshiftCluster: - with db.scoped_session() as session: - cluster = models.RedshiftCluster( - environmentUri=env.environmentUri, - organizationUri=env.organizationUri, - owner='owner', - label='cluster', - description='desc', - masterDatabaseName='dev', - masterUsername='masteruser', - databaseName='datahubdb', - nodeType='dc1.large', - numberOfNodes=2, - port=5432, - region=env.region, - AwsAccountId=env.AwsAccountId, - status='CREATING', - vpc='vpc-12344', - IAMRoles=[env.EnvironmentDefaultIAMRoleArn], - tags=[], - SamlGroupName='admins', - imported=False, - ) - session.add(cluster) - yield cluster - - @pytest.fixture(scope='function', autouse=True) def patch_ssm(mocker): mocker.patch( diff --git a/tests/cdkproxy/test_redshift_cluster_stack.py b/tests/cdkproxy/test_redshift_cluster_stack.py deleted file mode 100644 index ab738a491..000000000 --- a/tests/cdkproxy/test_redshift_cluster_stack.py +++ /dev/null @@ -1,55 +0,0 @@ -import json - -import pytest -from aws_cdk import App - -from dataall.cdkproxy.stacks import RedshiftStack - - -@pytest.fixture(scope='function', autouse=True) -def patch_methods(mocker, db, redshift_cluster, env, org): - mocker.patch( - 'dataall.cdkproxy.stacks.redshift_cluster.RedshiftStack.get_engine', - return_value=db, - ) - mocker.patch( - 'dataall.aws.handlers.sts.SessionHelper.get_delegation_role_name', - return_value="dataall-pivot-role-name-pytest", - ) - mocker.patch( - 'dataall.cdkproxy.stacks.redshift_cluster.RedshiftStack.get_target', - return_value=(redshift_cluster, env), - ) - mocker.patch( - 'dataall.utils.runtime_stacks_tagging.TagsUtil.get_engine', return_value=db - ) - mocker.patch( - 'dataall.utils.runtime_stacks_tagging.TagsUtil.get_target', - return_value=redshift_cluster, - ) - mocker.patch( - 'dataall.utils.runtime_stacks_tagging.TagsUtil.get_environment', - return_value=env, - ) - mocker.patch( - 'dataall.utils.runtime_stacks_tagging.TagsUtil.get_organization', - return_value=org, - ) - - -@pytest.fixture(scope='function', autouse=True) -def template(redshift_cluster): - app = App() - RedshiftStack( - app, - 'Cluster', - env={'account': '123456789012', 'region': 'eu-west-1'}, - target_uri=redshift_cluster.clusterUri, - ) - return json.dumps(app.synth().get_stack_by_name('Cluster').template) - - -def test_resources_created(template): - assert 'AWS::Redshift::Cluster' in template - assert 'AWS::SecretsManager::Secret' in template - assert 'AWS::KMS::Key' in template From 8c7b8464986cdab2e89ea3fded847324282e4078 Mon Sep 17 00:00:00 2001 From: Balint David Date: Wed, 5 Jul 2023 14:31:26 +0200 Subject: [PATCH 3/7] remove redshift code from frontend --- .../RedshiftCluster/addDatasetToCluster.js | 18 - .../api/RedshiftCluster/copyTableToCluster.js | 30 - .../src/api/RedshiftCluster/createCluster.js | 26 - .../src/api/RedshiftCluster/deleteCluster.js | 21 - .../disableClusterDatasetCopy.js | 24 - .../enableClusterDatasetCopy.js | 18 - .../src/api/RedshiftCluster/getCluster.js | 75 --- .../getClusterConsoleAccess.js | 14 - 
.../getClusterDatabaseCredentials.js | 21 - .../src/api/RedshiftCluster/importCluster.js | 26 - .../listAvailableDatasetTables.js | 40 -- .../RedshiftCluster/listAvailableDatasets.js | 59 -- .../listClusterDatasetTables.js | 40 -- .../RedshiftCluster/listClusterDatasets.js | 57 -- .../listEnvironmentClusters.js | 77 --- .../src/api/RedshiftCluster/pauseCluster.js | 14 - .../src/api/RedshiftCluster/rebootCluster.js | 14 - .../removeDatasetFromCluster.js | 18 - .../src/api/RedshiftCluster/resumeCluster.js | 14 - .../src/api/RedshiftCluster/searchClusters.js | 70 -- .../Environments/EnvironmentCreateForm.js | 31 - .../views/Environments/EnvironmentEditForm.js | 31 - .../src/views/Environments/EnvironmentView.js | 5 - .../Environments/EnvironmentWarehouses.js | 218 ------ .../views/Warehouses/WarehouseConnection.js | 104 --- .../Warehouses/WarehouseCopyTableModal.js | 250 ------- .../views/Warehouses/WarehouseCreateForm.js | 619 ------------------ .../views/Warehouses/WarehouseCredentials.js | 122 ---- .../src/views/Warehouses/WarehouseDatasets.js | 292 --------- .../src/views/Warehouses/WarehouseEditForm.js | 357 ---------- .../views/Warehouses/WarehouseImportForm.js | 490 -------------- .../Warehouses/WarehouseLoadDatasetModal.js | 193 ------ .../src/views/Warehouses/WarehouseOverview.js | 56 -- .../src/views/Warehouses/WarehouseTables.js | 250 ------- .../src/views/Warehouses/WarehouseView.js | 343 ---------- 35 files changed, 4037 deletions(-) delete mode 100644 frontend/src/api/RedshiftCluster/addDatasetToCluster.js delete mode 100644 frontend/src/api/RedshiftCluster/copyTableToCluster.js delete mode 100644 frontend/src/api/RedshiftCluster/createCluster.js delete mode 100644 frontend/src/api/RedshiftCluster/deleteCluster.js delete mode 100644 frontend/src/api/RedshiftCluster/disableClusterDatasetCopy.js delete mode 100644 frontend/src/api/RedshiftCluster/enableClusterDatasetCopy.js delete mode 100644 frontend/src/api/RedshiftCluster/getCluster.js delete mode 100644 frontend/src/api/RedshiftCluster/getClusterConsoleAccess.js delete mode 100644 frontend/src/api/RedshiftCluster/getClusterDatabaseCredentials.js delete mode 100644 frontend/src/api/RedshiftCluster/importCluster.js delete mode 100644 frontend/src/api/RedshiftCluster/listAvailableDatasetTables.js delete mode 100644 frontend/src/api/RedshiftCluster/listAvailableDatasets.js delete mode 100644 frontend/src/api/RedshiftCluster/listClusterDatasetTables.js delete mode 100644 frontend/src/api/RedshiftCluster/listClusterDatasets.js delete mode 100644 frontend/src/api/RedshiftCluster/listEnvironmentClusters.js delete mode 100644 frontend/src/api/RedshiftCluster/pauseCluster.js delete mode 100644 frontend/src/api/RedshiftCluster/rebootCluster.js delete mode 100644 frontend/src/api/RedshiftCluster/removeDatasetFromCluster.js delete mode 100644 frontend/src/api/RedshiftCluster/resumeCluster.js delete mode 100644 frontend/src/api/RedshiftCluster/searchClusters.js delete mode 100644 frontend/src/views/Environments/EnvironmentWarehouses.js delete mode 100644 frontend/src/views/Warehouses/WarehouseConnection.js delete mode 100644 frontend/src/views/Warehouses/WarehouseCopyTableModal.js delete mode 100644 frontend/src/views/Warehouses/WarehouseCreateForm.js delete mode 100644 frontend/src/views/Warehouses/WarehouseCredentials.js delete mode 100644 frontend/src/views/Warehouses/WarehouseDatasets.js delete mode 100644 frontend/src/views/Warehouses/WarehouseEditForm.js delete mode 100644 frontend/src/views/Warehouses/WarehouseImportForm.js delete 
mode 100644 frontend/src/views/Warehouses/WarehouseLoadDatasetModal.js delete mode 100644 frontend/src/views/Warehouses/WarehouseOverview.js delete mode 100644 frontend/src/views/Warehouses/WarehouseTables.js delete mode 100644 frontend/src/views/Warehouses/WarehouseView.js diff --git a/frontend/src/api/RedshiftCluster/addDatasetToCluster.js b/frontend/src/api/RedshiftCluster/addDatasetToCluster.js deleted file mode 100644 index d863c773f..000000000 --- a/frontend/src/api/RedshiftCluster/addDatasetToCluster.js +++ /dev/null @@ -1,18 +0,0 @@ -import { gql } from 'apollo-boost'; - -const addDatasetToCluster = ({ clusterUri, datasetUri }) => ({ - variables: { clusterUri, datasetUri }, - mutation: gql` - mutation addDatasetToRedshiftCluster( - $clusterUri: String - $datasetUri: String - ) { - addDatasetToRedshiftCluster( - clusterUri: $clusterUri - datasetUri: $datasetUri - ) - } - ` -}); - -export default addDatasetToCluster; diff --git a/frontend/src/api/RedshiftCluster/copyTableToCluster.js b/frontend/src/api/RedshiftCluster/copyTableToCluster.js deleted file mode 100644 index f22c9a01e..000000000 --- a/frontend/src/api/RedshiftCluster/copyTableToCluster.js +++ /dev/null @@ -1,30 +0,0 @@ -import { gql } from 'apollo-boost'; - -const copyTableToCluster = ({ - clusterUri, - datasetUri, - tableUri, - schema, - dataLocation -}) => ({ - variables: { clusterUri, datasetUri, tableUri, schema, dataLocation }, - mutation: gql` - mutation enableRedshiftClusterDatasetTableCopy( - $clusterUri: String! - $datasetUri: String! - $tableUri: String! - $schema: String! - $dataLocation: String - ) { - enableRedshiftClusterDatasetTableCopy( - clusterUri: $clusterUri - datasetUri: $datasetUri - tableUri: $tableUri - schema: $schema - dataLocation: $dataLocation - ) - } - ` -}); - -export default copyTableToCluster; diff --git a/frontend/src/api/RedshiftCluster/createCluster.js b/frontend/src/api/RedshiftCluster/createCluster.js deleted file mode 100644 index 12c0b80be..000000000 --- a/frontend/src/api/RedshiftCluster/createCluster.js +++ /dev/null @@ -1,26 +0,0 @@ -import { gql } from 'apollo-boost'; - -const createRedshiftCluster = ({ environmentUri, input }) => ({ - variables: { - environmentUri, - clusterInput: input - }, - mutation: gql` - mutation createRedshiftCluster( - $environmentUri: String! - $clusterInput: NewClusterInput! - ) { - createRedshiftCluster( - environmentUri: $environmentUri - clusterInput: $clusterInput - ) { - clusterUri - name - label - created - } - } - ` -}); - -export default createRedshiftCluster; diff --git a/frontend/src/api/RedshiftCluster/deleteCluster.js b/frontend/src/api/RedshiftCluster/deleteCluster.js deleted file mode 100644 index 6de330ca0..000000000 --- a/frontend/src/api/RedshiftCluster/deleteCluster.js +++ /dev/null @@ -1,21 +0,0 @@ -import { gql } from 'apollo-boost'; - -const deleteRedshiftCluster = (clusterUri, deleteFromAWS) => ({ - variables: { - clusterUri, - deleteFromAWS - }, - mutation: gql` - mutation deleteRedshiftCluster( - $clusterUri: String! 
- $deleteFromAWS: Boolean - ) { - deleteRedshiftCluster( - clusterUri: $clusterUri - deleteFromAWS: $deleteFromAWS - ) - } - ` -}); - -export default deleteRedshiftCluster; diff --git a/frontend/src/api/RedshiftCluster/disableClusterDatasetCopy.js b/frontend/src/api/RedshiftCluster/disableClusterDatasetCopy.js deleted file mode 100644 index bc60b24c0..000000000 --- a/frontend/src/api/RedshiftCluster/disableClusterDatasetCopy.js +++ /dev/null @@ -1,24 +0,0 @@ -import { gql } from 'apollo-boost'; - -const disableRedshiftClusterDatasetCopy = ({ - clusterUri, - datasetUri, - tableUri -}) => ({ - variables: { clusterUri, datasetUri, tableUri }, - mutation: gql` - mutation disableRedshiftClusterDatasetTableCopy( - $clusterUri: String - $datasetUri: String - $tableUri: String - ) { - disableRedshiftClusterDatasetTableCopy( - clusterUri: $clusterUri - datasetUri: $datasetUri - tableUri: $tableUri - ) - } - ` -}); - -export default disableRedshiftClusterDatasetCopy; diff --git a/frontend/src/api/RedshiftCluster/enableClusterDatasetCopy.js b/frontend/src/api/RedshiftCluster/enableClusterDatasetCopy.js deleted file mode 100644 index b91bd11e6..000000000 --- a/frontend/src/api/RedshiftCluster/enableClusterDatasetCopy.js +++ /dev/null @@ -1,18 +0,0 @@ -import { gql } from 'apollo-boost'; - -const enableRedshiftClusterDatasetCopy = ({ clusterUri, datasetUri }) => ({ - variables: { clusterUri, datasetUri }, - mutation: gql` - mutation enableRedshiftClusterDatasetCopy( - $clusterUri: String - $datasetUri: String - ) { - enableRedshiftClusterDatasetCopy( - clusterUri: $clusterUri - datasetUri: $datasetUri - ) - } - ` -}); - -export default enableRedshiftClusterDatasetCopy; diff --git a/frontend/src/api/RedshiftCluster/getCluster.js b/frontend/src/api/RedshiftCluster/getCluster.js deleted file mode 100644 index 6fcd2f7a7..000000000 --- a/frontend/src/api/RedshiftCluster/getCluster.js +++ /dev/null @@ -1,75 +0,0 @@ -import { gql } from 'apollo-boost'; - -const getCluster = (clusterUri) => ({ - variables: { - clusterUri - }, - query: gql` - query GetRedshiftCluster($clusterUri: String!) { - getRedshiftCluster(clusterUri: $clusterUri) { - clusterUri - environmentUri - name - label - description - tags - owner - created - updated - AwsAccountId - region - clusterArn - clusterName - created - databaseName - databaseUser - datahubSecret - masterUsername - masterSecret - masterDatabaseName - nodeType - numberOfNodes - kmsAlias - status - subnetGroupName - CFNStackName - CFNStackStatus - CFNStackArn - port - endpoint - IAMRoles - subnetIds - vpc - securityGroupIds - userRoleForCluster - userRoleInEnvironment - imported - SamlGroupName - organization { - organizationUri - label - name - } - environment { - environmentUri - label - name - } - stack { - stack - status - stackUri - targetUri - accountid - region - stackid - link - outputs - resources - } - } - } - ` -}); - -export default getCluster; diff --git a/frontend/src/api/RedshiftCluster/getClusterConsoleAccess.js b/frontend/src/api/RedshiftCluster/getClusterConsoleAccess.js deleted file mode 100644 index ff0e275d6..000000000 --- a/frontend/src/api/RedshiftCluster/getClusterConsoleAccess.js +++ /dev/null @@ -1,14 +0,0 @@ -import { gql } from 'apollo-boost'; - -const getClusterConsoleAccess = (clusterUri) => ({ - variables: { - clusterUri - }, - query: gql` - query getRedshiftClusterConsoleAccess($clusterUri: String!) 
{ - getRedshiftClusterConsoleAccess(clusterUri: $clusterUri) - } - ` -}); - -export default getClusterConsoleAccess; diff --git a/frontend/src/api/RedshiftCluster/getClusterDatabaseCredentials.js b/frontend/src/api/RedshiftCluster/getClusterDatabaseCredentials.js deleted file mode 100644 index 9c19429f7..000000000 --- a/frontend/src/api/RedshiftCluster/getClusterDatabaseCredentials.js +++ /dev/null @@ -1,21 +0,0 @@ -import { gql } from 'apollo-boost'; - -const getRedshiftClusterDatabaseCredentials = (clusterUri) => ({ - variables: { - clusterUri - }, - query: gql` - query getRedshiftClusterDatabaseCredentials($clusterUri: String!) { - getRedshiftClusterDatabaseCredentials(clusterUri: $clusterUri) { - clusterUri - user - database - port - endpoint - password - } - } - ` -}); - -export default getRedshiftClusterDatabaseCredentials; diff --git a/frontend/src/api/RedshiftCluster/importCluster.js b/frontend/src/api/RedshiftCluster/importCluster.js deleted file mode 100644 index 445f83572..000000000 --- a/frontend/src/api/RedshiftCluster/importCluster.js +++ /dev/null @@ -1,26 +0,0 @@ -import { gql } from 'apollo-boost'; - -const importRedshiftCluster = ({ environmentUri, input }) => ({ - variables: { - environmentUri, - clusterInput: input - }, - mutation: gql` - mutation importRedshiftCluster( - $environmentUri: String! - $clusterInput: ImportClusterInput! - ) { - importRedshiftCluster( - environmentUri: $environmentUri - clusterInput: $clusterInput - ) { - clusterUri - name - label - created - } - } - ` -}); - -export default importRedshiftCluster; diff --git a/frontend/src/api/RedshiftCluster/listAvailableDatasetTables.js b/frontend/src/api/RedshiftCluster/listAvailableDatasetTables.js deleted file mode 100644 index daea12cf2..000000000 --- a/frontend/src/api/RedshiftCluster/listAvailableDatasetTables.js +++ /dev/null @@ -1,40 +0,0 @@ -import { gql } from 'apollo-boost'; - -const listAvailableDatasetTables = ({ clusterUri, filter }) => ({ - variables: { - clusterUri, - filter - }, - query: gql` - query listRedshiftClusterAvailableDatasetTables( - $clusterUri: String! - $filter: DatasetTableFilter - ) { - listRedshiftClusterAvailableDatasetTables( - clusterUri: $clusterUri - filter: $filter - ) { - count - page - pages - hasNext - hasPrevious - count - nodes { - datasetUri - tableUri - name - label - GlueDatabaseName - GlueTableName - S3Prefix - dataset { - S3BucketName - } - } - } - } - ` -}); - -export default listAvailableDatasetTables; diff --git a/frontend/src/api/RedshiftCluster/listAvailableDatasets.js b/frontend/src/api/RedshiftCluster/listAvailableDatasets.js deleted file mode 100644 index ce817429b..000000000 --- a/frontend/src/api/RedshiftCluster/listAvailableDatasets.js +++ /dev/null @@ -1,59 +0,0 @@ -import { gql } from 'apollo-boost'; - -const listAvailableDatasets = ({ clusterUri, filter }) => ({ - variables: { - clusterUri, - filter - }, - query: gql` - query ListRedshiftClusterAvailableDatasets( - $clusterUri: String! 
- $filter: RedshiftClusterDatasetFilter - ) { - listRedshiftClusterAvailableDatasets( - clusterUri: $clusterUri - filter: $filter - ) { - count - page - pages - hasNext - hasPrevious - nodes { - datasetUri - name - AwsAccountId - region - S3BucketName - GlueDatabaseName - created - owner - label - region - tags - userRoleForDataset - redshiftClusterPermission(clusterUri: $clusterUri) - description - organization { - name - organizationUri - label - } - statistics { - tables - locations - } - environment { - environmentUri - name - AwsAccountId - SamlGroupName - region - } - } - } - } - ` -}); - -export default listAvailableDatasets; diff --git a/frontend/src/api/RedshiftCluster/listClusterDatasetTables.js b/frontend/src/api/RedshiftCluster/listClusterDatasetTables.js deleted file mode 100644 index 8f0d52037..000000000 --- a/frontend/src/api/RedshiftCluster/listClusterDatasetTables.js +++ /dev/null @@ -1,40 +0,0 @@ -import { gql } from 'apollo-boost'; - -const listClusterDatasetTables = ({ clusterUri, filter }) => ({ - variables: { - clusterUri, - filter - }, - query: gql` - query listRedshiftClusterCopyEnabledTables( - $clusterUri: String! - $filter: DatasetTableFilter - ) { - listRedshiftClusterCopyEnabledTables( - clusterUri: $clusterUri - filter: $filter - ) { - count - page - pages - hasNext - hasPrevious - count - nodes { - datasetUri - tableUri - name - label - GlueDatabaseName - GlueTableName - S3Prefix - AwsAccountId - RedshiftSchema(clusterUri: $clusterUri) - RedshiftCopyDataLocation(clusterUri: $clusterUri) - } - } - } - ` -}); - -export default listClusterDatasetTables; diff --git a/frontend/src/api/RedshiftCluster/listClusterDatasets.js b/frontend/src/api/RedshiftCluster/listClusterDatasets.js deleted file mode 100644 index 7218d860c..000000000 --- a/frontend/src/api/RedshiftCluster/listClusterDatasets.js +++ /dev/null @@ -1,57 +0,0 @@ -import { gql } from 'apollo-boost'; - -const listClusterDatasets = ({ clusterUri, filter }) => ({ - variables: { - clusterUri, - filter - }, - query: gql` - query ListRedshiftClusterDatasets( - $clusterUri: String! - $filter: RedshiftClusterDatasetFilter - ) { - listRedshiftClusterDatasets(clusterUri: $clusterUri, filter: $filter) { - count - page - pages - hasNext - hasPrevious - nodes { - datasetUri - name - AwsAccountId - region - S3BucketName - GlueDatabaseName - created - owner - label - region - tags - userRoleForDataset - redshiftClusterPermission(clusterUri: $clusterUri) - redshiftDataCopyEnabled(clusterUri: $clusterUri) - description - organization { - name - organizationUri - label - } - statistics { - tables - locations - } - environment { - environmentUri - name - AwsAccountId - SamlGroupName - region - } - } - } - } - ` -}); - -export default listClusterDatasets; diff --git a/frontend/src/api/RedshiftCluster/listEnvironmentClusters.js b/frontend/src/api/RedshiftCluster/listEnvironmentClusters.js deleted file mode 100644 index 54a1f8b23..000000000 --- a/frontend/src/api/RedshiftCluster/listEnvironmentClusters.js +++ /dev/null @@ -1,77 +0,0 @@ -import { gql } from 'apollo-boost'; - -const listEnvironmentClusters = (environmentUri, filter) => ({ - variables: { - environmentUri, - filter - }, - query: gql` - query listEnvironmentClusters( - $environmentUri: String! 
- $filter: RedshiftClusterFilter - ) { - listEnvironmentClusters( - environmentUri: $environmentUri - filter: $filter - ) { - count - page - pages - hasNext - hasPrevious - nodes { - clusterUri - environmentUri - name - label - description - tags - owner - created - updated - AwsAccountId - region - clusterArn - clusterName - created - databaseName - databaseUser - masterUsername - masterDatabaseName - nodeType - numberOfNodes - kmsAlias - status - subnetGroupName - CFNStackName - CFNStackStatus - CFNStackArn - port - endpoint - IAMRoles - subnetIds - securityGroupIds - userRoleForCluster - userRoleInEnvironment - imported - stack { - status - } - vpc - organization { - organizationUri - label - name - } - environment { - environmentUri - label - name - } - } - } - } - ` -}); - -export default listEnvironmentClusters; diff --git a/frontend/src/api/RedshiftCluster/pauseCluster.js b/frontend/src/api/RedshiftCluster/pauseCluster.js deleted file mode 100644 index e32b1d2fd..000000000 --- a/frontend/src/api/RedshiftCluster/pauseCluster.js +++ /dev/null @@ -1,14 +0,0 @@ -import { gql } from 'apollo-boost'; - -const pauseRedshiftCluster = (clusterUri) => ({ - variables: { - clusterUri - }, - mutation: gql` - mutation pauseRedshiftCluster($clusterUri: String!) { - pauseRedshiftCluster(clusterUri: $clusterUri) - } - ` -}); - -export default pauseRedshiftCluster; diff --git a/frontend/src/api/RedshiftCluster/rebootCluster.js b/frontend/src/api/RedshiftCluster/rebootCluster.js deleted file mode 100644 index ca499e6b1..000000000 --- a/frontend/src/api/RedshiftCluster/rebootCluster.js +++ /dev/null @@ -1,14 +0,0 @@ -import { gql } from 'apollo-boost'; - -const rebootRedshiftCluster = (clusterUri) => ({ - variables: { - clusterUri - }, - mutation: gql` - mutation rebootRedshiftCluster($clusterUri: String!) { - rebootRedshiftCluster(clusterUri: $clusterUri) - } - ` -}); - -export default rebootRedshiftCluster; diff --git a/frontend/src/api/RedshiftCluster/removeDatasetFromCluster.js b/frontend/src/api/RedshiftCluster/removeDatasetFromCluster.js deleted file mode 100644 index 3da8fa94d..000000000 --- a/frontend/src/api/RedshiftCluster/removeDatasetFromCluster.js +++ /dev/null @@ -1,18 +0,0 @@ -import { gql } from 'apollo-boost'; - -const removeDatasetFromCluster = ({ clusterUri, datasetUri }) => ({ - variables: { clusterUri, datasetUri }, - mutation: gql` - mutation removeDatasetFromRedshiftCluster( - $clusterUri: String - $datasetUri: String - ) { - removeDatasetFromRedshiftCluster( - clusterUri: $clusterUri - datasetUri: $datasetUri - ) - } - ` -}); - -export default removeDatasetFromCluster; diff --git a/frontend/src/api/RedshiftCluster/resumeCluster.js b/frontend/src/api/RedshiftCluster/resumeCluster.js deleted file mode 100644 index fc9fae8a6..000000000 --- a/frontend/src/api/RedshiftCluster/resumeCluster.js +++ /dev/null @@ -1,14 +0,0 @@ -import { gql } from 'apollo-boost'; - -const resumeRedshiftCluster = (clusterUri) => ({ - variables: { - clusterUri - }, - mutation: gql` - mutation resumeRedshiftCluster($clusterUri: String!) 
{ - resumeRedshiftCluster(clusterUri: $clusterUri) - } - ` -}); - -export default resumeRedshiftCluster; diff --git a/frontend/src/api/RedshiftCluster/searchClusters.js b/frontend/src/api/RedshiftCluster/searchClusters.js deleted file mode 100644 index ca8df03a7..000000000 --- a/frontend/src/api/RedshiftCluster/searchClusters.js +++ /dev/null @@ -1,70 +0,0 @@ -import { gql } from 'apollo-boost'; - -const searchRedshiftClusters = (filter) => ({ - variables: { - filter - }, - query: gql` - query searchRedshiftClusters($filter: RedshiftClusterFilter) { - searchRedshiftClusters(filter: $filter) { - count - page - pages - hasNext - hasPrevious - nodes { - clusterUri - environmentUri - name - label - description - tags - owner - created - updated - AwsAccountId - region - clusterArn - clusterName - created - databaseName - databaseUser - masterUsername - masterDatabaseName - nodeType - numberOfNodes - kmsAlias - status - subnetGroupName - CFNStackName - CFNStackStatus - CFNStackArn - port - endpoint - IAMRoles - subnetIds - securityGroupIds - userRoleForCluster - userRoleInEnvironment - imported - stack { - status - } - vpc - organization { - organizationUri - label - name - } - environment { - environmentUri - label - name - } - } - } - } - ` -}); - -export default searchRedshiftClusters; diff --git a/frontend/src/views/Environments/EnvironmentCreateForm.js b/frontend/src/views/Environments/EnvironmentCreateForm.js index 90e74bbe7..0d6c08c8d 100644 --- a/frontend/src/views/Environments/EnvironmentCreateForm.js +++ b/frontend/src/views/Environments/EnvironmentCreateForm.js @@ -631,37 +631,6 @@ const EnvironmentCreateForm = (props) => { /> - {/* - - - } - label={ - - Warehouses{' '} - - (Requires Amazon Redshift clusters) - - - } - labelPlacement="end" - value={values.warehousesEnabled} - /> - - */} diff --git a/frontend/src/views/Environments/EnvironmentEditForm.js b/frontend/src/views/Environments/EnvironmentEditForm.js index 9a3789985..d739ad7b3 100644 --- a/frontend/src/views/Environments/EnvironmentEditForm.js +++ b/frontend/src/views/Environments/EnvironmentEditForm.js @@ -511,37 +511,6 @@ const EnvironmentEditForm = (props) => { /> - {/* - - - } - label={ - - Warehouses{' '} - - (Requires Amazon Redshift clusters) - - - } - labelPlacement="end" - value={values.warehousesEnabled} - /> - - */} diff --git a/frontend/src/views/Environments/EnvironmentView.js b/frontend/src/views/Environments/EnvironmentView.js index c22d0c461..cb04376e2 100644 --- a/frontend/src/views/Environments/EnvironmentView.js +++ b/frontend/src/views/Environments/EnvironmentView.js @@ -33,7 +33,6 @@ import useClient from '../../hooks/useClient'; import ChevronRightIcon from '../../icons/ChevronRight'; import EnvironmentOverview from './EnvironmentOverview'; import EnvironmentDatasets from './EnvironmentDatasets'; -import EnvironmentWarehouses from './EnvironmentWarehouses'; import Stack from '../Stack/Stack'; import { SET_ERROR } from '../../store/errorReducer'; import { useDispatch } from '../../store'; @@ -59,7 +58,6 @@ const tabs = [ icon: }, { label: 'Networks', value: 'networks', icon: }, - /*{ label: 'Warehouses', value: 'warehouses', icon: },*/ { label: 'Subscriptions', value: 'subscriptions', @@ -259,9 +257,6 @@ const EnvironmentView = () => { {currentTab === 'networks' && ( )} - {currentTab === 'warehouses' && ( - - )} {currentTab === 'subscriptions' && ( { - const client = useClient(); - const navigate = useNavigate(); - const dispatch = useDispatch(); - const [items, setItems] = 
useState(Defaults.PagedResponseDefault); - const [filter, setFilter] = useState(Defaults.DefaultFilter); - const [loading, setLoading] = useState(null); - const [inputValue, setInputValue] = useState(''); - - const fetchItems = useCallback(async () => { - try { - const response = await client.query( - listEnvironmentClusters(environment.environmentUri, filter) - ); - if (!response.errors) { - setItems({ ...response.data.listEnvironmentClusters }); - } else { - dispatch({ type: SET_ERROR, error: response.errors[0].message }); - } - } catch (e) { - dispatch({ type: SET_ERROR, error: e.message }); - } finally { - setLoading(false); - } - }, [client, dispatch, filter, environment]); - - useEffect(() => { - if (client) { - fetchItems().catch((e) => - dispatch({ type: SET_ERROR, error: e.message }) - ); - } - }, [client, filter.page, dispatch, fetchItems]); - - const handleInputChange = (event) => { - setInputValue(event.target.value); - setFilter({ ...filter, term: event.target.value }); - }; - - const handleInputKeyup = (event) => { - if (event.code === 'Enter') { - fetchItems(); - } - }; - - const handlePageChange = async (event, value) => { - if (value <= items.pages && value !== items.page) { - await setFilter({ ...filter, page: value }); - } - }; - - return ( - - } - title={ - - Redshift Clusters - - } - /> - - - - - - - - ) - }} - onChange={handleInputChange} - onKeyUp={handleInputKeyup} - placeholder="Search" - value={inputValue} - variant="outlined" - /> - - - - - - - - - - - - - Name - Endpoint - Status - Actions - - - {loading ? ( - - ) : ( - - {items.nodes.length > 0 ? ( - items.nodes.map((warehouse) => ( - - {warehouse.label} - {warehouse.endpoint} - - - - - { - navigate( - `/console/warehouse/${warehouse.clusterUri}` - ); - }} - > - - - - - )) - ) : ( - - No Redshift cluster found - - )} - - )} -
- {!loading && items.nodes.length > 0 && ( - - )} -
-
-
- ); -}; - -EnvironmentWarehouses.propTypes = { - environment: PropTypes.object.isRequired -}; - -export default EnvironmentWarehouses; diff --git a/frontend/src/views/Warehouses/WarehouseConnection.js b/frontend/src/views/Warehouses/WarehouseConnection.js deleted file mode 100644 index f6c12b40e..000000000 --- a/frontend/src/views/Warehouses/WarehouseConnection.js +++ /dev/null @@ -1,104 +0,0 @@ -import PropTypes from 'prop-types'; -import { useState } from 'react'; -import { - Box, - Card, - CardContent, - CardHeader, - Divider, - Typography -} from '@mui/material'; -import { LoadingButton } from '@mui/lab'; -import { FaExternalLinkAlt } from 'react-icons/fa'; -import useClient from '../../hooks/useClient'; -import { SET_ERROR } from '../../store/errorReducer'; -import { useDispatch } from '../../store'; -import getClusterConsoleAccess from '../../api/RedshiftCluster/getClusterConsoleAccess'; - -const WarehouseConnection = (props) => { - const { warehouse } = props; - const client = useClient(); - const dispatch = useDispatch(); - const [openingQueryEditor, setOpeningQueryEditor] = useState(false); - const jdbc = warehouse.endpoint - ? `jdbc:redshift://${warehouse.endpoint}:${warehouse.port}/${warehouse.databaseName}` - : '-'; - const odbc = warehouse.endpoint - ? `Driver={Amazon Redshift (x64)}; Server=${ - warehouse.endpoint || '-' - }; Database=${warehouse.databaseName}` - : '-'; - const goToQueryEditor = async () => { - setOpeningQueryEditor(true); - const response = await client.query( - getClusterConsoleAccess(warehouse.clusterUri) - ); - if (!response.errors) { - window.open(response.data.getRedshiftClusterConsoleAccess, '_blank'); - } else { - dispatch({ type: SET_ERROR, error: response.errors[0].message }); - } - setOpeningQueryEditor(false); - }; - - return ( - - - - - - - - Endpoint - - - {warehouse.endpoint} - - - - - Port - - - {warehouse.port} - - - - - JDBC URL - - - {jdbc} - - - - - ODBC URL - - - {odbc} - - - - } - sx={{ mr: 1 }} - variant="contained" - onClick={goToQueryEditor} - > - Redshift Query Editor - - - - - ); -}; - -WarehouseConnection.propTypes = { - warehouse: PropTypes.object.isRequired -}; - -export default WarehouseConnection; diff --git a/frontend/src/views/Warehouses/WarehouseCopyTableModal.js b/frontend/src/views/Warehouses/WarehouseCopyTableModal.js deleted file mode 100644 index 70ee6104a..000000000 --- a/frontend/src/views/Warehouses/WarehouseCopyTableModal.js +++ /dev/null @@ -1,250 +0,0 @@ -import PropTypes from 'prop-types'; -import { useSnackbar } from 'notistack'; -import { - Box, - CardContent, - Dialog, - FormHelperText, - MenuItem, - TextField, - Typography -} from '@mui/material'; -import { useCallback, useEffect, useState } from 'react'; -import { Formik } from 'formik'; -import * as Yup from 'yup'; -import { LoadingButton } from '@mui/lab'; -import { CopyAll } from '@mui/icons-material'; -import { SET_ERROR } from '../../store/errorReducer'; -import { useDispatch } from '../../store'; -import useClient from '../../hooks/useClient'; -import { PagedResponseDefault } from '../../components/defaults'; -import listAvailableDatasetTables from '../../api/RedshiftCluster/listAvailableDatasetTables'; -import copyTableToCluster from '../../api/RedshiftCluster/copyTableToCluster'; -import * as Defaults from '../../components/defaults'; - -const WarehouseCopyTableModal = (props) => { - const client = useClient(); - const { warehouse, onApply, onClose, open, reload, ...other } = props; - const { enqueueSnackbar } = useSnackbar(); - const [filter] = 
useState(Defaults.SelectListFilter); - const [items, setItems] = useState(PagedResponseDefault); - const [itemOptions, setItemOptions] = useState([]); - const [selectedTable, setSelectedTable] = useState(''); - const dispatch = useDispatch(); - const [loading, setLoading] = useState(true); - - const fetchItems = useCallback(async () => { - setLoading(true); - const response = await client.query( - listAvailableDatasetTables({ - clusterUri: warehouse.clusterUri, - filter - }) - ); - if (!response.errors) { - setItems({ ...response.data.listRedshiftClusterAvailableDatasetTables }); - setItemOptions( - response.data.listRedshiftClusterAvailableDatasetTables.nodes.map( - (e) => ({ ...e, value: e, label: e.label }) - ) - ); - } else { - dispatch({ type: SET_ERROR, error: response.errors[0].message }); - } - setLoading(false); - }, [client, dispatch, warehouse.clusterUri, filter]); - - async function submit(values, setStatus, setSubmitting, setErrors) { - try { - const input = { - clusterUri: warehouse.clusterUri, - datasetUri: values.table.datasetUri, - tableUri: values.table.tableUri, - schema: values.schema, - dataLocation: values.dataLocation || null - }; - const response = await client.mutate(copyTableToCluster(input)); - if (!response.errors) { - setStatus({ success: true }); - setSubmitting(false); - enqueueSnackbar('Table copy started', { - anchorOrigin: { - horizontal: 'right', - vertical: 'top' - }, - variant: 'success' - }); - if (reload) { - reload(); - } - if (onApply) { - onApply(); - } - } else { - dispatch({ type: SET_ERROR, error: response.errors[0].message }); - } - } catch (err) { - console.error(err); - setStatus({ success: false }); - setErrors({ submit: err.message }); - setSubmitting(false); - dispatch({ type: SET_ERROR, error: err.message }); - } - } - - useEffect(() => { - if (client) { - fetchItems().catch((e) => - dispatch({ type: SET_ERROR, error: e.message }) - ); - } - }, [client, fetchItems, dispatch]); - - if (!warehouse) { - return null; - } - - return ( - - - - Copy a table to cluster {warehouse.label} - - -

- You can specify the target schema and the S3 data location for the - copy command. This copy will be done on cluster{' '} - {warehouse.name} - and database {warehouse.databaseName} -

-
- {!loading && items && items.nodes.length <= 0 ? ( - - No tables found. - - ) : ( - - { - await submit(values, setStatus, setSubmitting, setErrors); - }} - > - {({ - errors, - handleBlur, - handleChange, - handleSubmit, - setFieldValue, - isSubmitting, - touched, - values - }) => ( -
- - - - - - { - setFieldValue('table', event.target.value); - setSelectedTable( - `(s3://${event.target.value.dataset.S3BucketName}/)` - ); - }} - select - value={values.table} - variant="outlined" - > - {itemOptions.map((table) => ( - - {table.label} - - ))} - - - - - - - {errors.submit && ( - - {errors.submit} - - )} - - } - color="primary" - disabled={isSubmitting} - type="submit" - variant="contained" - > - Copy table - - -
- )} -
-
- )} -
-
- ); -}; - -WarehouseCopyTableModal.propTypes = { - warehouse: PropTypes.object.isRequired, - onApply: PropTypes.func, - onClose: PropTypes.func, - reload: PropTypes.func, - open: PropTypes.bool.isRequired -}; - -export default WarehouseCopyTableModal; diff --git a/frontend/src/views/Warehouses/WarehouseCreateForm.js b/frontend/src/views/Warehouses/WarehouseCreateForm.js deleted file mode 100644 index 1ea77cc7f..000000000 --- a/frontend/src/views/Warehouses/WarehouseCreateForm.js +++ /dev/null @@ -1,619 +0,0 @@ -import { Link as RouterLink, useNavigate, useParams } from 'react-router-dom'; -import * as Yup from 'yup'; -import { Formik } from 'formik'; -import { useSnackbar } from 'notistack'; -import { - Box, - Breadcrumbs, - Button, - Card, - CardContent, - CardHeader, - CircularProgress, - Container, - FormHelperText, - Grid, - Link, - MenuItem, - TextField, - Typography -} from '@mui/material'; -import { Helmet } from 'react-helmet-async'; -import { LoadingButton } from '@mui/lab'; -import { useCallback, useEffect, useState } from 'react'; -import useClient from '../../hooks/useClient'; -import ChevronRightIcon from '../../icons/ChevronRight'; -import ArrowLeftIcon from '../../icons/ArrowLeft'; -import useSettings from '../../hooks/useSettings'; -import listEnvironments from '../../api/Environment/listEnvironments'; -import { SET_ERROR } from '../../store/errorReducer'; -import { useDispatch } from '../../store'; -import ChipInput from '../../components/TagsInput'; -import createRedshiftCluster from '../../api/RedshiftCluster/createCluster'; -import listEnvironmentGroups from '../../api/Environment/listEnvironmentGroups'; -import * as Defaults from '../../components/defaults'; - -const WarehouseCreateForm = (props) => { - const navigate = useNavigate(); - const params = useParams(); - const { enqueueSnackbar } = useSnackbar(); - const dispatch = useDispatch(); - const client = useClient(); - const { settings } = useSettings(); - const [loading, setLoading] = useState(true); - const [groupOptions, setGroupOptions] = useState([]); - const [environmentOptions, setEnvironmentOptions] = useState([]); - const [environment, setEnvironment] = useState(null); - const nodeTypes = [ - { label: 'dc2.large', value: 'dc2.large' }, - { label: 'ds2.xlarge', value: 'ds2.xlarge' }, - { label: 'ds2.8xlarge', value: 'ds2.8xlarge' }, - { label: 'dc1.large', value: 'dc1.large' }, - { label: 'dc2.8xlarge', value: 'dc2.8xlarge' }, - { label: 'ra3.16xlarge', value: 'ra3.16xlarge' } - ]; - - const fetchEnvironments = useCallback(async () => { - setLoading(true); - const response = await client.query( - listEnvironments({ filter: Defaults.SelectListFilter }) - ); - if (!response.errors) { - setEnvironmentOptions( - response.data.listEnvironments.nodes.map((e) => ({ - ...e, - value: e.environmentUri, - label: e.label - })) - ); - setEnvironment( - response.data.listEnvironments.nodes[ - response.data.listEnvironments.nodes.findIndex( - (e) => e.environmentUri === params.uri - ) - ] - ); - } else { - dispatch({ type: SET_ERROR, error: response.errors[0].message }); - } - setLoading(false); - }, [client, dispatch, params.uri]); - - const fetchGroups = useCallback( - async (environmentUri) => { - try { - const response = await client.query( - listEnvironmentGroups({ - filter: Defaults.SelectListFilter, - environmentUri - }) - ); - if (!response.errors) { - setGroupOptions( - response.data.listEnvironmentGroups.nodes.map((g) => ({ - value: g.groupUri, - label: g.groupUri - })) - ); - } else { - dispatch({ type: 
SET_ERROR, error: response.errors[0].message }); - } - } catch (e) { - dispatch({ type: SET_ERROR, error: e.message }); - } - }, - [client, dispatch] - ); - - useEffect(() => { - if (client) { - fetchEnvironments().catch((e) => - dispatch({ type: SET_ERROR, error: e.message }) - ); - } - }, [client, fetchEnvironments, dispatch]); - - async function submit(values, setStatus, setSubmitting, setErrors) { - try { - const input = { - label: values.label, - description: values.description, - vpc: values.vpcId, - tags: values.tags, - nodeType: values.nodeType, - masterDatabaseName: values.masterDatabaseName, - masterUsername: values.masterUsername, - numberOfNodes: parseInt(values.numberOfNodes, 10), - SamlGroupName: values.SamlGroupName, - databaseName: values.databaseName - }; - const response = await client.mutate( - createRedshiftCluster({ - environmentUri: values.environment.environmentUri, - input - }) - ); - if (!response.errors) { - setStatus({ success: true }); - setSubmitting(false); - enqueueSnackbar('Amazon Redshift cluster creation started', { - anchorOrigin: { - horizontal: 'right', - vertical: 'top' - }, - variant: 'success' - }); - navigate( - `/console/warehouse/${response.data.createRedshiftCluster.clusterUri}` - ); - } else { - dispatch({ type: SET_ERROR, error: response.errors[0].message }); - } - } catch (err) { - console.error(err); - setStatus({ success: false }); - setErrors({ submit: err.message }); - setSubmitting(false); - dispatch({ type: SET_ERROR, error: err.message }); - } - } - if (loading || !environmentOptions.length > 0 || !environment) { - return ; - } - - return ( - <> - - Warehouses: Warehouse Create | data.all - - - - - - - Create a new warehouse - - } - sx={{ mt: 1 }} - > - - Organize - - - Environments - - - {environment.label} - - - Warehouses - - - Create - - - - - - - - - - - { - await submit(values, setStatus, setSubmitting, setErrors); - }} - > - {({ - errors, - handleBlur, - handleChange, - handleSubmit, - isSubmitting, - setFieldValue, - touched, - values - }) => ( -
- - - - - - - - - - {touched.description && errors.description && ( - - - {errors.description} - - - )} - - - - - - - - - - - - - - - - - - - - - - environmentOptions[ - environmentOptions.findIndex( - (e) => e.environmentUri === params.uri - ) - ] - } - onChange={(event) => { - setFieldValue('SamlGroupName', ''); - fetchGroups( - event.target.value.environmentUri - ).catch((e) => - dispatch({ type: SET_ERROR, error: e.message }) - ); - setFieldValue('environment', event.target.value); - }} - select - value={values.environment} - variant="outlined" - > - {environmentOptions.map((e) => ( - - {e.label} - - ))} - - - - - - - - - - - - - - {nodeTypes.map((node) => ( - - {node.label} - - ))} - - - - - - - - - - - {groupOptions.map((group) => ( - - {group.label} - - ))} - - - - - { - setFieldValue('tags', [...chip]); - }} - /> - - - - {errors.submit && ( - - {errors.submit} - - )} - - - Create Warehouse - - - - -
- )} -
-
-
-
- - ); -}; - -export default WarehouseCreateForm; diff --git a/frontend/src/views/Warehouses/WarehouseCredentials.js b/frontend/src/views/Warehouses/WarehouseCredentials.js deleted file mode 100644 index a47221c2a..000000000 --- a/frontend/src/views/Warehouses/WarehouseCredentials.js +++ /dev/null @@ -1,122 +0,0 @@ -import PropTypes from 'prop-types'; -import { useCallback, useEffect, useState } from 'react'; -import { - Card, - CardContent, - CardHeader, - Divider, - List, - ListItem, - Typography -} from '@mui/material'; -import useClient from '../../hooks/useClient'; -import { SET_ERROR } from '../../store/errorReducer'; -import { useDispatch } from '../../store'; -import getRedshiftClusterDatabaseCredentials from '../../api/RedshiftCluster/getClusterDatabaseCredentials'; - -const WarehouseCredentials = (props) => { - const { warehouse } = props; - const client = useClient(); - const dispatch = useDispatch(); - const [clusterCredentials, setClusterCredentials] = useState({ - password: '-' - }); - - const getCredentials = useCallback(async () => { - const response = await client.query( - getRedshiftClusterDatabaseCredentials(warehouse.clusterUri) - ); - if (!response.errors) { - setClusterCredentials({ - ...response.data.getRedshiftClusterDatabaseCredentials - }); - } else { - dispatch({ type: SET_ERROR, error: response.errors[0].message }); - } - }, [client, warehouse.clusterUri, dispatch]); - - useEffect(() => { - if (client && warehouse) { - getCredentials().catch((e) => - dispatch({ type: SET_ERROR, error: e.message }) - ); - } - }, [client, warehouse, getCredentials, dispatch]); - - return ( - - - - - - - - Cluster identifier - - - {warehouse.name} - - - - - Database name - - - {warehouse.databaseName} - - - - - Database user - - - {warehouse.databaseUser} - - - - - Database password - - - {clusterCredentials?.password || '-'} - - - - - - ); -}; - -WarehouseCredentials.propTypes = { - warehouse: PropTypes.object.isRequired -}; - -export default WarehouseCredentials; diff --git a/frontend/src/views/Warehouses/WarehouseDatasets.js b/frontend/src/views/Warehouses/WarehouseDatasets.js deleted file mode 100644 index b7e820f93..000000000 --- a/frontend/src/views/Warehouses/WarehouseDatasets.js +++ /dev/null @@ -1,292 +0,0 @@ -import PropTypes from 'prop-types'; -import { useCallback, useEffect, useState } from 'react'; -import { - Box, - Card, - CardContent, - CardHeader, - Divider, - Grid, - IconButton, - InputAdornment, - Table, - TableBody, - TableCell, - TableHead, - TableRow, - TextField, - Typography -} from '@mui/material'; -import CircularProgress from '@mui/material/CircularProgress'; -import { DeleteOutlined, Warning } from '@mui/icons-material'; -import { LoadingButton } from '@mui/lab'; -import { useSnackbar } from 'notistack'; -import { BsFolder } from 'react-icons/bs'; -import useClient from '../../hooks/useClient'; -import * as Defaults from '../../components/defaults'; -import Scrollbar from '../../components/Scrollbar'; -import { SET_ERROR } from '../../store/errorReducer'; -import { useDispatch } from '../../store'; -import SearchIcon from '../../icons/Search'; -import PlusIcon from '../../icons/Plus'; -import DeleteObjectModal from '../../components/DeleteObjectModal'; -import removeDatasetFromCluster from '../../api/RedshiftCluster/removeDatasetFromCluster'; -import WarehouseLoadDatasetModal from './WarehouseLoadDatasetModal'; -import Pager from '../../components/Pager'; -import listClusterDatasets from '../../api/RedshiftCluster/listClusterDatasets'; -import 
WarehouseTables from './WarehouseTables'; - -const WarehouseDatasets = ({ warehouse }) => { - const client = useClient(); - const dispatch = useDispatch(); - const { enqueueSnackbar } = useSnackbar(); - const [items, setItems] = useState(Defaults.PagedResponseDefault); - const [filter, setFilter] = useState(Defaults.DefaultFilter); - const [loading, setLoading] = useState(null); - const [inputValue, setInputValue] = useState(''); - const [isLoadDatasetsOpen, setIsLoadDatasetsOpen] = useState(false); - const [isDeleteObjectModalOpen, setIsDeleteObjectModalOpen] = useState(false); - const [datasetToDelete, setDatasetToDelete] = useState(null); - const handleDeleteObjectModalOpen = (dataset) => { - setDatasetToDelete(dataset); - setIsDeleteObjectModalOpen(true); - }; - const handleDeleteObjectModalClose = () => { - setDatasetToDelete(null); - setIsDeleteObjectModalOpen(false); - }; - - const fetchItems = useCallback(async () => { - setLoading(true); - const response = await client.query( - listClusterDatasets({ - clusterUri: warehouse.clusterUri, - filter: { ...filter } - }) - ); - if (!response.errors) { - setItems({ ...response.data.listRedshiftClusterDatasets }); - } else { - dispatch({ type: SET_ERROR, error: response.errors[0].message }); - } - setLoading(false); - }, [warehouse.clusterUri, client, dispatch, filter]); - - const handleLoadDatasetsModalOpen = () => { - setIsLoadDatasetsOpen(true); - }; - - const handleLoadDatasetsModalClose = () => { - setIsLoadDatasetsOpen(false); - }; - - const unloadDataset = useCallback(async () => { - const response = await client.mutate( - removeDatasetFromCluster({ - clusterUri: warehouse.clusterUri, - datasetUri: datasetToDelete.datasetUri - }) - ); - if (!response.errors) { - handleDeleteObjectModalClose(); - enqueueSnackbar('Dataset unloaded', { - anchorOrigin: { - horizontal: 'right', - vertical: 'top' - }, - variant: 'success' - }); - fetchItems().catch((e) => - dispatch({ type: SET_ERROR, error: e.message }) - ); - } else { - dispatch({ type: SET_ERROR, error: response.errors[0].message }); - } - }, [ - warehouse.clusterUri, - enqueueSnackbar, - fetchItems, - dispatch, - client, - datasetToDelete - ]); - - const handleInputChange = (event) => { - setInputValue(event.target.value); - setFilter({ ...filter, term: event.target.value }); - }; - - const handleInputKeyup = (event) => { - if (event.code === 'Enter') { - fetchItems().catch((e) => - dispatch({ type: SET_ERROR, error: e.message }) - ); - } - }; - - const handlePageChange = async (event, value) => { - if (value <= items.pages && value !== items.page) { - await setFilter({ ...filter, page: value }); - } - }; - - useEffect(() => { - if (client) { - fetchItems().catch((e) => - dispatch({ type: SET_ERROR, error: e.message }) - ); - } - }, [client, filter.page, fetchItems, dispatch]); - - return ( - - - } - sx={{ m: 1 }} - variant="outlined" - > - Load dataset - - } - title={ - - - Loaded Datasets - - } - /> - - - - - - - - ) - }} - onChange={handleInputChange} - onKeyUp={handleInputKeyup} - placeholder="Search" - value={inputValue} - variant="outlined" - /> - - - - - - - - - Name - S3 Bucket - Glue Database - Actions - - - {loading ? ( - - ) : ( - - {items.nodes.length > 0 ? ( - items.nodes.map((dataset) => ( - - {dataset.name} - {`s3://${dataset.S3BucketName}`} - {dataset.GlueDatabaseName} - - { - setDatasetToDelete(dataset); - handleDeleteObjectModalOpen(dataset); - }} - > - - - - - )) - ) : ( - - No datasets loaded to cluster. - - )} - - )} -
- {items.nodes.length > 0 && ( - - )} -
-
-
- - {isLoadDatasetsOpen && ( - - )} - - {datasetToDelete && ( - - - - Dataset Spectrum schema will be removed from the - cluster. - - - - } - /> - )} - - - -
- ); -}; - -WarehouseDatasets.propTypes = { - warehouse: PropTypes.object.isRequired -}; - -export default WarehouseDatasets; diff --git a/frontend/src/views/Warehouses/WarehouseEditForm.js b/frontend/src/views/Warehouses/WarehouseEditForm.js deleted file mode 100644 index 13c7977a0..000000000 --- a/frontend/src/views/Warehouses/WarehouseEditForm.js +++ /dev/null @@ -1,357 +0,0 @@ -import { useCallback, useEffect, useState } from 'react'; -import { Link as RouterLink, useNavigate, useParams } from 'react-router-dom'; -import * as Yup from 'yup'; -import { Formik } from 'formik'; -import { useSnackbar } from 'notistack'; -import { - Box, - Breadcrumbs, - Button, - Card, - CardContent, - CardHeader, - CircularProgress, - Container, - FormHelperText, - Grid, - Link, - MenuItem, - TextField, - Typography -} from '@mui/material'; -import { Helmet } from 'react-helmet-async'; -import { LoadingButton } from '@mui/lab'; -import useClient from '../../hooks/useClient'; -import useGroups from '../../hooks/useGroups'; -import ChevronRightIcon from '../../icons/ChevronRight'; -import ArrowLeftIcon from '../../icons/ArrowLeft'; -import useSettings from '../../hooks/useSettings'; -import { SET_ERROR } from '../../store/errorReducer'; -import { useDispatch } from '../../store'; -import ChipInput from '../../components/TagsInput'; -import getCluster from '../../api/RedshiftCluster/getCluster'; - -const WarehouseEditForm = (props) => { - const dispatch = useDispatch(); - const navigate = useNavigate(); - const params = useParams(); - const { enqueueSnackbar } = useSnackbar(); - const client = useClient(); - const groups = useGroups(); - const { settings } = useSettings(); - const [loading, setLoading] = useState(true); - const [warehouse, setWarehouse] = useState(null); - const groupOptions = groups - ? groups.map((g) => ({ value: g, label: g })) - : []; - - const fetchItem = useCallback(async () => { - setLoading(true); - const response = await client.query(getCluster(params.uri)); - if (!response.errors && response.data.get !== null) { - setWarehouse(response.data.getRedshiftCluster); - } else { - const error = response.errors - ? 
response.errors[0].message - : 'Warehouse not found'; - dispatch({ type: SET_ERROR, error }); - } - setLoading(false); - }, [client, dispatch, params.uri]); - - useEffect(() => { - if (client) { - fetchItem().catch((e) => dispatch({ type: SET_ERROR, error: e.message })); - } - }, [client, fetchItem, dispatch]); - - async function submit(values, setStatus, setSubmitting, setErrors) { - try { - const response = {}; - if (!response.errors) { - setStatus({ success: true }); - setSubmitting(false); - enqueueSnackbar('Warehouse updated', { - anchorOrigin: { - horizontal: 'right', - vertical: 'top' - }, - variant: 'success' - }); - navigate( - `/console/warehouse/${response.data.updateSqlWarehouse.clusterUri}` - ); - } else { - dispatch({ type: SET_ERROR, error: response.errors[0].message }); - } - } catch (err) { - console.error(err); - setStatus({ success: false }); - setErrors({ submit: err.message }); - setSubmitting(false); - dispatch({ type: SET_ERROR, error: err.message }); - } - } - - if (loading || !(warehouse && warehouse.environment)) { - return ; - } - - return ( - <> - - Dataset: Warehouse Update | data.all - - - - - - - Edit warehouse {warehouse.label} - - } - sx={{ mt: 1 }} - > - - Discover - - - Warehouses - - - {warehouse.label} - - - - - - - - - - - { - await submit(values, setStatus, setSubmitting, setErrors); - }} - > - {({ - errors, - handleBlur, - handleChange, - handleSubmit, - isSubmitting, - setFieldValue, - touched, - values - }) => ( -
- - - - - - - - - - {touched.description && errors.description && ( - - - {errors.description} - - - )} - - - - - - { - setFieldValue( - 'SamlGroupName', - event.target.value - ); - }} - select - value={values.SamlGroupName} - variant="outlined" - > - {groupOptions.map((group) => ( - - {group.label} - - ))} - - - - - { - setFieldValue('tags', [...chip]); - }} - /> - - - - - - - - - - - - - - - - - - - - Save - - - - -
- )} -
-
-
-
- - ); -}; - -export default WarehouseEditForm; diff --git a/frontend/src/views/Warehouses/WarehouseImportForm.js b/frontend/src/views/Warehouses/WarehouseImportForm.js deleted file mode 100644 index 8c6ad3b32..000000000 --- a/frontend/src/views/Warehouses/WarehouseImportForm.js +++ /dev/null @@ -1,490 +0,0 @@ -import { Link as RouterLink, useNavigate, useParams } from 'react-router-dom'; -import * as Yup from 'yup'; -import { Formik } from 'formik'; -import { useSnackbar } from 'notistack'; -import { - Box, - Breadcrumbs, - Button, - Card, - CardContent, - CardHeader, - CircularProgress, - Container, - FormHelperText, - Grid, - Link, - MenuItem, - TextField, - Typography -} from '@mui/material'; -import { Helmet } from 'react-helmet-async'; -import { LoadingButton } from '@mui/lab'; -import { useCallback, useEffect, useState } from 'react'; -import useClient from '../../hooks/useClient'; -import ChevronRightIcon from '../../icons/ChevronRight'; -import ArrowLeftIcon from '../../icons/ArrowLeft'; -import useSettings from '../../hooks/useSettings'; -import listEnvironments from '../../api/Environment/listEnvironments'; -import { SET_ERROR } from '../../store/errorReducer'; -import { useDispatch } from '../../store'; -import ChipInput from '../../components/TagsInput'; -import importRedshiftCluster from '../../api/RedshiftCluster/importCluster'; -import listEnvironmentGroups from '../../api/Environment/listEnvironmentGroups'; -import * as Defaults from '../../components/defaults'; - -const WarehouseCreateForm = (props) => { - const navigate = useNavigate(); - const params = useParams(); - const { enqueueSnackbar } = useSnackbar(); - const dispatch = useDispatch(); - const client = useClient(); - const { settings } = useSettings(); - const [loading, setLoading] = useState(true); - const [groupOptions, setGroupOptions] = useState([]); - const [environmentOptions, setEnvironmentOptions] = useState([]); - const [environment, setEnvironment] = useState(null); - - const fetchEnvironments = useCallback(async () => { - setLoading(true); - const response = await client.query( - listEnvironments({ filter: Defaults.SelectListFilter }) - ); - if (!response.errors) { - setEnvironmentOptions( - response.data.listEnvironments.nodes.map((e) => ({ - ...e, - value: e.environmentUri, - label: e.label - })) - ); - setEnvironment( - response.data.listEnvironments.nodes[ - response.data.listEnvironments.nodes.findIndex( - (e) => e.environmentUri === params.uri - ) - ] - ); - } else { - dispatch({ type: SET_ERROR, error: response.errors[0].message }); - } - setLoading(false); - }, [client, dispatch, params.uri]); - - const fetchGroups = useCallback( - async (environmentUri) => { - try { - const response = await client.query( - listEnvironmentGroups({ - filter: Defaults.SelectListFilter, - environmentUri - }) - ); - if (!response.errors) { - setGroupOptions( - response.data.listEnvironmentGroups.nodes.map((g) => ({ - value: g.groupUri, - label: g.groupUri - })) - ); - } else { - dispatch({ type: SET_ERROR, error: response.errors[0].message }); - } - } catch (e) { - dispatch({ type: SET_ERROR, error: e.message }); - } - }, - [client, dispatch] - ); - - useEffect(() => { - if (client) { - fetchEnvironments().catch((e) => - dispatch({ type: SET_ERROR, error: e.message }) - ); - } - }, [client, fetchEnvironments, dispatch]); - - async function submit(values, setStatus, setSubmitting, setErrors) { - try { - const input = { - label: values.label, - description: values.description, - clusterIdentifier: 
values.clusterIdentifier, - tags: values.tags, - SamlGroupName: values.SamlGroupName, - databaseName: values.databaseName - }; - const response = await client.mutate( - importRedshiftCluster({ - environmentUri: values.environment.environmentUri, - input - }) - ); - if (!response.errors) { - setStatus({ success: true }); - setSubmitting(false); - enqueueSnackbar('Amazon Redshift cluster import started', { - anchorOrigin: { - horizontal: 'right', - vertical: 'top' - }, - variant: 'success' - }); - navigate( - `/console/warehouse/${response.data.importRedshiftCluster.clusterUri}` - ); - } else { - dispatch({ type: SET_ERROR, error: response.errors[0].message }); - } - } catch (err) { - console.error(err); - setStatus({ success: false }); - setErrors({ submit: err.message }); - setSubmitting(false); - dispatch({ type: SET_ERROR, error: err.message }); - } - } - if (loading || !environmentOptions.length > 0 || !environment) { - return ; - } - - return ( - <> - - Warehouses: Warehouse Import | data.all - - - - - - - Import warehouse - - } - sx={{ mt: 1 }} - > - - Organize - - - Environments - - - {environment.label} - - - - - - - - - - - { - await submit(values, setStatus, setSubmitting, setErrors); - }} - > - {({ - errors, - handleBlur, - handleChange, - handleSubmit, - isSubmitting, - setFieldValue, - touched, - values - }) => ( -
- - - - - - - - - - {touched.description && errors.description && ( - - - {errors.description} - - - )} - - - - - - - - - - - - - - - - - - - environmentOptions[ - environmentOptions.findIndex( - (e) => e.environmentUri === params.uri - ) - ] - } - onChange={(event) => { - setFieldValue('SamlGroupName', ''); - fetchGroups( - event.target.value.environmentUri - ).catch((e) => - dispatch({ type: SET_ERROR, error: e.message }) - ); - setFieldValue('environment', event.target.value); - }} - select - value={values.environment} - variant="outlined" - > - {environmentOptions.map((e) => ( - - {e.label} - - ))} - - - - - - - - - - - - - - {groupOptions.map((group) => ( - - {group.label} - - ))} - - - - - { - setFieldValue('tags', [...chip]); - }} - /> - - - - {errors.submit && ( - - {errors.submit} - - )} - - - Import Warehouse - - - - -
- )} -
-
-
-
- - ); -}; - -export default WarehouseCreateForm; diff --git a/frontend/src/views/Warehouses/WarehouseLoadDatasetModal.js b/frontend/src/views/Warehouses/WarehouseLoadDatasetModal.js deleted file mode 100644 index 561ee5090..000000000 --- a/frontend/src/views/Warehouses/WarehouseLoadDatasetModal.js +++ /dev/null @@ -1,193 +0,0 @@ -import PropTypes from 'prop-types'; -import { useSnackbar } from 'notistack'; -import { - Box, - Dialog, - IconButton, - Table, - TableBody, - TableCell, - TableHead, - TableRow, - Typography -} from '@mui/material'; -import CircularProgress from '@mui/material/CircularProgress'; -import { useCallback, useEffect, useState } from 'react'; -import { SET_ERROR } from '../../store/errorReducer'; -import { useDispatch } from '../../store'; -import useClient from '../../hooks/useClient'; -import Scrollbar from '../../components/Scrollbar'; -import Pager from '../../components/Pager'; -import * as Defaults from '../../components/defaults'; -import { PagedResponseDefault } from '../../components/defaults'; -import listAvailableDatasets from '../../api/RedshiftCluster/listAvailableDatasets'; -import addDatasetToCluster from '../../api/RedshiftCluster/addDatasetToCluster'; -import PlusIcon from '../../icons/Plus'; - -const WarehouseLoadDatasetModal = (props) => { - const client = useClient(); - const { warehouse, onApply, onClose, open, reload, ...other } = props; - const { enqueueSnackbar } = useSnackbar(); - const [filter, setFilter] = useState(Defaults.DefaultFilter); - const [items, setItems] = useState(PagedResponseDefault); - const dispatch = useDispatch(); - const [loading, setLoading] = useState(true); - - const fetchItems = useCallback(async () => { - setLoading(true); - const response = await client.query( - listAvailableDatasets({ - clusterUri: warehouse.clusterUri, - filter: { - ...filter - } - }) - ); - if (!response.errors) { - setItems({ ...response.data.listRedshiftClusterAvailableDatasets }); - reload(); - } else { - dispatch({ type: SET_ERROR, error: response.errors[0].message }); - } - setLoading(false); - }, [warehouse.clusterUri, client, dispatch, filter, reload]); - - const loadDataset = useCallback( - async (dataset) => { - const response = await client.mutate( - addDatasetToCluster({ - clusterUri: warehouse.clusterUri, - datasetUri: dataset.datasetUri - }) - ); - if (!response.errors) { - enqueueSnackbar('Dataset loading to cluster started', { - anchorOrigin: { - horizontal: 'right', - vertical: 'top' - }, - variant: 'success' - }); - await fetchItems(); - reload(true); - } else { - dispatch({ type: SET_ERROR, error: response.errors[0].message }); - } - }, - [ - client, - dispatch, - enqueueSnackbar, - reload, - warehouse.clusterUri, - fetchItems - ] - ); - - const handlePageChange = async (event, value) => { - if (value <= items.pages && value !== items.page) { - await setFilter({ ...filter, isShared: true, page: value }); - } - }; - - useEffect(() => { - if (client) { - fetchItems().catch((e) => - dispatch({ type: SET_ERROR, error: e.message }) - ); - } - }, [client, fetchItems, dispatch]); - - if (!warehouse) { - return null; - } - - return ( - - - - Load datasets to cluster {warehouse.label} - - - Dataset will be loaded from Amazon S3 to Amazon Redshift using - Redshift Spectrum - - {!loading && items && items.nodes.length <= 0 ? ( - - No items to add. - - ) : ( - - - - - - Name - AWS Account - Region - S3 Bucket - Glue Database - Actions - - - {loading ? ( - - ) : ( - - {items.nodes.length > 0 ? 
( - items.nodes.map((dataset) => ( - - {dataset.name} - {dataset.AwsAccountId} - {dataset.region} - - {`s3://${dataset.S3BucketName}`} - - {dataset.GlueDatabaseName} - - { - loadDataset(dataset); - }} - > - - - - - )) - ) : ( - - No datasets found - - )} - - )} -
- -
-
- )} -
-
- ); -}; - -WarehouseLoadDatasetModal.propTypes = { - warehouse: PropTypes.object.isRequired, - onApply: PropTypes.func, - onClose: PropTypes.func, - reload: PropTypes.func, - open: PropTypes.bool.isRequired -}; - -export default WarehouseLoadDatasetModal; diff --git a/frontend/src/views/Warehouses/WarehouseOverview.js b/frontend/src/views/Warehouses/WarehouseOverview.js deleted file mode 100644 index ff22a5ff7..000000000 --- a/frontend/src/views/Warehouses/WarehouseOverview.js +++ /dev/null @@ -1,56 +0,0 @@ -import { Box, Grid } from '@mui/material'; -import PropTypes from 'prop-types'; -import ObjectBrief from '../../components/ObjectBrief'; -import ObjectMetadata from '../../components/ObjectMetadata'; -import WarehouseConnection from './WarehouseConnection'; -import WarehouseCredentials from './WarehouseCredentials'; - -const WarehouseOverview = (props) => { - const { warehouse, ...other } = props; - - return ( - - - - 0 - ? warehouse.tags - : ['-'] - } - /> - - - - - - - {' '} - - - - - - - - - ); -}; - -WarehouseOverview.propTypes = { - warehouse: PropTypes.object.isRequired -}; - -export default WarehouseOverview; diff --git a/frontend/src/views/Warehouses/WarehouseTables.js b/frontend/src/views/Warehouses/WarehouseTables.js deleted file mode 100644 index 7f589d8c3..000000000 --- a/frontend/src/views/Warehouses/WarehouseTables.js +++ /dev/null @@ -1,250 +0,0 @@ -import PropTypes from 'prop-types'; -import { useCallback, useEffect, useState } from 'react'; -import { - Box, - Card, - CardHeader, - Divider, - Grid, - IconButton, - InputAdornment, - Table, - TableBody, - TableCell, - TableHead, - TableRow, - TextField -} from '@mui/material'; -import CircularProgress from '@mui/material/CircularProgress'; -import { DeleteOutlined } from '@mui/icons-material'; -import { LoadingButton } from '@mui/lab'; -import { useSnackbar } from 'notistack'; -import { BsTable } from 'react-icons/bs'; -import useClient from '../../hooks/useClient'; -import * as Defaults from '../../components/defaults'; -import Scrollbar from '../../components/Scrollbar'; -import { SET_ERROR } from '../../store/errorReducer'; -import { useDispatch } from '../../store'; -import SearchIcon from '../../icons/Search'; -import PlusIcon from '../../icons/Plus'; -import Pager from '../../components/Pager'; -import listClusterDatasetTables from '../../api/RedshiftCluster/listClusterDatasetTables'; -import WarehouseCopyTableModal from './WarehouseCopyTableModal'; -import disableRedshiftClusterDatasetCopy from '../../api/RedshiftCluster/disableClusterDatasetCopy'; - -const WarehouseTables = ({ warehouse }) => { - const client = useClient(); - const dispatch = useDispatch(); - const { enqueueSnackbar } = useSnackbar(); - const [items, setItems] = useState(Defaults.PagedResponseDefault); - const [filter, setFilter] = useState(Defaults.DefaultFilter); - const [loading, setLoading] = useState(null); - const [inputValue, setInputValue] = useState(''); - const [isCopyTablesOpen, setIsLoadDatasetsOpen] = useState(false); - - const fetchItems = useCallback(async () => { - setLoading(true); - const response = await client.query( - listClusterDatasetTables({ - clusterUri: warehouse.clusterUri, - filter - }) - ); - if (!response.errors) { - setItems({ ...response.data.listRedshiftClusterCopyEnabledTables }); - } else { - dispatch({ type: SET_ERROR, error: response.errors[0].message }); - } - setLoading(false); - }, [client, dispatch, filter, warehouse.clusterUri]); - - const handleCopyTablesModalOpen = () => { - setIsLoadDatasetsOpen(true); 
- }; - - const handleCopyTablesModalClose = () => { - setIsLoadDatasetsOpen(false); - }; - - const handleInputChange = (event) => { - setInputValue(event.target.value); - setFilter({ ...filter, term: event.target.value }); - }; - - const handleInputKeyup = (event) => { - if (event.code === 'Enter') { - fetchItems().catch((e) => - dispatch({ type: SET_ERROR, error: e.message }) - ); - } - }; - - const handlePageChange = async (event, value) => { - if (value <= items.pages && value !== items.page) { - await setFilter({ ...filter, page: value }); - } - }; - - const disableCopy = useCallback( - async (table) => { - const res = await client.mutate( - disableRedshiftClusterDatasetCopy({ - clusterUri: warehouse.clusterUri, - datasetUri: table.datasetUri, - tableUri: table.tableUri - }) - ); - if (!res.errors) { - enqueueSnackbar('Table copy disabled', { - anchorOrigin: { - horizontal: 'right', - vertical: 'top' - }, - variant: 'success' - }); - await fetchItems(); - } else { - dispatch({ type: SET_ERROR, error: res.errors[0].message }); - } - }, - [client, enqueueSnackbar, dispatch, warehouse.clusterUri, fetchItems] - ); - - useEffect(() => { - if (client) { - fetchItems().catch((e) => - dispatch({ type: SET_ERROR, error: e.message }) - ); - } - }, [client, dispatch, filter.page, fetchItems]); - - return ( - - - } - sx={{ m: 1 }} - variant="outlined" - > - Copy table - - } - title={ - - - Tables copied from loaded datasets - - } - /> - - - - - - - - ) - }} - onChange={handleInputChange} - onKeyUp={handleInputKeyup} - placeholder="Search" - value={inputValue} - variant="outlined" - /> - - - - - - - - - Name - Schema - Location - Actions - - - {loading ? ( - - ) : ( - - {items.nodes.length > 0 ? ( - items.nodes.map((table) => ( - - {table.name} - {table.RedshiftSchema} - {table.RedshiftCopyDataLocation} - - { - disableCopy(table).catch((e) => - dispatch({ type: SET_ERROR, error: e.message }) - ); - }} - > - - - - - )) - ) : ( - - No tables found. - - )} - - )} -
- {items.nodes.length > 0 && ( - - )} -
-
-
- - {isCopyTablesOpen && ( - - )} -
- ); -}; - -WarehouseTables.propTypes = { - warehouse: PropTypes.object.isRequired -}; - -export default WarehouseTables; diff --git a/frontend/src/views/Warehouses/WarehouseView.js b/frontend/src/views/Warehouses/WarehouseView.js deleted file mode 100644 index 32c143244..000000000 --- a/frontend/src/views/Warehouses/WarehouseView.js +++ /dev/null @@ -1,343 +0,0 @@ -import React, { useCallback, useEffect, useState } from 'react'; -import { Link as RouterLink, useParams } from 'react-router-dom'; -import { Helmet } from 'react-helmet-async'; -import { - Box, - Breadcrumbs, - Button, - CircularProgress, - Container, - Divider, - Grid, - Link, - Tab, - Tabs, - Typography -} from '@mui/material'; -import { FaAws, FaTrash } from 'react-icons/fa'; -import { useNavigate } from 'react-router'; -import * as PropTypes from 'prop-types'; -import { - Folder, - Info, - LocalOffer, - PauseOutlined, - PlayArrowOutlined -} from '@mui/icons-material'; -import { useSnackbar } from 'notistack'; -import { LoadingButton } from '@mui/lab'; -import useSettings from '../../hooks/useSettings'; -import useClient from '../../hooks/useClient'; -import ChevronRightIcon from '../../icons/ChevronRight'; -import Stack from '../Stack/Stack'; -import { SET_ERROR } from '../../store/errorReducer'; -import { useDispatch } from '../../store'; -import WarehouseOverview from './WarehouseOverview'; -import DeleteObjectWithFrictionModal from '../../components/DeleteObjectWithFrictionModal'; -import deleteRedshiftCluster from '../../api/RedshiftCluster/deleteCluster'; -import getCluster from '../../api/RedshiftCluster/getCluster'; -import pauseRedshiftCluster from '../../api/RedshiftCluster/pauseCluster'; -import resumeRedshiftCluster from '../../api/RedshiftCluster/resumeCluster'; -import WarehouseDatasets from './WarehouseDatasets'; -import StackStatus from '../Stack/StackStatus'; -import KeyValueTagList from '../KeyValueTags/KeyValueTagList'; - -const tabs = [ - { label: 'Overview', value: 'overview', icon: }, - { label: 'Datasets', value: 'datasets', icon: }, - { label: 'Tags', value: 'tags', icon: }, - { label: 'Stack', value: 'stack', icon: } -]; -function WarehouseViewPageHeader({ - warehouse, - deleteCluster, - pauseCluster, - resumeCluster, - resumeLoader, - pauseLoader -}) { - return ( - - - - Warehouse {warehouse.label} - - } - sx={{ mt: 1 }} - > - - Organize - - - Environments - - - {warehouse.environment.label} - - - {warehouse.label} - - - - - - {resumeCluster && ( - } - sx={{ mt: 1, mr: 1 }} - onClick={resumeCluster} - type="button" - variant="outlined" - > - Resume - - )} - {pauseCluster && ( - } - sx={{ mt: 1, mr: 1 }} - onClick={pauseCluster} - type="button" - variant="outlined" - > - Pause - - )} - - - - - ); -} - -WarehouseViewPageHeader.propTypes = { - warehouse: PropTypes.object.isRequired, - deleteCluster: PropTypes.func.isRequired, - pauseCluster: PropTypes.func.isRequired, - resumeCluster: PropTypes.func.isRequired, - resumeLoader: PropTypes.bool.isRequired, - pauseLoader: PropTypes.bool.isRequired -}; -const WarehouseView = () => { - const dispatch = useDispatch(); - const { settings } = useSettings(); - const { enqueueSnackbar } = useSnackbar(); - const params = useParams(); - const client = useClient(); - const navigate = useNavigate(); - const [currentTab, setCurrentTab] = useState('overview'); - const [loading, setLoading] = useState(true); - const [warehouse, setWarehouse] = useState(null); - const [stack, setStack] = useState(null); - const [showResumeCluster, setShowResumeCluster] = 
useState(false); - const [showPauseCluster, setShowPauseCluster] = useState(false); - const [isDeleteObjectModalOpen, setIsDeleteObjectModalOpen] = useState(false); - const handleDeleteObjectModalOpen = () => { - setIsDeleteObjectModalOpen(true); - }; - - const handleDeleteObjectModalClose = () => { - setIsDeleteObjectModalOpen(false); - }; - - const fetchItem = useCallback(async () => { - setLoading(true); - const response = await client.query(getCluster(params.uri)); - if (!response.errors && response.data.getRedshiftCluster !== null) { - setWarehouse(response.data.getRedshiftCluster); - if (stack) { - setStack(response.data.getRedshiftCluster.stack); - } - } else { - const error = response.errors - ? response.errors[0].message - : 'Warehouse not found'; - dispatch({ type: SET_ERROR, error }); - } - setLoading(false); - }, [client, dispatch, params.uri, stack]); - useEffect(() => { - if (client) { - fetchItem().catch((e) => dispatch({ type: SET_ERROR, error: e.message })); - } - }, [client, fetchItem, dispatch]); - - const handleTabsChange = (event, value) => { - setCurrentTab(value); - }; - - const deleteCluster = async (deleteFromAWS = false) => { - const response = await client.mutate( - deleteRedshiftCluster(warehouse.clusterUri, deleteFromAWS) - ); - if (!response.errors) { - handleDeleteObjectModalClose(); - navigate(`/console/environments/${warehouse.environment.environmentUri}`); - } else { - dispatch({ type: SET_ERROR, error: response.errors[0].message }); - } - }; - - const pauseCluster = async () => { - const response = await client.mutate( - pauseRedshiftCluster(warehouse.clusterUri) - ); - if (!response.errors) { - enqueueSnackbar('Amazon Redshift cluster pause started', { - anchorOrigin: { - horizontal: 'right', - vertical: 'top' - }, - variant: 'success' - }); - await fetchItem(); - } else { - dispatch({ type: SET_ERROR, error: response.errors[0].message }); - } - setShowPauseCluster(false); - }; - const resumeCluster = async () => { - const response = await client.mutate( - resumeRedshiftCluster(warehouse.clusterUri) - ); - if (!response.errors) { - enqueueSnackbar('Amazon Redshift cluster resume started', { - anchorOrigin: { - horizontal: 'right', - vertical: 'top' - }, - variant: 'success' - }); - await fetchItem(); - } else { - dispatch({ type: SET_ERROR, error: response.errors[0].message }); - } - setShowResumeCluster(false); - }; - - if (loading) { - return ; - } - if (!warehouse) { - return null; - } - - return ( - <> - - Warehouses: Warehouse Details | data.all - - - - - - - - {tabs.map((tab) => ( - - ))} - - - - - {currentTab === 'overview' && ( - - )} - {currentTab === 'datasets' && ( - - )} - {currentTab === 'tags' && ( - - )} - {currentTab === 'stack' && ( - - )} - - - - - - ); -}; - -export default WarehouseView; From 6f54b2e5da8b2bdf5202849d10cbc55f3655316b Mon Sep 17 00:00:00 2001 From: Balint David Date: Wed, 5 Jul 2023 14:57:54 +0200 Subject: [PATCH 4/7] remove warehouses enabled parameter --- .../api/Objects/Environment/input_types.py | 2 -- .../dataall/api/Objects/Environment/schema.py | 1 - backend/dataall/db/api/environment.py | 3 -- backend/dataall/db/models/Environment.py | 2 -- ...fc49baecea4_add_enviromental_parameters.py | 8 +++-- .../src/api/Environment/createEnvironment.js | 1 - .../src/api/Environment/getEnvironment.js | 1 - .../listOrganizationEnvironments.js | 1 - .../src/api/Environment/updateEnvironment.js | 1 - frontend/src/routes.js | 29 ------------------- .../Environments/EnvironmentCreateForm.js | 2 -- 
.../views/Environments/EnvironmentEditForm.js | 2 -- .../views/Environments/EnvironmentFeatures.js | 19 ------------ tests/api/test_environment.py | 6 ---- .../datasets/test_dataset_resource_found.py | 1 - 15 files changed, 6 insertions(+), 73 deletions(-) diff --git a/backend/dataall/api/Objects/Environment/input_types.py b/backend/dataall/api/Objects/Environment/input_types.py index 19c9cf103..41e1bb31c 100644 --- a/backend/dataall/api/Objects/Environment/input_types.py +++ b/backend/dataall/api/Objects/Environment/input_types.py @@ -28,7 +28,6 @@ gql.Argument('description', gql.String), gql.Argument('AwsAccountId', gql.NonNullableType(gql.String)), gql.Argument('region', gql.NonNullableType(gql.String)), - gql.Argument('warehousesEnabled', type=gql.Boolean), gql.Argument('vpcId', gql.String), gql.Argument('privateSubnetIds', gql.ArrayType(gql.String)), gql.Argument('publicSubnetIds', gql.ArrayType(gql.String)), @@ -49,7 +48,6 @@ gql.Argument('vpcId', gql.String), gql.Argument('privateSubnetIds', gql.ArrayType(gql.String)), gql.Argument('publicSubnetIds', gql.ArrayType(gql.String)), - gql.Argument('warehousesEnabled', type=gql.Boolean), gql.Argument('resourcePrefix', gql.String), gql.Argument('parameters', gql.ArrayType(ModifyEnvironmentParameterInput)) ], diff --git a/backend/dataall/api/Objects/Environment/schema.py b/backend/dataall/api/Objects/Environment/schema.py index 669365a56..85ec542bb 100644 --- a/backend/dataall/api/Objects/Environment/schema.py +++ b/backend/dataall/api/Objects/Environment/schema.py @@ -83,7 +83,6 @@ resolver=resolve_user_role, ), gql.Field('validated', type=gql.Boolean), - gql.Field('warehousesEnabled', type=gql.Boolean), gql.Field('roleCreated', type=gql.Boolean), gql.Field('isOrganizationDefaultEnvironment', type=gql.Boolean), gql.Field('stack', type=gql.Ref('Stack'), resolver=get_environment_stack), diff --git a/backend/dataall/db/api/environment.py b/backend/dataall/db/api/environment.py index 5edc5ecb0..545c45d70 100644 --- a/backend/dataall/db/api/environment.py +++ b/backend/dataall/db/api/environment.py @@ -57,7 +57,6 @@ def create_environment(session, username, groups, uri, data=None, check_perm=Non ), EnvironmentDefaultIAMRoleArn=f'arn:aws:iam::{data.get("AwsAccountId")}:role/{data.get("EnvironmentDefaultIAMRoleName")}', CDKRoleArn=f"arn:aws:iam::{data.get('AwsAccountId')}:role/{data['cdk_role_name']}", - warehousesEnabled=data.get('warehousesEnabled', True), resourcePrefix=data.get('resourcePrefix'), ) @@ -184,8 +183,6 @@ def update_environment(session, username, groups, uri, data=None, check_perm=Non environment.description = data.get('description', 'No description provided') if data.get('tags'): environment.tags = data.get('tags') - if 'warehousesEnabled' in data.keys(): - environment.warehousesEnabled = data.get('warehousesEnabled') if data.get('resourcePrefix'): environment.resourcePrefix = data.get('resourcePrefix') diff --git a/backend/dataall/db/models/Environment.py b/backend/dataall/db/models/Environment.py index 9701ec70d..679c36fbb 100644 --- a/backend/dataall/db/models/Environment.py +++ b/backend/dataall/db/models/Environment.py @@ -24,8 +24,6 @@ class Environment(Resource, Base): EnvironmentDefaultAthenaWorkGroup = Column(String) roleCreated = Column(Boolean, nullable=False, default=False) - warehousesEnabled = Column(Boolean, default=True) - userRoleInEnvironment = query_expression() SamlGroupName = Column(String, nullable=True) diff --git a/backend/migrations/versions/5fc49baecea4_add_enviromental_parameters.py 
b/backend/migrations/versions/5fc49baecea4_add_enviromental_parameters.py index c08e8baf7..e4f34fe8e 100644 --- a/backend/migrations/versions/5fc49baecea4_add_enviromental_parameters.py +++ b/backend/migrations/versions/5fc49baecea4_add_enviromental_parameters.py @@ -47,6 +47,7 @@ class Environment(Resource, Base): mlStudiosEnabled = Column(Boolean) pipelinesEnabled = Column(Boolean) dashboardsEnabled = Column(Boolean) + warehousesEnabled = Column(Boolean) class EnvironmentParameter(Base): @@ -80,7 +81,7 @@ def upgrade(): Column("paramKey", String, primary_key=True), Column("paramValue", String, nullable=False), ) - print("Creation of environment_parameters is done") + print("Creation of environment_parameters table is done") print("Migrating the environmental parameters from environment table to environment_parameters table...") envs: List[Environment] = session.query(Environment).all() @@ -106,6 +107,7 @@ def upgrade(): op.drop_column("environment", "mlStudiosEnabled") op.drop_column("environment", "pipelinesEnabled") op.drop_column("environment", "dashboardsEnabled") + op.drop_column("environment", "warehousesEnabled") print("Dropped the columns from the environment table ") create_foreign_key_to_env(op, 'sagemaker_notebook') @@ -141,6 +143,7 @@ def downgrade(): op.add_column("environment", Column("mlStudiosEnabled", Boolean, default=True)) op.add_column("environment", Column("pipelinesEnabled", Boolean, default=True)) op.add_column("environment", Column("dashboardsEnabled", Boolean, default=True)) + op.add_column("environment", Column("warehousesEnabled", Boolean, default=True)) print("Filling environment table with parameters rows...") params = session.query(EnvironmentParameter).all() @@ -152,7 +155,8 @@ def downgrade(): notebooksEnabled=params["notebooksEnabled"] == "true", mlStudiosEnabled=params["mlStudiosEnabled"] == "true", pipelinesEnabled=params["pipelinesEnabled"] == "true", - dashboardsEnabled=params["dashboardsEnabled"] == "true" + dashboardsEnabled=params["dashboardsEnabled"] == "true", + warehousesEnabled=params["warehousesEnabled"] == "true" )) save_deleted_permissions(session) diff --git a/frontend/src/api/Environment/createEnvironment.js b/frontend/src/api/Environment/createEnvironment.js index 6eb2c3b1c..49aee4d5f 100644 --- a/frontend/src/api/Environment/createEnvironment.js +++ b/frontend/src/api/Environment/createEnvironment.js @@ -13,7 +13,6 @@ const createEnvironment = (input) => ({ SamlGroupName AwsAccountId created - warehousesEnabled parameters { key value diff --git a/frontend/src/api/Environment/getEnvironment.js b/frontend/src/api/Environment/getEnvironment.js index 7a995b51f..ad2505c45 100644 --- a/frontend/src/api/Environment/getEnvironment.js +++ b/frontend/src/api/Environment/getEnvironment.js @@ -14,7 +14,6 @@ const getEnvironment = ({ environmentUri }) => ({ name label AwsAccountId - warehousesEnabled region owner tags diff --git a/frontend/src/api/Environment/listOrganizationEnvironments.js b/frontend/src/api/Environment/listOrganizationEnvironments.js index fe89eb5a9..16e7db5bb 100644 --- a/frontend/src/api/Environment/listOrganizationEnvironments.js +++ b/frontend/src/api/Environment/listOrganizationEnvironments.js @@ -32,7 +32,6 @@ const listOrganizationEnvironments = ({ organizationUri, filter }) => ({ tags environmentType AwsAccountId - warehousesEnabled userRoleInEnvironment stack { stack diff --git a/frontend/src/api/Environment/updateEnvironment.js b/frontend/src/api/Environment/updateEnvironment.js index e08871e05..81e00d485 ---
a/frontend/src/api/Environment/updateEnvironment.js +++ b/frontend/src/api/Environment/updateEnvironment.js @@ -16,7 +16,6 @@ const updateEnvironment = ({ environmentUri, input }) => ({ userRoleInEnvironment SamlGroupName AwsAccountId - warehousesEnabled created parameters { key diff --git a/frontend/src/routes.js b/frontend/src/routes.js index 406c96237..5a924e072 100644 --- a/frontend/src/routes.js +++ b/frontend/src/routes.js @@ -120,19 +120,6 @@ const PipelineEditForm = Loadable( lazy(() => import('./views/Pipelines/PipelineEditForm')) ); -const WarehouseCreateForm = Loadable( - lazy(() => import('./views/Warehouses/WarehouseCreateForm')) -); -const WarehouseView = Loadable( - lazy(() => import('./views/Warehouses/WarehouseView')) -); -const WarehouseEditForm = Loadable( - lazy(() => import('./views/Warehouses/WarehouseEditForm')) -); -const WarehouseImportForm = Loadable( - lazy(() => import('./views/Warehouses/WarehouseImportForm')) -); - const ShareList = Loadable(lazy(() => import('./views/Shares/ShareList'))); const ShareView = Loadable(lazy(() => import('./views/Shares/ShareView'))); @@ -205,14 +192,6 @@ const routes = [ } ] }, - { - path: 'warehouse/:uri', - element: - }, - { - path: 'warehouse/:uri/edit', - element: - }, { children: [ { @@ -226,14 +205,6 @@ const routes = [ { path: 'environments/:uri/edit', element: - }, - { - path: 'environments/:uri/warehouses/new', - element: - }, - { - path: 'environments/:uri/warehouses/import', - element: } ] }, diff --git a/frontend/src/views/Environments/EnvironmentCreateForm.js b/frontend/src/views/Environments/EnvironmentCreateForm.js index 0d6c08c8d..1c11243d5 100644 --- a/frontend/src/views/Environments/EnvironmentCreateForm.js +++ b/frontend/src/views/Environments/EnvironmentCreateForm.js @@ -153,7 +153,6 @@ const EnvironmentCreateForm = (props) => { tags: values.tags, description: values.description, region: values.region, - warehousesEnabled: values.warehousesEnabled, EnvironmentDefaultIAMRoleName: values.EnvironmentDefaultIAMRoleName, resourcePrefix: values.resourcePrefix, parameters: [ @@ -381,7 +380,6 @@ const EnvironmentCreateForm = (props) => { notebooksEnabled: true, mlStudiosEnabled: true, pipelinesEnabled: true, - warehousesEnabled: true, EnvironmentDefaultIAMRoleName: '', resourcePrefix: 'dataall' }} diff --git a/frontend/src/views/Environments/EnvironmentEditForm.js b/frontend/src/views/Environments/EnvironmentEditForm.js index d739ad7b3..6513e495c 100644 --- a/frontend/src/views/Environments/EnvironmentEditForm.js +++ b/frontend/src/views/Environments/EnvironmentEditForm.js @@ -75,7 +75,6 @@ const EnvironmentEditForm = (props) => { label: values.label, tags: values.tags, description: values.description, - warehousesEnabled: values.warehousesEnabled, resourcePrefix: values.resourcePrefix, parameters: [ { @@ -215,7 +214,6 @@ const EnvironmentEditForm = (props) => { pipelinesEnabled: env.parameters['pipelinesEnabled'] === 'true', dashboardsEnabled: env.parameters['dashboardsEnabled'] === 'true', - warehousesEnabled: env.warehousesEnabled, resourcePrefix: env.resourcePrefix }} validationSchema={Yup.object().shape({ diff --git a/frontend/src/views/Environments/EnvironmentFeatures.js b/frontend/src/views/Environments/EnvironmentFeatures.js index d8d8ebf66..b5d904aba 100644 --- a/frontend/src/views/Environments/EnvironmentFeatures.js +++ b/frontend/src/views/Environments/EnvironmentFeatures.js @@ -112,25 +112,6 @@ const EnvironmentFeatures = (props) => { - {/* - - Warehouses - - - - - */} diff --git 
a/tests/api/test_environment.py b/tests/api/test_environment.py index c781c51e3..691ce1294 100644 --- a/tests/api/test_environment.py +++ b/tests/api/test_environment.py @@ -29,7 +29,6 @@ def get_env(client, env1, group): region SamlGroupName owner - warehousesEnabled stack{ EcsTaskArn EcsTaskId @@ -56,7 +55,6 @@ def test_get_environment(client, org1, env1, group): body = response.data.getEnvironment assert body.owner == 'alice' assert body.AwsAccountId == env1.AwsAccountId - assert body.warehousesEnabled params = {p.key: p.value for p in body.parameters} assert params["dashboardsEnabled"] == "true" @@ -100,7 +98,6 @@ def test_update_env(client, org1, env1, group): owner tags resourcePrefix - warehousesEnabled parameters { key value @@ -115,7 +112,6 @@ def test_update_env(client, org1, env1, group): input={ 'label': 'DEV', 'tags': ['test', 'env'], - 'warehousesEnabled': False, 'parameters': [ { 'key': 'moduleEnabled', @@ -134,7 +130,6 @@ def test_update_env(client, org1, env1, group): input={ 'label': 'DEV', 'tags': ['test', 'env'], - 'warehousesEnabled': False, 'parameters': [ { 'key': 'moduleEnabled', @@ -155,7 +150,6 @@ def test_update_env(client, org1, env1, group): assert response.data.updateEnvironment.label == 'DEV' assert str(response.data.updateEnvironment.tags) == str(['test', 'env']) assert not response.data.updateEnvironment.dashboardsEnabled - assert not response.data.updateEnvironment.warehousesEnabled assert response.data.updateEnvironment.parameters assert response.data.updateEnvironment.parameters[0]["key"] == "moduleEnabled" assert response.data.updateEnvironment.parameters[0]["value"] == "True" diff --git a/tests/modules/datasets/test_dataset_resource_found.py b/tests/modules/datasets/test_dataset_resource_found.py index 3536c1702..30ac3b1e4 100644 --- a/tests/modules/datasets/test_dataset_resource_found.py +++ b/tests/modules/datasets/test_dataset_resource_found.py @@ -30,7 +30,6 @@ def get_env(client, env1, group): region SamlGroupName owner - warehousesEnabled stack{ EcsTaskArn EcsTaskId From 22c5228fd4f5c5e993d17763488c802e649bac46 Mon Sep 17 00:00:00 2001 From: Balint David Date: Wed, 5 Jul 2023 15:23:13 +0200 Subject: [PATCH 5/7] Cleaning up core folder structure --- backend/dataall/api/Objects/Environment/resolvers.py | 2 +- backend/dataall/core/environment/{ => db}/models.py | 0 backend/dataall/core/environment/db/repositories.py | 2 +- .../dataall/core/{group => environment/services}/__init__.py | 0 .../services/environment_resource_manager.py | 0 backend/dataall/core/group/services/__init__.py | 0 backend/dataall/db/api/environment.py | 4 ++-- backend/dataall/modules/dashboards/__init__.py | 2 +- backend/dataall/modules/dashboards/db/dashboard_repository.py | 2 +- backend/dataall/modules/datapipelines/__init__.py | 2 +- backend/dataall/modules/datapipelines/db/repositories.py | 2 +- backend/dataall/modules/dataset_sharing/__init__.py | 2 +- .../modules/dataset_sharing/db/share_object_repository.py | 2 +- backend/dataall/modules/datasets/__init__.py | 2 +- .../dataall/modules/datasets_base/db/dataset_repository.py | 2 +- backend/dataall/modules/mlstudio/db/mlstudio_repository.py | 2 +- backend/dataall/modules/notebooks/db/notebook_repository.py | 2 +- backend/dataall/modules/worksheets/__init__.py | 2 +- backend/dataall/modules/worksheets/db/repositories.py | 2 +- tests/api/test_organization.py | 2 +- 20 files changed, 17 insertions(+), 17 deletions(-) rename backend/dataall/core/environment/{ => db}/models.py (100%) rename backend/dataall/core/{group => 
environment/services}/__init__.py (100%) rename backend/dataall/core/{group => environment}/services/environment_resource_manager.py (100%) delete mode 100644 backend/dataall/core/group/services/__init__.py diff --git a/backend/dataall/api/Objects/Environment/resolvers.py b/backend/dataall/api/Objects/Environment/resolvers.py index 4c3fb690e..b740e18b2 100644 --- a/backend/dataall/api/Objects/Environment/resolvers.py +++ b/backend/dataall/api/Objects/Environment/resolvers.py @@ -14,7 +14,7 @@ from ....aws.handlers.cloudformation import CloudFormation from ....aws.handlers.iam import IAM from ....aws.handlers.parameter_store import ParameterStoreManager -from ....core.group.services.environment_resource_manager import EnvironmentResourceManager +from ....core.environment.services.environment_resource_manager import EnvironmentResourceManager from ....db import exceptions, permissions from ....db.api import Environment, ResourcePolicy, Stack from ....utils.naming_convention import ( diff --git a/backend/dataall/core/environment/models.py b/backend/dataall/core/environment/db/models.py similarity index 100% rename from backend/dataall/core/environment/models.py rename to backend/dataall/core/environment/db/models.py diff --git a/backend/dataall/core/environment/db/repositories.py b/backend/dataall/core/environment/db/repositories.py index 4b243beab..38cc67c4c 100644 --- a/backend/dataall/core/environment/db/repositories.py +++ b/backend/dataall/core/environment/db/repositories.py @@ -1,4 +1,4 @@ -from dataall.core.environment.models import EnvironmentParameter +from dataall.core.environment.db.models import EnvironmentParameter from sqlalchemy.sql import and_ diff --git a/backend/dataall/core/group/__init__.py b/backend/dataall/core/environment/services/__init__.py similarity index 100% rename from backend/dataall/core/group/__init__.py rename to backend/dataall/core/environment/services/__init__.py diff --git a/backend/dataall/core/group/services/environment_resource_manager.py b/backend/dataall/core/environment/services/environment_resource_manager.py similarity index 100% rename from backend/dataall/core/group/services/environment_resource_manager.py rename to backend/dataall/core/environment/services/environment_resource_manager.py diff --git a/backend/dataall/core/group/services/__init__.py b/backend/dataall/core/group/services/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/backend/dataall/db/api/environment.py b/backend/dataall/db/api/environment.py index 545c45d70..73d8bdfe7 100644 --- a/backend/dataall/db/api/environment.py +++ b/backend/dataall/db/api/environment.py @@ -21,13 +21,13 @@ ) from ..models.Permission import PermissionType from ..paginator import paginate -from dataall.core.environment.models import EnvironmentParameter +from dataall.core.environment.db.models import EnvironmentParameter from dataall.core.environment.db.repositories import EnvironmentParameterRepository from dataall.utils.naming_convention import ( NamingConventionService, NamingConventionPattern, ) -from dataall.core.group.services.environment_resource_manager import EnvironmentResourceManager +from dataall.core.environment.services.environment_resource_manager import EnvironmentResourceManager log = logging.getLogger(__name__) diff --git a/backend/dataall/modules/dashboards/__init__.py b/backend/dataall/modules/dashboards/__init__.py index 9c2f4f10e..d01529936 100644 --- a/backend/dataall/modules/dashboards/__init__.py +++ b/backend/dataall/modules/dashboards/__init__.py @@ 
-2,7 +2,7 @@ import logging from typing import Set -from dataall.core.group.services.environment_resource_manager import EnvironmentResourceManager +from dataall.core.environment.services.environment_resource_manager import EnvironmentResourceManager from dataall.modules.dashboards.db.dashboard_repository import DashboardRepository from dataall.modules.dashboards.db.models import Dashboard from dataall.modules.loader import ImportMode, ModuleInterface diff --git a/backend/dataall/modules/dashboards/db/dashboard_repository.py b/backend/dataall/modules/dashboards/db/dashboard_repository.py index 0a3059f0f..d15eb089f 100644 --- a/backend/dataall/modules/dashboards/db/dashboard_repository.py +++ b/backend/dataall/modules/dashboards/db/dashboard_repository.py @@ -3,7 +3,7 @@ from sqlalchemy import or_, and_ from sqlalchemy.orm import Query -from dataall.core.group.services.environment_resource_manager import EnvironmentResource +from dataall.core.environment.services.environment_resource_manager import EnvironmentResource from dataall.db import exceptions, paginate from dataall.db.api import Environment from dataall.modules.dashboards.db.models import DashboardShare, DashboardShareStatus, Dashboard diff --git a/backend/dataall/modules/datapipelines/__init__.py b/backend/dataall/modules/datapipelines/__init__.py index 42dc4ef8f..23e5ec53b 100644 --- a/backend/dataall/modules/datapipelines/__init__.py +++ b/backend/dataall/modules/datapipelines/__init__.py @@ -2,7 +2,7 @@ import logging from typing import List, Type -from dataall.core.group.services.environment_resource_manager import EnvironmentResourceManager +from dataall.core.environment.services.environment_resource_manager import EnvironmentResourceManager from dataall.modules.datapipelines.db.models import DataPipeline from dataall.modules.datapipelines.db.repositories import DatapipelinesRepository from dataall.modules.datapipelines.services.datapipelines_permissions import \ diff --git a/backend/dataall/modules/datapipelines/db/repositories.py b/backend/dataall/modules/datapipelines/db/repositories.py index e73f07977..dcb6b3582 100644 --- a/backend/dataall/modules/datapipelines/db/repositories.py +++ b/backend/dataall/modules/datapipelines/db/repositories.py @@ -1,7 +1,7 @@ from sqlalchemy import or_, and_ from sqlalchemy.orm import Query -from dataall.core.group.services.environment_resource_manager import EnvironmentResource +from dataall.core.environment.services.environment_resource_manager import EnvironmentResource from dataall.db import models, exceptions, paginate from dataall.modules.datapipelines.db.models import DataPipeline, DataPipelineEnvironment diff --git a/backend/dataall/modules/dataset_sharing/__init__.py b/backend/dataall/modules/dataset_sharing/__init__.py index 616a129f2..9b7010502 100644 --- a/backend/dataall/modules/dataset_sharing/__init__.py +++ b/backend/dataall/modules/dataset_sharing/__init__.py @@ -1,7 +1,7 @@ import logging from typing import List, Type, Set -from dataall.core.group.services.environment_resource_manager import EnvironmentResourceManager +from dataall.core.environment.services.environment_resource_manager import EnvironmentResourceManager from dataall.modules.dataset_sharing.db.share_object_repository import ShareEnvironmentResource from dataall.modules.datasets_base import DatasetBaseModuleInterface from dataall.modules.loader import ModuleInterface, ImportMode diff --git a/backend/dataall/modules/dataset_sharing/db/share_object_repository.py 
b/backend/dataall/modules/dataset_sharing/db/share_object_repository.py index bdabbb837..aa0fb266f 100644 --- a/backend/dataall/modules/dataset_sharing/db/share_object_repository.py +++ b/backend/dataall/modules/dataset_sharing/db/share_object_repository.py @@ -4,7 +4,7 @@ from sqlalchemy import and_, or_, func, case from sqlalchemy.orm import Query -from dataall.core.group.services.environment_resource_manager import EnvironmentResource +from dataall.core.environment.services.environment_resource_manager import EnvironmentResource from dataall.db import models, exceptions, paginate from dataall.db.models.Enums import PrincipalType from dataall.modules.dataset_sharing.db.enums import ShareObjectActions, ShareObjectStatus, ShareItemActions, \ diff --git a/backend/dataall/modules/datasets/__init__.py b/backend/dataall/modules/datasets/__init__.py index d0baa03db..2829c1fe4 100644 --- a/backend/dataall/modules/datasets/__init__.py +++ b/backend/dataall/modules/datasets/__init__.py @@ -30,7 +30,7 @@ def __init__(self): from dataall.api.Objects.Vote.resolvers import add_vote_type from dataall.api.Objects.Feed.registry import FeedRegistry, FeedDefinition from dataall.api.Objects.Glossary.registry import GlossaryRegistry, GlossaryDefinition - from dataall.core.group.services.environment_resource_manager import EnvironmentResourceManager + from dataall.core.environment.services.environment_resource_manager import EnvironmentResourceManager from dataall.modules.datasets.indexers.dataset_indexer import DatasetIndexer from dataall.modules.datasets.indexers.location_indexer import DatasetLocationIndexer from dataall.modules.datasets.indexers.table_indexer import DatasetTableIndexer diff --git a/backend/dataall/modules/datasets_base/db/dataset_repository.py b/backend/dataall/modules/datasets_base/db/dataset_repository.py index 0601b0eae..42dcd41aa 100644 --- a/backend/dataall/modules/datasets_base/db/dataset_repository.py +++ b/backend/dataall/modules/datasets_base/db/dataset_repository.py @@ -11,7 +11,7 @@ from dataall.db.exceptions import ObjectNotFound from dataall.db.models.Enums import Language from dataall.modules.datasets_base.db.enums import ConfidentialityClassification -from dataall.core.group.services.environment_resource_manager import EnvironmentResource +from dataall.core.environment.services.environment_resource_manager import EnvironmentResource from dataall.modules.datasets_base.db.models import DatasetTable, Dataset from dataall.utils.naming_convention import ( NamingConventionService, diff --git a/backend/dataall/modules/mlstudio/db/mlstudio_repository.py b/backend/dataall/modules/mlstudio/db/mlstudio_repository.py index 55717aec1..c0612eb1e 100644 --- a/backend/dataall/modules/mlstudio/db/mlstudio_repository.py +++ b/backend/dataall/modules/mlstudio/db/mlstudio_repository.py @@ -8,7 +8,7 @@ from dataall.db import paginate from dataall.modules.mlstudio.db.models import SagemakerStudioUser -from dataall.core.group.services.environment_resource_manager import EnvironmentResource +from dataall.core.environment.services.environment_resource_manager import EnvironmentResource class SageMakerStudioRepository(EnvironmentResource): diff --git a/backend/dataall/modules/notebooks/db/notebook_repository.py b/backend/dataall/modules/notebooks/db/notebook_repository.py index f7d1f50f7..2b0640de8 100644 --- a/backend/dataall/modules/notebooks/db/notebook_repository.py +++ b/backend/dataall/modules/notebooks/db/notebook_repository.py @@ -8,7 +8,7 @@ from dataall.db import paginate from 
dataall.modules.notebooks.db.models import SagemakerNotebook -from dataall.core.group.services.environment_resource_manager import EnvironmentResource +from dataall.core.environment.services.environment_resource_manager import EnvironmentResource class NotebookRepository(EnvironmentResource): diff --git a/backend/dataall/modules/worksheets/__init__.py b/backend/dataall/modules/worksheets/__init__.py index ebff948cf..93533c080 100644 --- a/backend/dataall/modules/worksheets/__init__.py +++ b/backend/dataall/modules/worksheets/__init__.py @@ -1,7 +1,7 @@ """Contains the code related to worksheets""" import logging -from dataall.core.group.services.environment_resource_manager import EnvironmentResourceManager +from dataall.core.environment.services.environment_resource_manager import EnvironmentResourceManager from dataall.modules.loader import ImportMode, ModuleInterface from dataall.modules.worksheets.db.models import Worksheet from dataall.modules.worksheets.db.repositories import WorksheetRepository diff --git a/backend/dataall/modules/worksheets/db/repositories.py b/backend/dataall/modules/worksheets/db/repositories.py index e291f8061..6b814a4d8 100644 --- a/backend/dataall/modules/worksheets/db/repositories.py +++ b/backend/dataall/modules/worksheets/db/repositories.py @@ -4,7 +4,7 @@ from sqlalchemy import or_ from sqlalchemy.orm import Query -from dataall.core.group.services.environment_resource_manager import EnvironmentResource +from dataall.core.environment.services.environment_resource_manager import EnvironmentResource from dataall.db import paginate from dataall.modules.worksheets.db.models import Worksheet, WorksheetQueryResult diff --git a/tests/api/test_organization.py b/tests/api/test_organization.py index 8930f014b..c79c1395d 100644 --- a/tests/api/test_organization.py +++ b/tests/api/test_organization.py @@ -1,7 +1,7 @@ import dataall import pytest -from dataall.core.environment.models import EnvironmentParameter +from dataall.core.environment.db.models import EnvironmentParameter @pytest.fixture(scope='module', autouse=True) From d650d51ddd38df51fca07d32e01c0ce05f571b23 Mon Sep 17 00:00:00 2001 From: Balint David Date: Thu, 6 Jul 2023 10:48:56 +0200 Subject: [PATCH 6/7] re-adding union with config in module loading check --- backend/dataall/modules/loader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/dataall/modules/loader.py b/backend/dataall/modules/loader.py index 36d4f5c54..20f698cef 100644 --- a/backend/dataall/modules/loader.py +++ b/backend/dataall/modules/loader.py @@ -226,7 +226,7 @@ def _check_loading_correct(in_config: Set[str], modes: Set[ImportMode]): ) # 4) Checks all references for modules (when ModuleInterfaces don't exist or not supported) - checked_module_names = {module.name() for module in expected_load} + checked_module_names = {module.name() for module in expected_load} | in_config for module in sys.modules.keys(): if module.startswith(_MODULE_PREFIX) and module != __name__: # skip loader name = _get_module_name(module) From 31784a41c3c7e8f4fd2f570ee4a44e5bccae6ab3 Mon Sep 17 00:00:00 2001 From: Balint David Date: Thu, 6 Jul 2023 10:54:17 +0200 Subject: [PATCH 7/7] adding comment to module loading --- backend/dataall/modules/loader.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/backend/dataall/modules/loader.py b/backend/dataall/modules/loader.py index 20f698cef..291f41a53 100644 --- a/backend/dataall/modules/loader.py +++ b/backend/dataall/modules/loader.py @@ -226,7 +226,9 @@ def
_check_loading_correct(in_config: Set[str], modes: Set[ImportMode]): ) # 4) Checks all references for modules (when ModuleInterfaces don't exist or not supported) - checked_module_names = {module.name() for module in expected_load} | in_config + checked_module_names = {module.name() for module in expected_load} + # Modules from the config that don't support the current mode weren't added in Step 1, adding them here + checked_module_names |= in_config for module in sys.modules.keys(): if module.startswith(_MODULE_PREFIX) and module != __name__: # skip loader name = _get_module_name(module)
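
Note on patches 6/7: step 1 of the loader check collects only modules whose ModuleInterface supports the current ImportMode, so a module that is listed in the config but inactive for that mode would otherwise be reported as a stray reference. The following is a minimal, self-contained Python sketch of why the union with in_config is needed; the function signature, the simplified error, and the module names are illustrative stand-ins, not the actual dataall.modules.loader internals.

from typing import Set


def check_loading_correct(in_config: Set[str], expected_load_names: Set[str], imported_module_names: Set[str]) -> None:
    # expected_load_names mirrors step 1 of the real check: only modules whose
    # interface supports the current import mode end up here.
    checked_module_names = set(expected_load_names)
    # The union restored in patch 6: modules named in the config but inactive
    # for this mode are still legitimate, so count them as checked.
    checked_module_names |= in_config
    for name in imported_module_names:
        if name not in checked_module_names:
            raise ImportError(f'Module {name} is imported but not accounted for in the config')


# Hypothetical example: 'datasets' is in the config but exposes no interface
# for the current mode; without the union it would trip the check above.
check_loading_correct(
    in_config={'datasets', 'notebooks'},
    expected_load_names={'notebooks'},
    imported_module_names={'datasets', 'notebooks'},
)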