From eb37f7c78d41d579be20eb361c8c4cd7c04b6043 Mon Sep 17 00:00:00 2001 From: Joseph Vazhappilly Date: Wed, 17 Feb 2021 08:20:03 +0530 Subject: [PATCH 1/9] Add NAS resource filesystem --- delfin/api/v1/filesystems.py | 54 +++++++++ delfin/api/v1/router.py | 5 + delfin/api/views/filesystems.py | 26 +++++ delfin/common/constants.py | 17 +++ delfin/db/api.py | 58 ++++++++++ delfin/db/sqlalchemy/api.py | 144 ++++++++++++++++++++++++ delfin/db/sqlalchemy/models.py | 18 +++ delfin/drivers/api.py | 5 + delfin/drivers/driver.py | 5 + delfin/drivers/fake_storage/__init__.py | 32 ++++++ delfin/exception.py | 4 + delfin/task_manager/tasks/resources.py | 51 +++++++++ 12 files changed, 419 insertions(+) create mode 100644 delfin/api/v1/filesystems.py create mode 100644 delfin/api/views/filesystems.py diff --git a/delfin/api/v1/filesystems.py b/delfin/api/v1/filesystems.py new file mode 100644 index 000000000..1fb2c4c3e --- /dev/null +++ b/delfin/api/v1/filesystems.py @@ -0,0 +1,54 @@ +# Copyright 2021 The SODA Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from delfin import db +from delfin.api import api_utils +from delfin.api.common import wsgi +from delfin.api.views import filesystems as filesystem_view + + +class PortController(wsgi.Controller): + + def __init__(self): + super(PortController, self).__init__() + self.search_options = ['name', 'status', 'id', 'storage_id', 'wwn', + 'native_controller_id', 'native_filesystem_id'] + + def _get_fs_search_options(self): + """Return filesystems search options allowed .""" + return self.search_options + + def index(self, req): + ctxt = req.environ['delfin.context'] + query_params = {} + query_params.update(req.GET) + # update options other than filters + sort_keys, sort_dirs = api_utils.get_sort_params(query_params) + marker, limit, offset = api_utils.get_pagination_params(query_params) + # strip out options except supported search options + api_utils.remove_invalid_options(ctxt, query_params, + self._get_fs_search_options()) + + filesystems = db.filesystem_get_all(ctxt, marker, limit, sort_keys, + sort_dirs, query_params, offset) + return filesystem_view.build_filesystems(filesystems) + + def show(self, req, id): + ctxt = req.environ['delfin.context'] + filesystem = db.filesystem_get(ctxt, id) + return filesystem_view.build_filesystem(filesystem) + + +def create_resource(): + return wsgi.Resource(PortController()) diff --git a/delfin/api/v1/router.py b/delfin/api/v1/router.py index 4f5bc1132..cf627e3c9 100644 --- a/delfin/api/v1/router.py +++ b/delfin/api/v1/router.py @@ -18,6 +18,7 @@ from delfin.api.v1 import alert_source from delfin.api.v1 import alerts from delfin.api.v1 import controllers +from delfin.api.v1 import filesystems from delfin.api.v1 import ports from delfin.api.v1 import disks from delfin.api.v1 import storage_pools @@ -109,3 +110,7 @@ def _setup_routes(self, mapper): self.resources['disks'] = disks.create_resource() mapper.resource("disk", "disks", controller=self.resources['disks']) + + self.resources['filesystems'] = 
filesystems.create_resource() + mapper.resource("filesystems", "filesystems", + controller=self.resources['filesystems']) diff --git a/delfin/api/views/filesystems.py b/delfin/api/views/filesystems.py new file mode 100644 index 000000000..c38406415 --- /dev/null +++ b/delfin/api/views/filesystems.py @@ -0,0 +1,26 @@ +# Copyright 2020 The SODA Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import copy + + +def build_filesystems(filesystems): + # Build list of filesystems + views = [build_filesystem(filesystem) + for filesystem in filesystems] + return dict(filesystems=views) + + +def build_filesystem(filesystem): + view = copy.deepcopy(filesystem) + return dict(view) diff --git a/delfin/common/constants.py b/delfin/common/constants.py index 5f95b236b..2fa0f0ab6 100644 --- a/delfin/common/constants.py +++ b/delfin/common/constants.py @@ -149,6 +149,23 @@ class DiskLogicalType(object): ALL = (FREE, MEMBER, HOTSPARE, CACHE, UNKNOWN) +class FilesystemStatus(object): + NORMAL = 'normal' + OFFLINE = 'offline' + UNKNOWN = 'unknown' + + ALL = (NORMAL, OFFLINE, UNKNOWN) + + +class FilesystemSecurityMode(object): + MIXED = 'mixed' + NATIVE = 'native' + WINDOWS = 'windows' + UNIX = 'unix' + + ALL = (MIXED, NATIVE, WINDOWS, UNIX) + + # Enumerations for alert severity class Severity(object): FATAL = 'Fatal' diff --git a/delfin/db/api.py b/delfin/db/api.py index 6171a5396..590188a67 100644 --- a/delfin/db/api.py +++ b/delfin/db/api.py @@ -404,6 +404,64 @@ def disk_get_all(context, marker=None, limit=None, sort_keys=None, filters, offset) +def filesystems_create(context, values): + """Create multiple filesystems.""" + return IMPL.filesystems_create(context, values) + + +def filesystems_update(context, values): + """Update multiple filesystems.""" + return IMPL.filesystems_update(context, values) + + +def filesystems_delete(context, values): + """Delete multiple filesystems.""" + return IMPL.filesystems_delete(context, values) + + +def filesystem_create(context, values): + """Create a filesystem from the values dictionary.""" + return IMPL.filesystem_create(context, values) + + +def filesystem_update(context, filesystem_id, values): + """Update a filesystem with the values dictionary.""" + return IMPL.filesystem_update(context, filesystem_id, values) + + +def filesystem_get(context, filesystem_id): + """Get a filesystem or raise an exception if it does not exist.""" + return IMPL.filesystem_get(context, filesystem_id) + + +def filesystem_delete_by_storage(context, storage_id): + """Delete a filesystem or raise an exception if it does not exist.""" + return IMPL.filesystem_delete_by_storage(context, storage_id) + + +def filesystem_get_all(context, marker=None, limit=None, sort_keys=None, + sort_dirs=None, filters=None, offset=None): + """Retrieves all filesystems. + If no sort parameters are specified then the returned volumes are sorted + first by the 'created_at' key and then by the 'id' key in descending + order. 
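# A minimal usage sketch for the new GET /v1/filesystems endpoint wired up
# earlier in this patch (controller -> view -> db.filesystem_get_all). The
# host, port, storage id and filter values are placeholders; the query
# parameters and the 'filesystems' response key come from the controller and
# view code added here.
import requests

resp = requests.get('http://localhost:8190/v1/filesystems',
                    params={'storage_id': '<storage-uuid>',
                            'status': 'normal',
                            'limit': 10})
for fs in resp.json()['filesystems']:
    print(fs['name'], fs['total_capacity'], fs['used_capacity'])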
+ :param context: context of this request, it's helpful to trace the request + :param marker: the last item of the previous page, used to determine the + next page of results to return + :param limit: maximum number of items to return + :param sort_keys: list of attributes by which results should be sorted, + paired with corresponding item in sort_dirs + :param sort_dirs: list of directions in which results should be sorted, + paired with corresponding item in sort_keys, for example + 'desc' for descending order + :param filters: dictionary of filters + :param offset: number of items to skip + :returns: list of controllers + """ + return IMPL.filesystem_get_all(context, marker, limit, sort_keys, + sort_dirs, filters, offset) + + def access_info_create(context, values): """Create a storage access information that used to connect to a specific storage device. diff --git a/delfin/db/sqlalchemy/api.py b/delfin/db/sqlalchemy/api.py index 5e7a3a336..641aa2dbb 100644 --- a/delfin/db/sqlalchemy/api.py +++ b/delfin/db/sqlalchemy/api.py @@ -1010,6 +1010,148 @@ def _process_disk_info_filters(query, filters): return query +def filesystems_create(context, filesystems): + """Create multiple filesystems.""" + session = get_session() + filesystems_refs = [] + with session.begin(): + + for filesystem in filesystems: + LOG.debug('adding new filesystem for native_filesystem_id {0}:' + .format(filesystem.get('native_filesystem_id'))) + if not filesystem.get('id'): + filesystem['id'] = uuidutils.generate_uuid() + + filesystem_ref = models.Filesystem() + filesystem_ref.update(filesystem) + filesystems_refs.append(filesystem_ref) + + session.add_all(filesystems_refs) + + return filesystems_refs + + +def filesystems_update(context, filesystems): + """Update multiple filesystems.""" + session = get_session() + + with session.begin(): + filesystem_refs = [] + + for filesystem in filesystems: + LOG.debug('updating filesystem {0}:'.format( + filesystem.get('id'))) + query = _filesystem_get_query(context, session) + result = query.filter_by(id=filesystem.get('id') + ).update(filesystem) + + if not result: + LOG.error(exception.FilesystemNotFound(filesystem.get( + 'id'))) + else: + filesystem_refs.append(result) + + return filesystem_refs + + +def filesystems_delete(context, filesystems_id_list): + """Delete multiple filesystems.""" + session = get_session() + with session.begin(): + for filesystem_id in filesystems_id_list: + LOG.debug('deleting filesystem {0}:'.format(filesystem_id)) + query = _filesystem_get_query(context, session) + result = query.filter_by(id=filesystem_id).delete() + + if not result: + LOG.error(exception.FilesystemNotFound(filesystem_id)) + return + + +def _filesystem_get_query(context, session=None): + return model_query(context, models.Filesystem, session=session) + + +def _filesystem_get(context, filesystem_id, session=None): + result = (_filesystem_get_query(context, session=session) + .filter_by(id=filesystem_id) + .first()) + + if not result: + raise exception.FilesystemNotFound(filesystem_id) + + return result + + +def filesystem_create(context, values): + """Create a filesystem from the values dictionary.""" + if not values.get('id'): + values['id'] = uuidutils.generate_uuid() + + filesystem_ref = models.Filesystem() + filesystem_ref.update(values) + + session = get_session() + with session.begin(): + session.add(filesystem_ref) + + return _filesystem_get(context, + filesystem_ref['id'], + session=session) + + +def filesystem_update(context, filesystem_id, values): + """Update a 
filesystem with the values dictionary.""" + session = get_session() + + with session.begin(): + query = _filesystem_get_query(context, session) + result = query.filter_by(id=filesystem_id).update(values) + + if not result: + raise exception.FilesystemNotFound(filesystem_id) + + return result + + +def filesystem_get(context, filesystem_id): + """Get a filesystem or raise an exception if it does not exist.""" + return _filesystem_get(context, filesystem_id) + + +def filesystem_delete_by_storage(context, storage_id): + """Delete filesystem or raise an exception if it does not exist.""" + _filesystem_get_query(context).filter_by(storage_id=storage_id).delete() + + +def filesystem_get_all(context, marker=None, limit=None, sort_keys=None, + sort_dirs=None, filters=None, offset=None): + """Retrieves all filesystems.""" + + session = get_session() + with session.begin(): + # Generate the query + query = _generate_paginate_query(context, session, models.Filesystem, + marker, limit, sort_keys, sort_dirs, + filters, offset, + ) + # No Filesystem would match, return empty list + if query is None: + return [] + return query.all() + + +@apply_like_filters(model=models.Filesystem) +def _process_filesystem_info_filters(query, filters): + """Common filter processing for filesystems queries.""" + if filters: + if not is_valid_model_filters(models.Filesystem, filters): + return + query = query.filter_by(**filters) + + return query + + def is_orm_value(obj): """Check if object is an ORM field or expression.""" return isinstance(obj, (sqlalchemy.orm.attributes.InstrumentedAttribute, @@ -1126,6 +1268,8 @@ def alert_source_get_all(context, marker=None, limit=None, sort_keys=None, models.Port: (_port_get_query, _process_port_info_filters, _port_get), models.Disk: (_disk_get_query, _process_disk_info_filters, _disk_get), + models.Filesystem: (_filesystem_get_query, + _process_filesystem_info_filters, _filesystem_get), } diff --git a/delfin/db/sqlalchemy/models.py b/delfin/db/sqlalchemy/models.py index b41856c4b..070fdc672 100644 --- a/delfin/db/sqlalchemy/models.py +++ b/delfin/db/sqlalchemy/models.py @@ -175,6 +175,24 @@ class Port(BASE, DelfinBase): ipv6_mask = Column(String(255)) +class Filesystem(BASE, DelfinBase): + """Represents a filesystem object.""" + __tablename__ = 'filesystems' + id = Column(String(36), primary_key=True) + name = Column(String(255)) + storage_id = Column(String(36)) + native_filesystem_id = Column(String(255)) + status = Column(String(255)) + allocation_type = Column(String(255)) + security_mode = Column(String(255)) + total_capacity = Column(BigInteger) + used_capacity = Column(BigInteger) + free_capacity = Column(BigInteger) + compression = Column(Boolean) + deduplication = Column(Boolean) + worm = Column(Boolean) + + class AlertSource(BASE, DelfinBase): """Represents an alert source configuration.""" __tablename__ = 'alert_source' diff --git a/delfin/drivers/api.py b/delfin/drivers/api.py index b5073449b..d26f01c51 100644 --- a/delfin/drivers/api.py +++ b/delfin/drivers/api.py @@ -102,6 +102,11 @@ def list_disks(self, context, storage_id): driver = self.driver_manager.get_driver(context, storage_id=storage_id) return driver.list_disks(context) + def list_filesystems(self, context, storage_id): + """List all filesystems from storage system.""" + driver = self.driver_manager.get_driver(context, storage_id=storage_id) + return driver.list_filesystems(context) + def add_trap_config(self, context, storage_id, trap_config): """Config the trap receiver in storage system.""" pass diff --git 
a/delfin/drivers/driver.py b/delfin/drivers/driver.py index cf148c41c..b3bd1bb7c 100644 --- a/delfin/drivers/driver.py +++ b/delfin/drivers/driver.py @@ -63,6 +63,11 @@ def list_disks(self, context): """List all disks from storage system.""" pass + @abc.abstractmethod + def list_filesystems(self, context): + """List all filesystems from storage system.""" + pass + @abc.abstractmethod def add_trap_config(self, context, trap_config): """Config the trap receiver in storage system.""" diff --git a/delfin/drivers/fake_storage/__init__.py b/delfin/drivers/fake_storage/__init__.py index 409387d2b..3de813eb6 100644 --- a/delfin/drivers/fake_storage/__init__.py +++ b/delfin/drivers/fake_storage/__init__.py @@ -56,6 +56,7 @@ PAGE_LIMIT = 500 MIN_STORAGE, MAX_STORAGE = 1, 10 MIN_PERF_VALUES, MAX_PERF_VALUES = 1, 4 +MIN_FS, MAX_FS = 1, 10 def get_range_val(range_str, t): @@ -267,6 +268,37 @@ def list_disks(self, ctx): disk_list.append(c) return disk_list + def list_filesystems(self, ctx): + rd_filesystems_count = random.randint(MIN_FS, MAX_FS) + LOG.info("###########fake_filesystems for %s: %d" + % (self.storage_id, rd_filesystems_count)) + filesystem_list = [] + for idx in range(rd_filesystems_count): + total, used, free = self._get_random_capacity() + boolean = [True, False] + sts = list(constants.FilesystemStatus.ALL) + sts_len = len(constants.FilesystemStatus.ALL) - 1 + alloc_type = list(constants.VolumeType.ALL) + alloc_type_len = len(constants.VolumeType.ALL) - 1 + security = list(constants.FilesystemSecurityMode.ALL) + security_len = len(constants.FilesystemSecurityMode.ALL) - 1 + c = { + "name": "fake_filesystem_" + str(idx), + "storage_id": self.storage_id, + "native_filesystem_id": "fake_original_id_" + str(idx), + "status": sts[random.randint(0, sts_len)], + "allocation_type": alloc_type[random.randint(0, alloc_type_len)], + "security_mode": security[random.randint(0, security_len)], + "total_capacity": total, + "used_capacity": used, + "free_capacity": free, + "worm": boolean[random.randint(0, 1)], + "deduplication": boolean[random.randint(0, 1)], + "compression": boolean[random.randint(0, 1)], + } + filesystem_list.append(c) + return filesystem_list + def add_trap_config(self, context, trap_config): pass diff --git a/delfin/exception.py b/delfin/exception.py index 9b0b61ede..e5ec75ca8 100644 --- a/delfin/exception.py +++ b/delfin/exception.py @@ -182,6 +182,10 @@ class DiskNotFound(NotFound): msg_fmt = _("Disk {0} could not be found.") +class FilesystemNotFound(NotFound): + msg_fmt = _("Filesystem {0} could not be found.") + + class StorageDriverNotFound(NotFound): msg_fmt = _("Storage driver '{0}'could not be found.") diff --git a/delfin/task_manager/tasks/resources.py b/delfin/task_manager/tasks/resources.py index d144e44e7..e19491ef0 100644 --- a/delfin/task_manager/tasks/resources.py +++ b/delfin/task_manager/tasks/resources.py @@ -397,6 +397,57 @@ def remove(self): db.disk_delete_by_storage(self.context, self.storage_id) +class StorageFilesystemTask(StorageResourceTask): + def __init__(self, context, storage_id): + super(StorageFilesystemTask, self).__init__(context, storage_id) + + @check_deleted() + @set_synced_after() + def sync(self): + """ + :return: + """ + LOG.info('Syncing filesystems for storage id:{0}' + .format(self.storage_id)) + try: + # collect the filesystems list from driver and database + storage_filesystems = self.driver_api.list_filesystems( + self.context, self.storage_id) + db_filesystems = db.filesystem_get_all( + self.context, filters={"storage_id": 
self.storage_id}) + + add_list, update_list, delete_id_list = self._classify_resources( + storage_filesystems, db_filesystems, 'native_filesystem_id' + ) + + LOG.info('###StorageFilesystemTask for {0}:add={1},delete={2},' + 'update={3}'.format(self.storage_id, + len(add_list), + len(delete_id_list), + len(update_list))) + if delete_id_list: + db.filesystems_delete(self.context, delete_id_list) + + if update_list: + db.filesystems_update(self.context, update_list) + + if add_list: + db.filesystems_create(self.context, add_list) + except AttributeError as e: + LOG.error(e) + except Exception as e: + msg = _('Failed to sync filesystems entry in DB: {0}' + .format(e)) + LOG.error(msg) + else: + LOG.info("Syncing filesystems successful!!!") + + def remove(self): + LOG.info('Remove filesystems for storage id:{0}' + .format(self.storage_id)) + db.filesystem_delete_by_storage(self.context, self.storage_id) + + class PerformanceCollectionTask(object): def __init__(self): From a003f79af0646bbd0f803d9301fdc64c606f2441 Mon Sep 17 00:00:00 2001 From: Joseph Vazhappilly Date: Wed, 17 Feb 2021 12:31:38 +0530 Subject: [PATCH 2/9] Separate drivers to NAS, SAN and Unified --- delfin/drivers/api.py | 6 ++- delfin/drivers/driver.py | 69 +++++++++++++++++++++++-- delfin/drivers/fake_storage/__init__.py | 2 +- 3 files changed, 70 insertions(+), 7 deletions(-) diff --git a/delfin/drivers/api.py b/delfin/drivers/api.py index d26f01c51..6b4c13d4a 100644 --- a/delfin/drivers/api.py +++ b/delfin/drivers/api.py @@ -20,6 +20,7 @@ from delfin import db from delfin.drivers import helper from delfin.drivers import manager +from delfin.drivers.driver import NASDriver LOG = log.getLogger(__name__) @@ -105,7 +106,10 @@ def list_disks(self, context, storage_id): def list_filesystems(self, context, storage_id): """List all filesystems from storage system.""" driver = self.driver_manager.get_driver(context, storage_id=storage_id) - return driver.list_filesystems(context) + if isinstance(driver, NASDriver): + return driver.list_filesystems(context) + else: + return [] def add_trap_config(self, context, storage_id, trap_config): """Config the trap receiver in storage system.""" diff --git a/delfin/drivers/driver.py b/delfin/drivers/driver.py index b3bd1bb7c..2989f160d 100644 --- a/delfin/drivers/driver.py +++ b/delfin/drivers/driver.py @@ -63,11 +63,6 @@ def list_disks(self, context): """List all disks from storage system.""" pass - @abc.abstractmethod - def list_filesystems(self, context): - """List all filesystems from storage system.""" - pass - @abc.abstractmethod def add_trap_config(self, context, trap_config): """Config the trap receiver in storage system.""" @@ -120,3 +115,67 @@ def list_alerts(self, context, query_para=None): def clear_alert(self, context, sequence_number): """Clear alert from storage system.""" pass + + +@six.add_metaclass(abc.ABCMeta) +class NASDriver(StorageDriver): + + def __init__(self, **kwargs): + """ + :param kwargs: A dictionary, include access information. Pay + attention that it's not safe to save username and password + in memory, so suggest each driver use them to get session + instead of save them in memory directly. 
+ """ + super(NASDriver, self).__init__(**kwargs) + + @abc.abstractmethod + def list_filesystems(self, context): + """List all filesystems from storage system.""" + pass + + # @abc.abstractmethod + # def list_qtrees(self, context): + # """List all qtrees from storage system.""" + # pass + # + # @abc.abstractmethod + # def list_shares(self, context): + # """List all shares from storage system.""" + # pass + # + # @abc.abstractmethod + # def list_quotas(self, context): + # """List all quota from storage system.""" + # pass + + +@six.add_metaclass(abc.ABCMeta) +class SANDriver(StorageDriver): + + def __init__(self, **kwargs): + """ + :param kwargs: A dictionary, include access information. Pay + attention that it's not safe to save username and password + in memory, so suggest each driver use them to get session + instead of save them in memory directly. + """ + super(SANDriver, self).__init__(**kwargs) + + # @abc.abstractmethod + # def list_blocks(self, context): + # """List all blocks from storage system.""" + # pass + + +@six.add_metaclass(abc.ABCMeta) +class UnifiedStorageDriver(SANDriver, NASDriver): + + def __init__(self, **kwargs): + """ + :param kwargs: A dictionary, include access information. Pay + attention that it's not safe to save username and password + in memory, so suggest each driver use them to get session + instead of save them in memory directly. + """ + super(UnifiedStorageDriver, self).__init__(**kwargs) diff --git a/delfin/drivers/fake_storage/__init__.py b/delfin/drivers/fake_storage/__init__.py index 3de813eb6..f11d68e39 100644 --- a/delfin/drivers/fake_storage/__init__.py +++ b/delfin/drivers/fake_storage/__init__.py @@ -83,7 +83,7 @@ def _wait(f, *a, **k): return _wait -class FakeStorageDriver(driver.StorageDriver): +class FakeStorageDriver(driver.UnifiedStorageDriver): """FakeStorageDriver shows how to implement the StorageDriver, it also plays a role as faker to fake data for being tested by clients. 
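# A small sketch of how the new driver hierarchy resolves, assuming the
# delfin package is importable: FakeStorageDriver now derives from
# UnifiedStorageDriver, which mixes in both SANDriver and NASDriver, so the
# NAS-only dispatch in drivers/api.py applies to it.
from delfin.drivers.driver import NASDriver, SANDriver, UnifiedStorageDriver
from delfin.drivers.fake_storage import FakeStorageDriver

assert issubclass(UnifiedStorageDriver, SANDriver)
assert issubclass(UnifiedStorageDriver, NASDriver)
assert issubclass(FakeStorageDriver, NASDriver)  # so list_filesystems() is served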
""" From feefadee31ec62a960f9ced527c4234da7d48167 Mon Sep 17 00:00:00 2001 From: Joseph Vazhappilly Date: Thu, 18 Feb 2021 09:06:28 +0530 Subject: [PATCH 3/9] Fix formatting --- delfin/api/v1/filesystems.py | 12 ++++++------ delfin/db/sqlalchemy/api.py | 6 +++--- delfin/drivers/fake_storage/__init__.py | 3 ++- 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/delfin/api/v1/filesystems.py b/delfin/api/v1/filesystems.py index 1fb2c4c3e..0019c75ce 100644 --- a/delfin/api/v1/filesystems.py +++ b/delfin/api/v1/filesystems.py @@ -18,12 +18,12 @@ from delfin.api.views import filesystems as filesystem_view -class PortController(wsgi.Controller): +class FilesystemController(wsgi.Controller): def __init__(self): - super(PortController, self).__init__() - self.search_options = ['name', 'status', 'id', 'storage_id', 'wwn', - 'native_controller_id', 'native_filesystem_id'] + super(FilesystemController, self).__init__() + self.search_options = ['name', 'status', 'id', 'storage_id', + 'native_filesystem_id'] def _get_fs_search_options(self): """Return filesystems search options allowed .""" @@ -41,7 +41,7 @@ def index(self, req): self._get_fs_search_options()) filesystems = db.filesystem_get_all(ctxt, marker, limit, sort_keys, - sort_dirs, query_params, offset) + sort_dirs, query_params, offset) return filesystem_view.build_filesystems(filesystems) def show(self, req, id): @@ -51,4 +51,4 @@ def show(self, req, id): def create_resource(): - return wsgi.Resource(PortController()) + return wsgi.Resource(FilesystemController()) diff --git a/delfin/db/sqlalchemy/api.py b/delfin/db/sqlalchemy/api.py index 641aa2dbb..d021af2de 100644 --- a/delfin/db/sqlalchemy/api.py +++ b/delfin/db/sqlalchemy/api.py @@ -1096,8 +1096,8 @@ def filesystem_create(context, values): session.add(filesystem_ref) return _filesystem_get(context, - filesystem_ref['id'], - session=session) + filesystem_ref['id'], + session=session) def filesystem_update(context, filesystem_id, values): @@ -1125,7 +1125,7 @@ def filesystem_delete_by_storage(context, storage_id): def filesystem_get_all(context, marker=None, limit=None, sort_keys=None, - sort_dirs=None, filters=None, offset=None): + sort_dirs=None, filters=None, offset=None): """Retrieves all filesystems.""" session = get_session() diff --git a/delfin/drivers/fake_storage/__init__.py b/delfin/drivers/fake_storage/__init__.py index f11d68e39..7b46ec334 100644 --- a/delfin/drivers/fake_storage/__init__.py +++ b/delfin/drivers/fake_storage/__init__.py @@ -287,7 +287,8 @@ def list_filesystems(self, ctx): "storage_id": self.storage_id, "native_filesystem_id": "fake_original_id_" + str(idx), "status": sts[random.randint(0, sts_len)], - "allocation_type": alloc_type[random.randint(0, alloc_type_len)], + "allocation_type": + alloc_type[random.randint(0, alloc_type_len)], "security_mode": security[random.randint(0, security_len)], "total_capacity": total, "used_capacity": used, From 5cbf05dad70617de9f12db3c837e8de63000e990 Mon Sep 17 00:00:00 2001 From: Joseph Vazhappilly Date: Thu, 18 Feb 2021 10:42:35 +0530 Subject: [PATCH 4/9] Add Qtree resource support to delfin --- delfin/api/v1/qtrees.py | 55 +++++++++ delfin/api/v1/router.py | 9 +- delfin/api/views/qtrees.py | 26 +++++ delfin/common/constants.py | 11 +- delfin/db/api.py | 58 ++++++++++ delfin/db/sqlalchemy/api.py | 144 ++++++++++++++++++++++++ delfin/db/sqlalchemy/models.py | 14 +++ delfin/drivers/api.py | 8 ++ delfin/drivers/driver.py | 10 +- delfin/drivers/fake_storage/__init__.py | 27 ++++- delfin/exception.py | 4 + 
delfin/task_manager/tasks/resources.py | 51 +++++++++ 12 files changed, 407 insertions(+), 10 deletions(-) create mode 100644 delfin/api/v1/qtrees.py create mode 100644 delfin/api/views/qtrees.py diff --git a/delfin/api/v1/qtrees.py b/delfin/api/v1/qtrees.py new file mode 100644 index 000000000..4d0d0b507 --- /dev/null +++ b/delfin/api/v1/qtrees.py @@ -0,0 +1,55 @@ +# Copyright 2021 The SODA Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from delfin import db +from delfin.api import api_utils +from delfin.api.common import wsgi +from delfin.api.views import qtrees as qtree_view + + +class QtreeController(wsgi.Controller): + + def __init__(self): + super(QtreeController, self).__init__() + self.search_options = ['name', 'state', 'id', 'storage_id', + 'native_filesystem_id', 'quota_id', + 'native_qtree_id'] + + def _get_qtrees_search_options(self): + """Return qtrees search options allowed .""" + return self.search_options + + def index(self, req): + ctxt = req.environ['delfin.context'] + query_params = {} + query_params.update(req.GET) + # update options other than filters + sort_keys, sort_dirs = api_utils.get_sort_params(query_params) + marker, limit, offset = api_utils.get_pagination_params(query_params) + # strip out options except supported search options + api_utils.remove_invalid_options(ctxt, query_params, + self._get_qtrees_search_options()) + + qtrees = db.qtree_get_all(ctxt, marker, limit, sort_keys, + sort_dirs, query_params, offset) + return qtree_view.build_qtrees(qtrees) + + def show(self, req, id): + ctxt = req.environ['delfin.context'] + qtree = db.qtree_get(ctxt, id) + return qtree_view.build_qtree(qtree) + + +def create_resource(): + return wsgi.Resource(QtreeController()) diff --git a/delfin/api/v1/router.py b/delfin/api/v1/router.py index cf627e3c9..6ca3ada9b 100644 --- a/delfin/api/v1/router.py +++ b/delfin/api/v1/router.py @@ -18,13 +18,14 @@ from delfin.api.v1 import alert_source from delfin.api.v1 import alerts from delfin.api.v1 import controllers +from delfin.api.v1 import disks from delfin.api.v1 import filesystems +from delfin.api.v1 import performance from delfin.api.v1 import ports -from delfin.api.v1 import disks +from delfin.api.v1 import qtrees from delfin.api.v1 import storage_pools from delfin.api.v1 import storages from delfin.api.v1 import volumes -from delfin.api.v1 import performance class APIRouter(common.APIRouter): @@ -114,3 +115,7 @@ def _setup_routes(self, mapper): self.resources['filesystems'] = filesystems.create_resource() mapper.resource("filesystems", "filesystems", controller=self.resources['filesystems']) + + self.resources['qtrees'] = qtrees.create_resource() + mapper.resource("qtrees", "qtrees", + controller=self.resources['qtrees']) diff --git a/delfin/api/views/qtrees.py b/delfin/api/views/qtrees.py new file mode 100644 index 000000000..c149574d5 --- /dev/null +++ b/delfin/api/views/qtrees.py @@ -0,0 +1,26 @@ +# Copyright 2021 The SODA Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import copy + + +def build_qtrees(qtrees): + # Build list of qtrees + views = [build_qtree(qtree) + for qtree in qtrees] + return dict(qtrees=views) + + +def build_qtree(qtree): + view = copy.deepcopy(qtree) + return dict(view) diff --git a/delfin/common/constants.py b/delfin/common/constants.py index 2fa0f0ab6..86c1a27ad 100644 --- a/delfin/common/constants.py +++ b/delfin/common/constants.py @@ -157,7 +157,7 @@ class FilesystemStatus(object): ALL = (NORMAL, OFFLINE, UNKNOWN) -class FilesystemSecurityMode(object): +class NASSecurityMode(object): MIXED = 'mixed' NATIVE = 'native' WINDOWS = 'windows' @@ -166,6 +166,15 @@ class FilesystemSecurityMode(object): ALL = (MIXED, NATIVE, WINDOWS, UNIX) +class QuotaState(object): + NORMAL = 'normal' + SOFT = 'soft_limit' + HARD = 'hard_limit' + ABNORMAL = 'abnormal' + + ALL = (NORMAL, SOFT, HARD, ABNORMAL) + + # Enumerations for alert severity class Severity(object): FATAL = 'Fatal' diff --git a/delfin/db/api.py b/delfin/db/api.py index 590188a67..f3394e16c 100644 --- a/delfin/db/api.py +++ b/delfin/db/api.py @@ -462,6 +462,64 @@ def filesystem_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs, filters, offset) +def qtrees_create(context, values): + """Create multiple qtrees.""" + return IMPL.qtrees_create(context, values) + + +def qtrees_update(context, values): + """Update multiple qtrees.""" + return IMPL.qtrees_update(context, values) + + +def qtrees_delete(context, values): + """Delete multiple qtrees.""" + return IMPL.qtrees_delete(context, values) + + +def qtree_create(context, values): + """Create a qtree from the values dictionary.""" + return IMPL.qtree_create(context, values) + + +def qtree_update(context, qtree_id, values): + """Update a qtree with the values dictionary.""" + return IMPL.qtree_update(context, qtree_id, values) + + +def qtree_get(context, qtree_id): + """Get a qtree or raise an exception if it does not exist.""" + return IMPL.qtree_get(context, qtree_id) + + +def qtree_delete_by_storage(context, storage_id): + """Delete a qtree or raise an exception if it does not exist.""" + return IMPL.qtree_delete_by_storage(context, storage_id) + + +def qtree_get_all(context, marker=None, limit=None, sort_keys=None, + sort_dirs=None, filters=None, offset=None): + """Retrieves all qtrees. + If no sort parameters are specified then the returned volumes are sorted + first by the 'created_at' key and then by the 'id' key in descending + order. 
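# A short sketch of the response body shape produced by the qtree view
# builder added above; the sample record is hypothetical and trimmed to a few
# representative columns.
from delfin.api.views import qtrees as qtree_view

sample = [{'id': 'qtree-uuid', 'name': 'fake_qtree_0', 'path': '/',
           'security_mode': 'unix', 'state': 'normal'}]
body = qtree_view.build_qtrees(sample)
# body == {'qtrees': [{'id': 'qtree-uuid', 'name': 'fake_qtree_0',
#                      'path': '/', 'security_mode': 'unix',
#                      'state': 'normal'}]}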
+ :param context: context of this request, it's helpful to trace the request + :param marker: the last item of the previous page, used to determine the + next page of results to return + :param limit: maximum number of items to return + :param sort_keys: list of attributes by which results should be sorted, + paired with corresponding item in sort_dirs + :param sort_dirs: list of directions in which results should be sorted, + paired with corresponding item in sort_keys, for example + 'desc' for descending order + :param filters: dictionary of filters + :param offset: number of items to skip + :returns: list of controllers + """ + return IMPL.qtree_get_all(context, marker, limit, sort_keys, + sort_dirs, filters, offset) + + def access_info_create(context, values): """Create a storage access information that used to connect to a specific storage device. diff --git a/delfin/db/sqlalchemy/api.py b/delfin/db/sqlalchemy/api.py index d021af2de..d719add50 100644 --- a/delfin/db/sqlalchemy/api.py +++ b/delfin/db/sqlalchemy/api.py @@ -1152,6 +1152,148 @@ def _process_filesystem_info_filters(query, filters): return query +def qtrees_create(context, qtrees): + """Create multiple qtrees.""" + session = get_session() + qtrees_refs = [] + with session.begin(): + + for qtree in qtrees: + LOG.debug('adding new qtree for native_qtree_id {0}:' + .format(qtree.get('native_qtree_id'))) + if not qtree.get('id'): + qtree['id'] = uuidutils.generate_uuid() + + qtree_ref = models.Qtree() + qtree_ref.update(qtree) + qtrees_refs.append(qtree_ref) + + session.add_all(qtrees_refs) + + return qtrees_refs + + +def qtrees_update(context, qtrees): + """Update multiple qtrees.""" + session = get_session() + + with session.begin(): + qtree_refs = [] + + for qtree in qtrees: + LOG.debug('updating qtree {0}:'.format( + qtree.get('id'))) + query = _qtree_get_query(context, session) + result = query.filter_by(id=qtree.get('id') + ).update(qtree) + + if not result: + LOG.error(exception.QtreeNotFound(qtree.get( + 'id'))) + else: + qtree_refs.append(result) + + return qtree_refs + + +def qtrees_delete(context, qtrees_id_list): + """Delete multiple qtrees.""" + session = get_session() + with session.begin(): + for qtree_id in qtrees_id_list: + LOG.debug('deleting qtree {0}:'.format(qtree_id)) + query = _qtree_get_query(context, session) + result = query.filter_by(id=qtree_id).delete() + + if not result: + LOG.error(exception.QtreeNotFound(qtree_id)) + return + + +def _qtree_get_query(context, session=None): + return model_query(context, models.Qtree, session=session) + + +def _qtree_get(context, qtree_id, session=None): + result = (_qtree_get_query(context, session=session) + .filter_by(id=qtree_id) + .first()) + + if not result: + raise exception.QtreeNotFound(qtree_id) + + return result + + +def qtree_create(context, values): + """Create a qtree from the values dictionary.""" + if not values.get('id'): + values['id'] = uuidutils.generate_uuid() + + qtree_ref = models.Qtree() + qtree_ref.update(values) + + session = get_session() + with session.begin(): + session.add(qtree_ref) + + return _qtree_get(context, + qtree_ref['id'], + session=session) + + +def qtree_update(context, qtree_id, values): + """Update a qtree with the values dictionary.""" + session = get_session() + + with session.begin(): + query = _qtree_get_query(context, session) + result = query.filter_by(id=qtree_id).update(values) + + if not result: + raise exception.QtreeNotFound(qtree_id) + + return result + + +def qtree_get(context, qtree_id): + """Get a qtree or 
raise an exception if it does not exist.""" + return _qtree_get(context, qtree_id) + + +def qtree_delete_by_storage(context, storage_id): + """Delete qtree or raise an exception if it does not exist.""" + _qtree_get_query(context).filter_by(storage_id=storage_id).delete() + + +def qtree_get_all(context, marker=None, limit=None, sort_keys=None, + sort_dirs=None, filters=None, offset=None): + """Retrieves all qtrees.""" + + session = get_session() + with session.begin(): + # Generate the query + query = _generate_paginate_query(context, session, models.Qtree, + marker, limit, sort_keys, sort_dirs, + filters, offset, + ) + # No Qtree would match, return empty list + if query is None: + return [] + return query.all() + + +@apply_like_filters(model=models.Qtree) +def _process_qtree_info_filters(query, filters): + """Common filter processing for qtrees queries.""" + if filters: + if not is_valid_model_filters(models.Qtree, filters): + return + query = query.filter_by(**filters) + + return query + + def is_orm_value(obj): """Check if object is an ORM field or expression.""" return isinstance(obj, (sqlalchemy.orm.attributes.InstrumentedAttribute, @@ -1270,6 +1412,8 @@ def alert_source_get_all(context, marker=None, limit=None, sort_keys=None, _disk_get), models.Filesystem: (_filesystem_get_query, _process_filesystem_info_filters, _filesystem_get), + models.Qtree: (_qtree_get_query, + _process_qtree_info_filters, _qtree_get), } diff --git a/delfin/db/sqlalchemy/models.py b/delfin/db/sqlalchemy/models.py index 070fdc672..3fe14821c 100644 --- a/delfin/db/sqlalchemy/models.py +++ b/delfin/db/sqlalchemy/models.py @@ -193,6 +193,20 @@ class Filesystem(BASE, DelfinBase): worm = Column(Boolean) +class Qtree(BASE, DelfinBase): + """Represents a qtree object.""" + __tablename__ = 'qtrees' + id = Column(String(36), primary_key=True) + name = Column(String(255)) + storage_id = Column(String(36)) + native_qtree_id = Column(String(255)) + native_filesystem_id = Column(String(255)) + quota_id = Column(String(255)) + path = Column(String(255)) + security_mode = Column(String(255)) + state = Column(String(255)) + + class AlertSource(BASE, DelfinBase): """Represents an alert source configuration.""" __tablename__ = 'alert_source' diff --git a/delfin/drivers/api.py b/delfin/drivers/api.py index 6b4c13d4a..4aa2abfa4 100644 --- a/delfin/drivers/api.py +++ b/delfin/drivers/api.py @@ -111,6 +111,14 @@ def list_filesystems(self, context, storage_id): else: return [] + def list_qtrees(self, context, storage_id): + """List all qtrees from storage system.""" + driver = self.driver_manager.get_driver(context, storage_id=storage_id) + if isinstance(driver, NASDriver): + return driver.list_qtrees(context) + else: + return [] + def add_trap_config(self, context, storage_id, trap_config): """Config the trap receiver in storage system.""" pass diff --git a/delfin/drivers/driver.py b/delfin/drivers/driver.py index 2989f160d..5aed3727f 100644 --- a/delfin/drivers/driver.py +++ b/delfin/drivers/driver.py @@ -134,11 +134,11 @@ def list_filesystems(self, context): """List all filesystems from storage system.""" pass - # @abc.abstractmethod - # def list_qtrees(self, context): - # """List all qtrees from storage system.""" - # pass - # + @abc.abstractmethod + def list_qtrees(self, context): + """List all qtrees from storage system.""" + pass + # @abc.abstractmethod # def list_shares(self, context): # """List all shares from storage system.""" diff --git a/delfin/drivers/fake_storage/__init__.py b/delfin/drivers/fake_storage/__init__.py 
index 7b46ec334..83acfb072 100644 --- a/delfin/drivers/fake_storage/__init__.py +++ b/delfin/drivers/fake_storage/__init__.py @@ -280,8 +280,8 @@ def list_filesystems(self, ctx): sts_len = len(constants.FilesystemStatus.ALL) - 1 alloc_type = list(constants.VolumeType.ALL) alloc_type_len = len(constants.VolumeType.ALL) - 1 - security = list(constants.FilesystemSecurityMode.ALL) - security_len = len(constants.FilesystemSecurityMode.ALL) - 1 + security = list(constants.NASSecurityMode.ALL) + security_len = len(constants.NASSecurityMode.ALL) - 1 c = { "name": "fake_filesystem_" + str(idx), "storage_id": self.storage_id, @@ -300,6 +300,29 @@ def list_filesystems(self, ctx): filesystem_list.append(c) return filesystem_list + def list_qtrees(self, ctx): + rd_qtrees_count = random.randint(MIN_FS, MAX_FS) + LOG.info("###########fake_qtrees for %s: %d" + % (self.storage_id, rd_qtrees_count)) + qtree_list = [] + for idx in range(rd_qtrees_count): + state = list(constants.QuotaState.ALL) + state_len = len(constants.QuotaState.ALL) - 1 + security = list(constants.NASSecurityMode.ALL) + security_len = len(constants.NASSecurityMode.ALL) - 1 + c = { + "name": "fake_qtree_" + str(idx), + "storage_id": self.storage_id, + "native_qtree_id": "fake_original_id_" + str(idx), + "native_filesystem_id": "fake_filesystem_id_" + str(idx), + "quota_id": random.randint(0, 1000), + "state": state[random.randint(0, state_len)], + "security_mode": security[random.randint(0, security_len)], + "path": "/", + } + qtree_list.append(c) + return qtree_list + def add_trap_config(self, context, trap_config): pass diff --git a/delfin/exception.py b/delfin/exception.py index e5ec75ca8..09cfdbdb5 100644 --- a/delfin/exception.py +++ b/delfin/exception.py @@ -186,6 +186,10 @@ class FilesystemNotFound(NotFound): msg_fmt = _("Filesystem {0} could not be found.") +class QtreeNotFound(NotFound): + msg_fmt = _("Qtree {0} could not be found.") + + class StorageDriverNotFound(NotFound): msg_fmt = _("Storage driver '{0}'could not be found.") diff --git a/delfin/task_manager/tasks/resources.py b/delfin/task_manager/tasks/resources.py index e19491ef0..f00385084 100644 --- a/delfin/task_manager/tasks/resources.py +++ b/delfin/task_manager/tasks/resources.py @@ -448,6 +448,57 @@ def remove(self): db.filesystem_delete_by_storage(self.context, self.storage_id) +class StorageQtreeTask(StorageResourceTask): + def __init__(self, context, storage_id): + super(StorageQtreeTask, self).__init__(context, storage_id) + + @check_deleted() + @set_synced_after() + def sync(self): + """ + :return: + """ + LOG.info('Syncing qtrees for storage id:{0}' + .format(self.storage_id)) + try: + # collect the qtrees list from driver and database + storage_qtrees = self.driver_api.list_qtrees( + self.context, self.storage_id) + db_qtrees = db.qtree_get_all( + self.context, filters={"storage_id": self.storage_id}) + + add_list, update_list, delete_id_list = self._classify_resources( + storage_qtrees, db_qtrees, 'native_qtree_id' + ) + + LOG.info('###StorageQtreeTask for {0}:add={1},delete={2},' + 'update={3}'.format(self.storage_id, + len(add_list), + len(delete_id_list), + len(update_list))) + if delete_id_list: + db.qtrees_delete(self.context, delete_id_list) + + if update_list: + db.qtrees_update(self.context, update_list) + + if add_list: + db.qtrees_create(self.context, add_list) + except AttributeError as e: + LOG.error(e) + except Exception as e: + msg = _('Failed to sync Qtrees entry in DB: {0}' + .format(e)) + LOG.error(msg) + else: + LOG.info("Syncing Qtrees 
successful!!!") + + def remove(self): + LOG.info('Remove qtrees for storage id:{0}' + .format(self.storage_id)) + db.qtree_delete_by_storage(self.context, self.storage_id) + + class PerformanceCollectionTask(object): def __init__(self): From bee5bd7e9ed135e0697e3f1bf68b3caebd0d06c8 Mon Sep 17 00:00:00 2001 From: Joseph Vazhappilly Date: Thu, 18 Feb 2021 11:28:38 +0530 Subject: [PATCH 5/9] Add Share resource support to delfin --- delfin/api/v1/router.py | 5 + delfin/api/v1/shares.py | 54 +++++++++ delfin/api/views/filesystems.py | 2 +- delfin/api/views/shares.py | 26 +++++ delfin/common/constants.py | 18 +++ delfin/db/api.py | 58 ++++++++++ delfin/db/sqlalchemy/api.py | 144 ++++++++++++++++++++++++ delfin/db/sqlalchemy/models.py | 15 +++ delfin/drivers/api.py | 8 ++ delfin/drivers/driver.py | 10 +- delfin/drivers/fake_storage/__init__.py | 25 ++++ delfin/exception.py | 4 + delfin/task_manager/tasks/resources.py | 51 +++++++++ 13 files changed, 414 insertions(+), 6 deletions(-) create mode 100644 delfin/api/v1/shares.py create mode 100644 delfin/api/views/shares.py diff --git a/delfin/api/v1/router.py b/delfin/api/v1/router.py index 6ca3ada9b..7ad4b1756 100644 --- a/delfin/api/v1/router.py +++ b/delfin/api/v1/router.py @@ -23,6 +23,7 @@ from delfin.api.v1 import performance from delfin.api.v1 import ports from delfin.api.v1 import qtrees +from delfin.api.v1 import shares from delfin.api.v1 import storage_pools from delfin.api.v1 import storages from delfin.api.v1 import volumes @@ -119,3 +120,7 @@ def _setup_routes(self, mapper): self.resources['qtrees'] = qtrees.create_resource() mapper.resource("qtrees", "qtrees", controller=self.resources['qtrees']) + + self.resources['shares'] = shares.create_resource() + mapper.resource("shares", "shares", + controller=self.resources['shares']) diff --git a/delfin/api/v1/shares.py b/delfin/api/v1/shares.py new file mode 100644 index 000000000..84004326a --- /dev/null +++ b/delfin/api/v1/shares.py @@ -0,0 +1,54 @@ +# Copyright 2021 The SODA Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
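# The sync() methods of these Storage*Task classes all rely on the inherited
# _classify_resources() helper. Below is a simplified, stand-alone sketch of
# that add/update/delete split, keyed on the native_*_id column; the real
# helper may differ in details.
def classify_by_native_id(driver_items, db_items, key):
    db_by_key = {item[key]: item for item in db_items}
    add_list, update_list = [], []
    for item in driver_items:
        matched = db_by_key.pop(item[key], None)
        if matched is None:
            add_list.append(item)                      # new on the array, not in DB
        else:
            update_list.append(dict(item, id=matched['id']))
    delete_id_list = [item['id'] for item in db_by_key.values()]  # gone from array
    return add_list, update_list, delete_id_list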
+ +from delfin import db +from delfin.api import api_utils +from delfin.api.common import wsgi +from delfin.api.views import shares as share_view + + +class ShareController(wsgi.Controller): + + def __init__(self): + super(ShareController, self).__init__() + self.search_options = ['name', 'status', 'id', 'storage_id', + 'native_share_id'] + + def _get_fs_search_options(self): + """Return shares search options allowed .""" + return self.search_options + + def index(self, req): + ctxt = req.environ['delfin.context'] + query_params = {} + query_params.update(req.GET) + # update options other than filters + sort_keys, sort_dirs = api_utils.get_sort_params(query_params) + marker, limit, offset = api_utils.get_pagination_params(query_params) + # strip out options except supported search options + api_utils.remove_invalid_options(ctxt, query_params, + self._get_fs_search_options()) + + shares = db.share_get_all(ctxt, marker, limit, sort_keys, + sort_dirs, query_params, offset) + return share_view.build_shares(shares) + + def show(self, req, id): + ctxt = req.environ['delfin.context'] + share = db.share_get(ctxt, id) + return share_view.build_share(share) + + +def create_resource(): + return wsgi.Resource(ShareController()) diff --git a/delfin/api/views/filesystems.py b/delfin/api/views/filesystems.py index c38406415..90e23e4cf 100644 --- a/delfin/api/views/filesystems.py +++ b/delfin/api/views/filesystems.py @@ -1,4 +1,4 @@ -# Copyright 2020 The SODA Authors. +# Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/delfin/api/views/shares.py b/delfin/api/views/shares.py new file mode 100644 index 000000000..2b10026f2 --- /dev/null +++ b/delfin/api/views/shares.py @@ -0,0 +1,26 @@ +# Copyright 2021 The SODA Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
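# A hypothetical paging loop against the new shares endpoint; host, port and
# page size are placeholders, and 'marker' follows the semantics described in
# the *_get_all docstrings of this series (the last item of the previous
# page).
import requests

url = 'http://localhost:8190/v1/shares'
params = {'limit': 100}
while True:
    batch = requests.get(url, params=params).json()['shares']
    if not batch:
        break
    for share in batch:
        print(share['name'], share['type'], share['path'])
    params['marker'] = batch[-1]['id']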
+import copy + + +def build_shares(shares): + # Build list of shares + views = [build_share(share) + for share in shares] + return dict(shares=views) + + +def build_share(share): + view = copy.deepcopy(share) + return dict(view) diff --git a/delfin/common/constants.py b/delfin/common/constants.py index 86c1a27ad..1f4ea17be 100644 --- a/delfin/common/constants.py +++ b/delfin/common/constants.py @@ -175,6 +175,24 @@ class QuotaState(object): ALL = (NORMAL, SOFT, HARD, ABNORMAL) +class ShareType(object): + CIFS = 'cifs' + NFS = 'nfs' + FTP = 'ftp' + UNKNOWN = 'unknown' + + ALL = (CIFS, NFS, FTP, UNKNOWN) + + +class ShareOfflineMode(object): + MANUAL = 'manual' + DOCUMENTS = 'documents' + PROGRAMS = 'programs' + NONE = 'none' + + ALL = (MANUAL, DOCUMENTS, PROGRAMS, NONE) + + # Enumerations for alert severity class Severity(object): FATAL = 'Fatal' diff --git a/delfin/db/api.py b/delfin/db/api.py index f3394e16c..4cd782e76 100644 --- a/delfin/db/api.py +++ b/delfin/db/api.py @@ -520,6 +520,64 @@ def qtree_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs, filters, offset) +def shares_create(context, values): + """Create multiple shares.""" + return IMPL.shares_create(context, values) + + +def shares_update(context, values): + """Update multiple shares.""" + return IMPL.shares_update(context, values) + + +def shares_delete(context, values): + """Delete multiple shares.""" + return IMPL.shares_delete(context, values) + + +def share_create(context, values): + """Create a share from the values dictionary.""" + return IMPL.share_create(context, values) + + +def share_update(context, share_id, values): + """Update a share with the values dictionary.""" + return IMPL.share_update(context, share_id, values) + + +def share_get(context, share_id): + """Get a share or raise an exception if it does not exist.""" + return IMPL.share_get(context, share_id) + + +def share_delete_by_storage(context, storage_id): + """Delete a share or raise an exception if it does not exist.""" + return IMPL.share_delete_by_storage(context, storage_id) + + +def share_get_all(context, marker=None, limit=None, sort_keys=None, + sort_dirs=None, filters=None, offset=None): + """Retrieves all shares. + If no sort parameters are specified then the returned volumes are sorted + first by the 'created_at' key and then by the 'id' key in descending + order. + :param context: context of this request, it's helpful to trace the request + :param marker: the last item of the previous page, used to determine the + next page of results to return + :param limit: maximum number of items to return + :param sort_keys: list of attributes by which results should be sorted, + paired with corresponding item in sort_dirs + :param sort_dirs: list of directions in which results should be sorted, + paired with corresponding item in sort_keys, for example + 'desc' for descending order + :param filters: dictionary of filters + :param offset: number of items to skip + :returns: list of controllers + """ + return IMPL.share_get_all(context, marker, limit, sort_keys, + sort_dirs, filters, offset) + + def access_info_create(context, values): """Create a storage access information that used to connect to a specific storage device. 
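# A sketch of how filtering behaves through db.share_get_all above; the
# wrapper function, context argument and filter values are hypothetical
# (ctxt stands for a delfin RequestContext such as req.environ
# ['delfin.context']).
from delfin import db


def show_share_filtering(ctxt):
    # Exact-match filtering on real Share columns:
    shares = db.share_get_all(ctxt, filters={'storage_id': '<storage-uuid>',
                                             'type': 'nfs'})
    # A filter key that is not a Share column fails is_valid_model_filters()
    # in this patch's sqlalchemy layer, so an empty list comes back instead
    # of an error:
    assert db.share_get_all(ctxt, filters={'no_such_column': 'x'}) == []
    return shares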
diff --git a/delfin/db/sqlalchemy/api.py b/delfin/db/sqlalchemy/api.py index d719add50..7ce9a6703 100644 --- a/delfin/db/sqlalchemy/api.py +++ b/delfin/db/sqlalchemy/api.py @@ -1294,6 +1294,148 @@ def _process_qtree_info_filters(query, filters): return query +def shares_create(context, shares): + """Create multiple shares.""" + session = get_session() + shares_refs = [] + with session.begin(): + + for share in shares: + LOG.debug('adding new share for native_share_id {0}:' + .format(share.get('native_share_id'))) + if not share.get('id'): + share['id'] = uuidutils.generate_uuid() + + share_ref = models.Share() + share_ref.update(share) + shares_refs.append(share_ref) + + session.add_all(shares_refs) + + return shares_refs + + +def shares_update(context, shares): + """Update multiple shares.""" + session = get_session() + + with session.begin(): + share_refs = [] + + for share in shares: + LOG.debug('updating share {0}:'.format( + share.get('id'))) + query = _share_get_query(context, session) + result = query.filter_by(id=share.get('id') + ).update(share) + + if not result: + LOG.error(exception.ShareNotFound(share.get( + 'id'))) + else: + share_refs.append(result) + + return share_refs + + +def shares_delete(context, shares_id_list): + """Delete multiple shares.""" + session = get_session() + with session.begin(): + for share_id in shares_id_list: + LOG.debug('deleting share {0}:'.format(share_id)) + query = _share_get_query(context, session) + result = query.filter_by(id=share_id).delete() + + if not result: + LOG.error(exception.ShareNotFound(share_id)) + return + + +def _share_get_query(context, session=None): + return model_query(context, models.Share, session=session) + + +def _share_get(context, share_id, session=None): + result = (_share_get_query(context, session=session) + .filter_by(id=share_id) + .first()) + + if not result: + raise exception.ShareNotFound(share_id) + + return result + + +def share_create(context, values): + """Create a share from the values dictionary.""" + if not values.get('id'): + values['id'] = uuidutils.generate_uuid() + + share_ref = models.Share() + share_ref.update(values) + + session = get_session() + with session.begin(): + session.add(share_ref) + + return _share_get(context, + share_ref['id'], + session=session) + + +def share_update(context, share_id, values): + """Update a share with the values dictionary.""" + session = get_session() + + with session.begin(): + query = _share_get_query(context, session) + result = query.filter_by(id=share_id).update(values) + + if not result: + raise exception.ShareNotFound(share_id) + + return result + + +def share_get(context, share_id): + """Get a share or raise an exception if it does not exist.""" + return _share_get(context, share_id) + + +def share_delete_by_storage(context, storage_id): + """Delete share or raise an exception if it does not exist.""" + _share_get_query(context).filter_by(storage_id=storage_id).delete() + + +def share_get_all(context, marker=None, limit=None, sort_keys=None, + sort_dirs=None, filters=None, offset=None): + """Retrieves all shares.""" + + session = get_session() + with session.begin(): + # Generate the query + query = _generate_paginate_query(context, session, models.Share, + marker, limit, sort_keys, sort_dirs, + filters, offset, + ) + # No Share would match, return empty list + if query is None: + return [] + return query.all() + + +@apply_like_filters(model=models.Share) +def _process_share_info_filters(query, filters): + """Common filter processing for shares queries.""" 
+ if filters: + if not is_valid_model_filters(models.Share, filters): + return + query = query.filter_by(**filters) + + return query + + def is_orm_value(obj): """Check if object is an ORM field or expression.""" return isinstance(obj, (sqlalchemy.orm.attributes.InstrumentedAttribute, @@ -1414,6 +1556,8 @@ def alert_source_get_all(context, marker=None, limit=None, sort_keys=None, _process_filesystem_info_filters, _filesystem_get), models.Qtree: (_qtree_get_query, _process_qtree_info_filters, _qtree_get), + models.Share: (_share_get_query, + _process_share_info_filters, _share_get), } diff --git a/delfin/db/sqlalchemy/models.py b/delfin/db/sqlalchemy/models.py index 3fe14821c..d2e488dab 100644 --- a/delfin/db/sqlalchemy/models.py +++ b/delfin/db/sqlalchemy/models.py @@ -207,6 +207,21 @@ class Qtree(BASE, DelfinBase): state = Column(String(255)) +class Share(BASE, DelfinBase): + """Represents a share object.""" + __tablename__ = 'shares' + id = Column(String(36), primary_key=True) + name = Column(String(255)) + storage_id = Column(String(36)) + native_share_id = Column(String(255)) + native_filesystem_id = Column(String(255)) + qtree_id = Column(String(255)) + type = Column(String(255)) + offline_mode = Column(String(255)) + oplock = Column(Boolean) + path = Column(String(255)) + + class AlertSource(BASE, DelfinBase): """Represents an alert source configuration.""" __tablename__ = 'alert_source' diff --git a/delfin/drivers/api.py b/delfin/drivers/api.py index 4aa2abfa4..3c0801c69 100644 --- a/delfin/drivers/api.py +++ b/delfin/drivers/api.py @@ -119,6 +119,14 @@ def list_qtrees(self, context, storage_id): else: return [] + def list_shares(self, context, storage_id): + """List all shares from storage system.""" + driver = self.driver_manager.get_driver(context, storage_id=storage_id) + if isinstance(driver, NASDriver): + return driver.list_shares(context) + else: + return [] + def add_trap_config(self, context, storage_id, trap_config): """Config the trap receiver in storage system.""" pass diff --git a/delfin/drivers/driver.py b/delfin/drivers/driver.py index 5aed3727f..9f2001980 100644 --- a/delfin/drivers/driver.py +++ b/delfin/drivers/driver.py @@ -139,11 +139,11 @@ def list_qtrees(self, context): """List all qtrees from storage system.""" pass - # @abc.abstractmethod - # def list_shares(self, context): - # """List all shares from storage system.""" - # pass - # + @abc.abstractmethod + def list_shares(self, context): + """List all shares from storage system.""" + pass + # @abc.abstractmethod # def list_quotas(self, context): # """List all quota from storage system.""" diff --git a/delfin/drivers/fake_storage/__init__.py b/delfin/drivers/fake_storage/__init__.py index 83acfb072..d0060ff56 100644 --- a/delfin/drivers/fake_storage/__init__.py +++ b/delfin/drivers/fake_storage/__init__.py @@ -323,6 +323,31 @@ def list_qtrees(self, ctx): qtree_list.append(c) return qtree_list + def list_shares(self, ctx): + rd_shares_count = random.randint(MIN_FS, MAX_FS) + LOG.info("###########fake_shares for %s: %d" + % (self.storage_id, rd_shares_count)) + share_list = [] + for idx in range(rd_shares_count): + boolean = [True, False] + st = list(constants.ShareType.ALL) + st_len = len(constants.ShareType.ALL) - 1 + mode = list(constants.ShareOfflineMode.ALL) + mode_len = len(constants.ShareOfflineMode.ALL) - 1 + c = { + "name": "fake_share_" + str(idx), + "storage_id": self.storage_id, + "native_share_id": "fake_original_id_" + str(idx), + "native_filesystem_id": "fake_filesystem_id_" + str(idx), + 
"qtree_id": random.randint(0, 1000), + "type": st[random.randint(0, st_len)], + "offline_mode": mode[random.randint(0, mode_len)], + "oplock": boolean[random.randint(0, 1)], + "path": "/", + } + share_list.append(c) + return share_list + def add_trap_config(self, context, trap_config): pass diff --git a/delfin/exception.py b/delfin/exception.py index 09cfdbdb5..9dd4950d7 100644 --- a/delfin/exception.py +++ b/delfin/exception.py @@ -190,6 +190,10 @@ class QtreeNotFound(NotFound): msg_fmt = _("Qtree {0} could not be found.") +class ShareNotFound(NotFound): + msg_fmt = _("Share {0} could not be found.") + + class StorageDriverNotFound(NotFound): msg_fmt = _("Storage driver '{0}'could not be found.") diff --git a/delfin/task_manager/tasks/resources.py b/delfin/task_manager/tasks/resources.py index f00385084..a6b0dc953 100644 --- a/delfin/task_manager/tasks/resources.py +++ b/delfin/task_manager/tasks/resources.py @@ -499,6 +499,57 @@ def remove(self): db.qtree_delete_by_storage(self.context, self.storage_id) +class StorageShareTask(StorageResourceTask): + def __init__(self, context, storage_id): + super(StorageShareTask, self).__init__(context, storage_id) + + @check_deleted() + @set_synced_after() + def sync(self): + """ + :return: + """ + LOG.info('Syncing shares for storage id:{0}' + .format(self.storage_id)) + try: + # collect the shares list from driver and database + storage_shares = self.driver_api.list_shares( + self.context, self.storage_id) + db_shares = db.share_get_all( + self.context, filters={"storage_id": self.storage_id}) + + add_list, update_list, delete_id_list = self._classify_resources( + storage_shares, db_shares, 'native_share_id' + ) + + LOG.info('###StorageShareTask for {0}:add={1},delete={2},' + 'update={3}'.format(self.storage_id, + len(add_list), + len(delete_id_list), + len(update_list))) + if delete_id_list: + db.shares_delete(self.context, delete_id_list) + + if update_list: + db.shares_update(self.context, update_list) + + if add_list: + db.shares_create(self.context, add_list) + except AttributeError as e: + LOG.error(e) + except Exception as e: + msg = _('Failed to sync Shares entry in DB: {0}' + .format(e)) + LOG.error(msg) + else: + LOG.info("Syncing Shares successful!!!") + + def remove(self): + LOG.info('Remove shares for storage id:{0}' + .format(self.storage_id)) + db.share_delete_by_storage(self.context, self.storage_id) + + class PerformanceCollectionTask(object): def __init__(self): From f939610283fc45769a7b7c35d285b58249d2afb4 Mon Sep 17 00:00:00 2001 From: Joseph Vazhappilly Date: Mon, 22 Feb 2021 12:32:28 +0530 Subject: [PATCH 6/9] Add quota limit params to qtree --- delfin/api/v1/qtrees.py | 2 +- delfin/db/sqlalchemy/models.py | 7 ++++++- delfin/drivers/fake_storage/__init__.py | 13 ++++++++++++- 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/delfin/api/v1/qtrees.py b/delfin/api/v1/qtrees.py index 4d0d0b507..6cebca28c 100644 --- a/delfin/api/v1/qtrees.py +++ b/delfin/api/v1/qtrees.py @@ -23,7 +23,7 @@ class QtreeController(wsgi.Controller): def __init__(self): super(QtreeController, self).__init__() self.search_options = ['name', 'state', 'id', 'storage_id', - 'native_filesystem_id', 'quota_id', + 'native_filesystem_id', 'native_qtree_id'] def _get_qtrees_search_options(self): diff --git a/delfin/db/sqlalchemy/models.py b/delfin/db/sqlalchemy/models.py index d2e488dab..96648e09e 100644 --- a/delfin/db/sqlalchemy/models.py +++ b/delfin/db/sqlalchemy/models.py @@ -201,7 +201,12 @@ class Qtree(BASE, DelfinBase): storage_id = 
Column(String(36)) native_qtree_id = Column(String(255)) native_filesystem_id = Column(String(255)) - quota_id = Column(String(255)) + capacity_hard_limit = Column(BigInteger) + capacity_soft_limit = Column(BigInteger) + file_hard_limit = Column(BigInteger) + file_soft_limit = Column(BigInteger) + used_capacity = Column(BigInteger) + file_count = Column(BigInteger) path = Column(String(255)) security_mode = Column(String(255)) state = Column(String(255)) diff --git a/delfin/drivers/fake_storage/__init__.py b/delfin/drivers/fake_storage/__init__.py index d0060ff56..001bea5c9 100644 --- a/delfin/drivers/fake_storage/__init__.py +++ b/delfin/drivers/fake_storage/__init__.py @@ -310,12 +310,23 @@ def list_qtrees(self, ctx): state_len = len(constants.QuotaState.ALL) - 1 security = list(constants.NASSecurityMode.ALL) security_len = len(constants.NASSecurityMode.ALL) - 1 + c_limit = random.randint(1000, 2000) + h_limit = int(random.randint(0, 100) * c_limit / 100) + s_limit = int(random.randint(0, 100) * h_limit / 100) + fc_limit = random.randint(1000, 2000) + fh_limit = int(random.randint(0, 100) * fc_limit / 100) + fs_limit = int(random.randint(0, 100) * fh_limit / 100) c = { "name": "fake_qtree_" + str(idx), "storage_id": self.storage_id, "native_qtree_id": "fake_original_id_" + str(idx), "native_filesystem_id": "fake_filesystem_id_" + str(idx), - "quota_id": random.randint(0, 1000), + "capacity_hard_limit": h_limit, + "capacity_soft_limit": s_limit, + "file_hard_limit": fh_limit, + "file_soft_limit": fs_limit, + "used_capacity": c_limit, + "file_count": fc_limit, "state": state[random.randint(0, state_len)], "security_mode": security[random.randint(0, security_len)], "path": "/", From b469a03bcdcda1fb1ebac0664f244bd5f2da626a Mon Sep 17 00:00:00 2001 From: Joseph Vazhappilly Date: Thu, 25 Feb 2021 11:46:31 +0530 Subject: [PATCH 7/9] Update unit tests --- delfin/tests/unit/db/test_db_api.py | 185 ++++++++++++++++++ delfin/tests/unit/drivers/test_api.py | 36 ++++ .../tests/unit/task_manager/test_resources.py | 101 ++++++++++ 3 files changed, 322 insertions(+) diff --git a/delfin/tests/unit/db/test_db_api.py b/delfin/tests/unit/db/test_db_api.py index 319f9be99..0871b2b15 100644 --- a/delfin/tests/unit/db/test_db_api.py +++ b/delfin/tests/unit/db/test_db_api.py @@ -399,6 +399,191 @@ def test_disk_get_all(self, mock_session): result = db_api.disk_get_all(ctxt, filters={'status': 'Normal'}) assert len(result) == 0 + @mock.patch('delfin.db.sqlalchemy.api.get_session') + def test_filesystem_get(self, mock_session): + fake_filesystem = {} + mock_session.return_value.__enter__.return_value.query.return_value \ + = fake_filesystem + result = db_api.filesystem_get(ctxt, + 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd') + assert len(result) == 0 + + @mock.patch('delfin.db.sqlalchemy.api.get_session') + def test_filesystems_update(self, mock_session): + filesystems = [{'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}] + mock_session.return_value.__enter__.return_value.query.return_value \ + = filesystems + result = db_api.filesystems_update(ctxt, filesystems) + assert len(result) == 1 + + @mock.patch('delfin.db.sqlalchemy.api.get_session') + def test_filesystem_update(self, mock_session): + filesystems = [{'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}] + mock_session.return_value.__enter__.return_value.query.return_value \ + = filesystems + result = db_api.filesystem_update( + ctxt, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd', filesystems) + assert len(result) == 0 + + @mock.patch('delfin.db.sqlalchemy.api.get_session') + def 
test_filesystems_delete(self, mock_session): + fake_filesystem = ['c5c91c98-91aa-40e6-85ac-37a1d3b32bd'] + mock_session.return_value.__enter__.return_value.query.return_value \ + = fake_filesystem + result = db_api.filesystems_delete(ctxt, fake_filesystem) + assert result is None + + @mock.patch('delfin.db.sqlalchemy.api.get_session') + def test_filesystems_create(self, mock_session): + fake_filesystem = [models.Volume()] + mock_session.return_value.__enter__.return_value.query.return_value \ + = fake_filesystem + result = db_api.filesystems_create(ctxt, fake_filesystem) + assert len(result) == 1 + + @mock.patch('delfin.db.sqlalchemy.api.get_session') + def test_filesystem_create(self, mock_session): + fake_filesystem = models.Volume() + mock_session.return_value.__enter__.return_value.query.return_value \ + = fake_filesystem + result = db_api.filesystem_create(ctxt, fake_filesystem) + assert len(result) == 0 + + @mock.patch('delfin.db.sqlalchemy.api.get_session') + def test_filesystem_get_all(self, mock_session): + fake_filesystem = [] + mock_session.return_value.__enter__.return_value.query.return_value \ + = fake_filesystem + result = db_api.filesystem_get_all(ctxt) + assert len(result) == 0 + + result = db_api.filesystem_get_all(ctxt, filters={'status': 'Normal'}) + assert len(result) == 0 + + @mock.patch('delfin.db.sqlalchemy.api.get_session') + def test_qtree_get(self, mock_session): + fake_qtree = {} + mock_session.return_value.__enter__.return_value.query.return_value \ + = fake_qtree + result = db_api.qtree_get(ctxt, + 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd') + assert len(result) == 0 + + @mock.patch('delfin.db.sqlalchemy.api.get_session') + def test_qtrees_update(self, mock_session): + qtrees = [{'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}] + mock_session.return_value.__enter__.return_value.query.return_value \ + = qtrees + result = db_api.qtrees_update(ctxt, qtrees) + assert len(result) == 1 + + @mock.patch('delfin.db.sqlalchemy.api.get_session') + def test_qtree_update(self, mock_session): + qtrees = [{'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}] + mock_session.return_value.__enter__.return_value.query.return_value \ + = qtrees + result = db_api.qtree_update(ctxt, + 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd', + qtrees) + assert len(result) == 0 + + @mock.patch('delfin.db.sqlalchemy.api.get_session') + def test_qtrees_delete(self, mock_session): + fake_qtree = ['c5c91c98-91aa-40e6-85ac-37a1d3b32bd'] + mock_session.return_value.__enter__.return_value.query.return_value \ + = fake_qtree + result = db_api.qtrees_delete(ctxt, fake_qtree) + assert result is None + + @mock.patch('delfin.db.sqlalchemy.api.get_session') + def test_qtrees_create(self, mock_session): + fake_qtree = [models.Volume()] + mock_session.return_value.__enter__.return_value.query.return_value \ + = fake_qtree + result = db_api.qtrees_create(ctxt, fake_qtree) + assert len(result) == 1 + + @mock.patch('delfin.db.sqlalchemy.api.get_session') + def test_qtree_create(self, mock_session): + fake_qtree = models.Volume() + mock_session.return_value.__enter__.return_value.query.return_value \ + = fake_qtree + result = db_api.qtree_create(ctxt, fake_qtree) + assert len(result) == 0 + + @mock.patch('delfin.db.sqlalchemy.api.get_session') + def test_qtree_get_all(self, mock_session): + fake_qtree = [] + mock_session.return_value.__enter__.return_value.query.return_value \ + = fake_qtree + result = db_api.qtree_get_all(ctxt) + assert len(result) == 0 + + result = db_api.qtree_get_all(ctxt, filters={'status': 'Normal'}) + assert 
len(result) == 0 + + @mock.patch('delfin.db.sqlalchemy.api.get_session') + def test_share_get(self, mock_session): + fake_share = {} + mock_session.return_value.__enter__.return_value.query.return_value \ + = fake_share + result = db_api.share_get(ctxt, + 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd') + assert len(result) == 0 + + @mock.patch('delfin.db.sqlalchemy.api.get_session') + def test_shares_update(self, mock_session): + shares = [{'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}] + mock_session.return_value.__enter__.return_value.query.return_value \ + = shares + result = db_api.shares_update(ctxt, shares) + assert len(result) == 1 + + @mock.patch('delfin.db.sqlalchemy.api.get_session') + def test_share_update(self, mock_session): + shares = [{'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}] + mock_session.return_value.__enter__.return_value.query.return_value \ + = shares + result = db_api.share_update(ctxt, + 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd', + shares) + assert len(result) == 0 + + @mock.patch('delfin.db.sqlalchemy.api.get_session') + def test_shares_delete(self, mock_session): + fake_share = ['c5c91c98-91aa-40e6-85ac-37a1d3b32bd'] + mock_session.return_value.__enter__.return_value.query.return_value \ + = fake_share + result = db_api.shares_delete(ctxt, fake_share) + assert result is None + + @mock.patch('delfin.db.sqlalchemy.api.get_session') + def test_shares_create(self, mock_session): + fake_share = [models.Volume()] + mock_session.return_value.__enter__.return_value.query.return_value \ + = fake_share + result = db_api.shares_create(ctxt, fake_share) + assert len(result) == 1 + + @mock.patch('delfin.db.sqlalchemy.api.get_session') + def test_share_create(self, mock_session): + fake_share = models.Volume() + mock_session.return_value.__enter__.return_value.query.return_value \ + = fake_share + result = db_api.share_create(ctxt, fake_share) + assert len(result) == 0 + + @mock.patch('delfin.db.sqlalchemy.api.get_session') + def test_share_get_all(self, mock_session): + fake_share = [] + mock_session.return_value.__enter__.return_value.query.return_value \ + = fake_share + result = db_api.share_get_all(ctxt) + assert len(result) == 0 + + result = db_api.share_get_all(ctxt, filters={'status': 'Normal'}) + assert len(result) == 0 + @mock.patch('delfin.db.sqlalchemy.api.get_session') def test_access_info_get_all(self, mock_session): fake_access_info = [] diff --git a/delfin/tests/unit/drivers/test_api.py b/delfin/tests/unit/drivers/test_api.py index 7e9c7896c..2c93ada98 100644 --- a/delfin/tests/unit/drivers/test_api.py +++ b/delfin/tests/unit/drivers/test_api.py @@ -288,6 +288,42 @@ def test_list_ports(self, driver_manager, mock_fake): driver_manager.assert_called_once() mock_fake.assert_called_once() + @mock.patch.object(FakeStorageDriver, 'list_filesystems') + @mock.patch('delfin.drivers.manager.DriverManager.get_driver') + def test_list_filesystems(self, driver_manager, mock_fake): + driver_manager.return_value = FakeStorageDriver() + mock_fake.return_value = [] + api = API() + storage_id = '12345' + + api.list_filesystems(context, storage_id) + driver_manager.assert_called_once() + mock_fake.assert_called_once() + + @mock.patch.object(FakeStorageDriver, 'list_qtrees') + @mock.patch('delfin.drivers.manager.DriverManager.get_driver') + def test_list_qtrees(self, driver_manager, mock_fake): + driver_manager.return_value = FakeStorageDriver() + mock_fake.return_value = [] + api = API() + storage_id = '12345' + + api.list_qtrees(context, storage_id) + driver_manager.assert_called_once() + 
mock_fake.assert_called_once() + + @mock.patch.object(FakeStorageDriver, 'list_shares') + @mock.patch('delfin.drivers.manager.DriverManager.get_driver') + def test_list_shares(self, driver_manager, mock_fake): + driver_manager.return_value = FakeStorageDriver() + mock_fake.return_value = [] + api = API() + storage_id = '12345' + + api.list_shares(context, storage_id) + driver_manager.assert_called_once() + mock_fake.assert_called_once() + @mock.patch.object(FakeStorageDriver, 'parse_alert') @mock.patch('delfin.drivers.manager.DriverManager.get_driver') @mock.patch('delfin.db.access_info_get') diff --git a/delfin/tests/unit/task_manager/test_resources.py b/delfin/tests/unit/task_manager/test_resources.py index 96c020129..674a45831 100644 --- a/delfin/tests/unit/task_manager/test_resources.py +++ b/delfin/tests/unit/task_manager/test_resources.py @@ -118,6 +118,58 @@ ] +filesystems_list = [{ + "id": "fe760f5c-7b4c-42b2-b1ed-ecb4f0b6d6bc", + "name": "fake_filesystem_" + str(id), + "storage_id": "793b26f9-6f16-4fd5-a6a2-d7453f050a41", + "native_filesystem_id": "fake_original_id_" + str(id), + "status": "offline", + "allocation_type": "thin", + "security_mode": "unix", + "total_capacity": 1055, + "used_capacity": 812, + "free_capacity": 243, + "compression": True, + "deduplication": False, + "worm": True +} +] + + +qtrees_list = [{ + "id": "251594c5-aac4-46ad-842f-3daca9176938", + "name": "fake_qtree_" + str(id), + "storage_id": "793b26f9-6f16-4fd5-a6a2-d7453f050a41", + "native_qtree_id": "fake_original_id_" + str(id), + "native_filesystem_id": "fake_filesystem_id_" + str(id), + "capacity_hard_limit": 316, + "capacity_soft_limit": 170, + "file_hard_limit": 1726, + "file_soft_limit": 759, + "used_capacity": 1093, + "file_count": 1726, + "path": "/", + "security_mode": "native", + "state": "normal" +} +] + + +shares_list = [{ + "id": "4e62c66a-39ef-43f2-9690-e936ca876574", + "name": "fake_share_" + str(id), + "storage_id": "793b26f9-6f16-4fd5-a6a2-d7453f050a41", + "native_share_id": "fake_original_id_" + str(id), + "native_filesystem_id": "fake_filesystem_id_" + str(id), + "qtree_id": "859", + "type": "nfs", + "offline_mode": "none", + "oplock": True, + "path": "/" +} +] + + class TestStorageDeviceTask(test.TestCase): def setUp(self): super(TestStorageDeviceTask, self).setUp() @@ -399,3 +451,52 @@ def test_remove(self, mock_disk_del): context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda') disk_obj.remove() self.assertTrue(mock_disk_del.called) + + +class TestStorageFilesystemTask(test.TestCase): + @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock') + @mock.patch('delfin.drivers.api.API.list_filesystems') + @mock.patch('delfin.db.filesystem_get_all') + @mock.patch('delfin.db.filesystems_delete') + @mock.patch('delfin.db.filesystems_update') + @mock.patch('delfin.db.filesystems_create') + def test_sync_successful(self, mock_filesystem_create, + mock_filesystem_update, + mock_filesystem_del, mock_filesystem_get_all, + mock_list_filesystems, + get_lock): + filesystem_obj = resources.StorageFilesystemTask( + context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda') + filesystem_obj.sync() + self.assertTrue(mock_list_filesystems.called) + self.assertTrue(mock_filesystem_get_all.called) + self.assertTrue(get_lock.called) + + # collect the filesystems from fake_storage + fake_storage_obj = fake_storage.FakeStorageDriver() + + # add the filesystems to DB + mock_list_filesystems.return_value =\ + fake_storage_obj.list_filesystems(context) + mock_filesystem_get_all.return_value = list() + filesystem_obj.sync() + 
self.assertTrue(mock_filesystem_create.called) + + # update the filesystems to DB + mock_list_filesystems.return_value = filesystems_list + mock_filesystem_get_all.return_value = filesystems_list + filesystem_obj.sync() + self.assertTrue(mock_filesystem_update.called) + + # delete the filesystems to DB + mock_list_filesystems.return_value = list() + mock_filesystem_get_all.return_value = filesystems_list + filesystem_obj.sync() + self.assertTrue(mock_filesystem_del.called) + + @mock.patch('delfin.db.filesystem_delete_by_storage') + def test_remove(self, mock_filesystem_del): + filesystem_obj = resources.StorageFilesystemTask( + context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda') + filesystem_obj.remove() + self.assertTrue(mock_filesystem_del.called) From aa15eaeeb79a0b737d3c5edd84c8578a24773552 Mon Sep 17 00:00:00 2001 From: Joseph Vazhappilly Date: Wed, 3 Mar 2021 13:10:20 +0530 Subject: [PATCH 8/9] Update model changes --- delfin/common/constants.py | 38 +++++++---------- delfin/db/sqlalchemy/models.py | 22 ++++------ delfin/drivers/fake_storage/__init__.py | 41 ++++++------------- .../tests/unit/task_manager/test_resources.py | 25 ++++------- 4 files changed, 41 insertions(+), 85 deletions(-) diff --git a/delfin/common/constants.py b/delfin/common/constants.py index 1f4ea17be..13c8257a2 100644 --- a/delfin/common/constants.py +++ b/delfin/common/constants.py @@ -151,31 +151,30 @@ class DiskLogicalType(object): class FilesystemStatus(object): NORMAL = 'normal' - OFFLINE = 'offline' - UNKNOWN = 'unknown' + FAULTY = 'faulty' + + ALL = (NORMAL, FAULTY) - ALL = (NORMAL, OFFLINE, UNKNOWN) + +class WORMType(object): + NON_WORM = 'non_worm' + AUDIT_LOG = 'audit_log' + COMPLIANCE = 'compliance' + ENTERPRISE = 'enterprise' + + ALL = (NON_WORM, AUDIT_LOG, COMPLIANCE, ENTERPRISE) class NASSecurityMode(object): MIXED = 'mixed' NATIVE = 'native' - WINDOWS = 'windows' + NTFS = 'ntfs' UNIX = 'unix' - ALL = (MIXED, NATIVE, WINDOWS, UNIX) - - -class QuotaState(object): - NORMAL = 'normal' - SOFT = 'soft_limit' - HARD = 'hard_limit' - ABNORMAL = 'abnormal' - - ALL = (NORMAL, SOFT, HARD, ABNORMAL) + ALL = (MIXED, NATIVE, NTFS, UNIX) -class ShareType(object): +class ShareProtocol(object): CIFS = 'cifs' NFS = 'nfs' FTP = 'ftp' @@ -184,15 +183,6 @@ class ShareType(object): ALL = (CIFS, NFS, FTP, UNKNOWN) -class ShareOfflineMode(object): - MANUAL = 'manual' - DOCUMENTS = 'documents' - PROGRAMS = 'programs' - NONE = 'none' - - ALL = (MANUAL, DOCUMENTS, PROGRAMS, NONE) - - # Enumerations for alert severity class Severity(object): FATAL = 'Fatal' diff --git a/delfin/db/sqlalchemy/models.py b/delfin/db/sqlalchemy/models.py index 96648e09e..d6bde33a6 100644 --- a/delfin/db/sqlalchemy/models.py +++ b/delfin/db/sqlalchemy/models.py @@ -182,15 +182,16 @@ class Filesystem(BASE, DelfinBase): name = Column(String(255)) storage_id = Column(String(36)) native_filesystem_id = Column(String(255)) + native_pool_id = Column(String(255)) status = Column(String(255)) - allocation_type = Column(String(255)) + type = Column(String(255)) security_mode = Column(String(255)) total_capacity = Column(BigInteger) used_capacity = Column(BigInteger) free_capacity = Column(BigInteger) - compression = Column(Boolean) - deduplication = Column(Boolean) - worm = Column(Boolean) + compressed = Column(Boolean) + deduplicated = Column(Boolean) + worm = Column(String(255)) class Qtree(BASE, DelfinBase): @@ -201,15 +202,8 @@ class Qtree(BASE, DelfinBase): storage_id = Column(String(36)) native_qtree_id = Column(String(255)) native_filesystem_id = 
Column(String(255)) - capacity_hard_limit = Column(BigInteger) - capacity_soft_limit = Column(BigInteger) - file_hard_limit = Column(BigInteger) - file_soft_limit = Column(BigInteger) - used_capacity = Column(BigInteger) - file_count = Column(BigInteger) path = Column(String(255)) security_mode = Column(String(255)) - state = Column(String(255)) class Share(BASE, DelfinBase): @@ -220,10 +214,8 @@ class Share(BASE, DelfinBase): storage_id = Column(String(36)) native_share_id = Column(String(255)) native_filesystem_id = Column(String(255)) - qtree_id = Column(String(255)) - type = Column(String(255)) - offline_mode = Column(String(255)) - oplock = Column(Boolean) + native_qtree_id = Column(String(255)) + protocol = Column(String(255)) path = Column(String(255)) diff --git a/delfin/drivers/fake_storage/__init__.py b/delfin/drivers/fake_storage/__init__.py index 001bea5c9..1c1da83c2 100644 --- a/delfin/drivers/fake_storage/__init__.py +++ b/delfin/drivers/fake_storage/__init__.py @@ -278,6 +278,8 @@ def list_filesystems(self, ctx): boolean = [True, False] sts = list(constants.FilesystemStatus.ALL) sts_len = len(constants.FilesystemStatus.ALL) - 1 + worm = list(constants.WORMType.ALL) + worm_len = len(constants.WORMType.ALL) - 1 alloc_type = list(constants.VolumeType.ALL) alloc_type_len = len(constants.VolumeType.ALL) - 1 security = list(constants.NASSecurityMode.ALL) @@ -286,16 +288,16 @@ def list_filesystems(self, ctx): "name": "fake_filesystem_" + str(idx), "storage_id": self.storage_id, "native_filesystem_id": "fake_original_id_" + str(idx), + "native_pool_id": "fake_pool_id_" + str(idx), "status": sts[random.randint(0, sts_len)], - "allocation_type": - alloc_type[random.randint(0, alloc_type_len)], + "type": alloc_type[random.randint(0, alloc_type_len)], "security_mode": security[random.randint(0, security_len)], "total_capacity": total, "used_capacity": used, "free_capacity": free, - "worm": boolean[random.randint(0, 1)], - "deduplication": boolean[random.randint(0, 1)], - "compression": boolean[random.randint(0, 1)], + "worm": worm[random.randint(0, worm_len)], + "deduplicated": boolean[random.randint(0, 1)], + "compressed": boolean[random.randint(0, 1)], } filesystem_list.append(c) return filesystem_list @@ -306,28 +308,14 @@ def list_qtrees(self, ctx): % (self.storage_id, rd_qtrees_count)) qtree_list = [] for idx in range(rd_qtrees_count): - state = list(constants.QuotaState.ALL) - state_len = len(constants.QuotaState.ALL) - 1 security = list(constants.NASSecurityMode.ALL) security_len = len(constants.NASSecurityMode.ALL) - 1 - c_limit = random.randint(1000, 2000) - h_limit = int(random.randint(0, 100) * c_limit / 100) - s_limit = int(random.randint(0, 100) * h_limit / 100) - fc_limit = random.randint(1000, 2000) - fh_limit = int(random.randint(0, 100) * fc_limit / 100) - fs_limit = int(random.randint(0, 100) * fh_limit / 100) + c = { "name": "fake_qtree_" + str(idx), "storage_id": self.storage_id, "native_qtree_id": "fake_original_id_" + str(idx), "native_filesystem_id": "fake_filesystem_id_" + str(idx), - "capacity_hard_limit": h_limit, - "capacity_soft_limit": s_limit, - "file_hard_limit": fh_limit, - "file_soft_limit": fs_limit, - "used_capacity": c_limit, - "file_count": fc_limit, - "state": state[random.randint(0, state_len)], "security_mode": security[random.randint(0, security_len)], "path": "/", } @@ -340,20 +328,15 @@ def list_shares(self, ctx): % (self.storage_id, rd_shares_count)) share_list = [] for idx in range(rd_shares_count): - boolean = [True, False] - st = 
list(constants.ShareType.ALL) - st_len = len(constants.ShareType.ALL) - 1 - mode = list(constants.ShareOfflineMode.ALL) - mode_len = len(constants.ShareOfflineMode.ALL) - 1 + pro = list(constants.ShareProtocol.ALL) + pro_len = len(constants.ShareProtocol.ALL) - 1 c = { "name": "fake_share_" + str(idx), "storage_id": self.storage_id, "native_share_id": "fake_original_id_" + str(idx), "native_filesystem_id": "fake_filesystem_id_" + str(idx), - "qtree_id": random.randint(0, 1000), - "type": st[random.randint(0, st_len)], - "offline_mode": mode[random.randint(0, mode_len)], - "oplock": boolean[random.randint(0, 1)], + "native_qtree_id": "fake_qtree_id_" + str(idx), + "protocol": pro[random.randint(0, pro_len)], "path": "/", } share_list.append(c) diff --git a/delfin/tests/unit/task_manager/test_resources.py b/delfin/tests/unit/task_manager/test_resources.py index 674a45831..bad85e9e0 100644 --- a/delfin/tests/unit/task_manager/test_resources.py +++ b/delfin/tests/unit/task_manager/test_resources.py @@ -123,15 +123,15 @@ "name": "fake_filesystem_" + str(id), "storage_id": "793b26f9-6f16-4fd5-a6a2-d7453f050a41", "native_filesystem_id": "fake_original_id_" + str(id), - "status": "offline", - "allocation_type": "thin", + "status": "normal", + "type": "thin", "security_mode": "unix", "total_capacity": 1055, "used_capacity": 812, "free_capacity": 243, - "compression": True, - "deduplication": False, - "worm": True + "compressed": True, + "deduplicated": False, + "worm": "non_worm" } ] @@ -142,15 +142,8 @@ "storage_id": "793b26f9-6f16-4fd5-a6a2-d7453f050a41", "native_qtree_id": "fake_original_id_" + str(id), "native_filesystem_id": "fake_filesystem_id_" + str(id), - "capacity_hard_limit": 316, - "capacity_soft_limit": 170, - "file_hard_limit": 1726, - "file_soft_limit": 759, - "used_capacity": 1093, - "file_count": 1726, "path": "/", - "security_mode": "native", - "state": "normal" + "security_mode": "native" } ] @@ -161,10 +154,8 @@ "storage_id": "793b26f9-6f16-4fd5-a6a2-d7453f050a41", "native_share_id": "fake_original_id_" + str(id), "native_filesystem_id": "fake_filesystem_id_" + str(id), - "qtree_id": "859", - "type": "nfs", - "offline_mode": "none", - "oplock": True, + "native_qtree_id": "859", + "protocol": "nfs", "path": "/" } ] From e4c64a793615fc4497598fb341eda5ab15d81e91 Mon Sep 17 00:00:00 2001 From: Joseph Vazhappilly Date: Thu, 11 Mar 2021 15:44:56 +0530 Subject: [PATCH 9/9] Implement review comments --- delfin/common/constants.py | 4 +- delfin/db/sqlalchemy/models.py | 56 +++++++++++------------ delfin/drivers/api.py | 16 ++----- delfin/drivers/driver.py | 61 +++---------------------- delfin/drivers/fake_storage/__init__.py | 2 +- delfin/task_manager/tasks/resources.py | 9 ++++ 6 files changed, 49 insertions(+), 99 deletions(-) diff --git a/delfin/common/constants.py b/delfin/common/constants.py index 13c8257a2..992d29b8c 100644 --- a/delfin/common/constants.py +++ b/delfin/common/constants.py @@ -178,9 +178,9 @@ class ShareProtocol(object): CIFS = 'cifs' NFS = 'nfs' FTP = 'ftp' - UNKNOWN = 'unknown' + HDFS = 'hdfs' - ALL = (CIFS, NFS, FTP, UNKNOWN) + ALL = (CIFS, NFS, FTP, HDFS) # Enumerations for alert severity diff --git a/delfin/db/sqlalchemy/models.py b/delfin/db/sqlalchemy/models.py index d6bde33a6..c9bb2abea 100644 --- a/delfin/db/sqlalchemy/models.py +++ b/delfin/db/sqlalchemy/models.py @@ -65,19 +65,19 @@ class Storage(BASE, DelfinBase): __tablename__ = 'storages' id = Column(String(36), primary_key=True) name = Column(String(255)) - vendor = Column(String(255)) description = 
Column(String(255)) - model = Column(String(255)) + location = Column(String(255)) status = Column(String(255)) + sync_status = Column(Integer, default=constants.SyncStatus.SYNCED) + vendor = Column(String(255)) + model = Column(String(255)) serial_number = Column(String(255)) firmware_version = Column(String(255)) - location = Column(String(255)) total_capacity = Column(BigInteger) used_capacity = Column(BigInteger) free_capacity = Column(BigInteger) raw_capacity = Column(BigInteger) subscribed_capacity = Column(BigInteger) - sync_status = Column(Integer, default=constants.SyncStatus.SYNCED) deleted_at = Column(DateTime) deleted = Column(Boolean, default=False) @@ -86,14 +86,14 @@ class Volume(BASE, DelfinBase): """Represents a volume object.""" __tablename__ = 'volumes' id = Column(String(36), primary_key=True) + native_volume_id = Column(String(255)) name = Column(String(255)) - storage_id = Column(String(36)) - native_storage_pool_id = Column(String(255)) description = Column(String(255)) + type = Column(String(255)) status = Column(String(255)) - native_volume_id = Column(String(255)) + storage_id = Column(String(36)) + native_storage_pool_id = Column(String(255)) wwn = Column(String(255)) - type = Column(String(255)) total_capacity = Column(BigInteger) used_capacity = Column(BigInteger) free_capacity = Column(BigInteger) @@ -105,12 +105,12 @@ class StoragePool(BASE, DelfinBase): """Represents a storage_pool object.""" __tablename__ = 'storage_pools' id = Column(String(36), primary_key=True) - name = Column(String(255)) - storage_id = Column(String(36)) native_storage_pool_id = Column(String(255)) + name = Column(String(255)) description = Column(String(255)) - status = Column(String(255)) storage_type = Column(String(255)) + status = Column(String(255)) + storage_id = Column(String(36)) total_capacity = Column(BigInteger) used_capacity = Column(BigInteger) free_capacity = Column(BigInteger) @@ -123,19 +123,19 @@ class Disk(BASE, DelfinBase): id = Column(String(36), primary_key=True) native_disk_id = Column(String(255)) name = Column(String(255)) + physical_type = Column(String(255)) + logical_type = Column(String(255)) + status = Column(String(255)) + location = Column(String(255)) + storage_id = Column(String(255)) + native_disk_group_id = Column(String(255)) serial_number = Column(String(255)) manufacturer = Column(String(255)) model = Column(String(255)) firmware = Column(String(255)) speed = Column(Integer) capacity = Column(BigInteger) - status = Column(String(255)) - physical_type = Column(String(255)) - logical_type = Column(String(255)) health_score = Column(Integer) - native_disk_group_id = Column(String(255)) - storage_id = Column(String(255)) - location = Column(String(255)) class Controller(BASE, DelfinBase): @@ -159,14 +159,14 @@ class Port(BASE, DelfinBase): native_port_id = Column(String(255)) name = Column(String(255)) location = Column(String(255)) - connection_status = Column(String(255)) - health_status = Column(String(255)) type = Column(String(255)) logical_type = Column(String(255)) - speed = Column(Integer) - max_speed = Column(Integer) + connection_status = Column(String(255)) + health_status = Column(String(255)) storage_id = Column(String(36)) native_parent_id = Column(String(255)) + speed = Column(Integer) + max_speed = Column(Integer) wwn = Column(String(255)) mac_address = Column(String(255)) ipv4 = Column(String(255)) @@ -179,12 +179,12 @@ class Filesystem(BASE, DelfinBase): """Represents a filesystem object.""" __tablename__ = 'filesystems' id = 
Column(String(36), primary_key=True) + native_filesystem_id = Column(String(255)) name = Column(String(255)) + type = Column(String(255)) + status = Column(String(255)) storage_id = Column(String(36)) - native_filesystem_id = Column(String(255)) native_pool_id = Column(String(255)) - status = Column(String(255)) - type = Column(String(255)) security_mode = Column(String(255)) total_capacity = Column(BigInteger) used_capacity = Column(BigInteger) @@ -198,11 +198,11 @@ class Qtree(BASE, DelfinBase): """Represents a qtree object.""" __tablename__ = 'qtrees' id = Column(String(36), primary_key=True) + native_qtree_id = Column(String(255)) name = Column(String(255)) + path = Column(String(255)) storage_id = Column(String(36)) - native_qtree_id = Column(String(255)) native_filesystem_id = Column(String(255)) - path = Column(String(255)) security_mode = Column(String(255)) @@ -210,13 +210,13 @@ class Share(BASE, DelfinBase): """Represents a share object.""" __tablename__ = 'shares' id = Column(String(36), primary_key=True) + native_share_id = Column(String(255)) name = Column(String(255)) + path = Column(String(255)) storage_id = Column(String(36)) - native_share_id = Column(String(255)) native_filesystem_id = Column(String(255)) native_qtree_id = Column(String(255)) protocol = Column(String(255)) - path = Column(String(255)) class AlertSource(BASE, DelfinBase): diff --git a/delfin/drivers/api.py b/delfin/drivers/api.py index 3c0801c69..0880a8115 100644 --- a/delfin/drivers/api.py +++ b/delfin/drivers/api.py @@ -20,7 +20,6 @@ from delfin import db from delfin.drivers import helper from delfin.drivers import manager -from delfin.drivers.driver import NASDriver LOG = log.getLogger(__name__) @@ -106,26 +105,17 @@ def list_disks(self, context, storage_id): def list_filesystems(self, context, storage_id): """List all filesystems from storage system.""" driver = self.driver_manager.get_driver(context, storage_id=storage_id) - if isinstance(driver, NASDriver): - return driver.list_filesystems(context) - else: - return [] + return driver.list_filesystems(context) def list_qtrees(self, context, storage_id): """List all qtrees from storage system.""" driver = self.driver_manager.get_driver(context, storage_id=storage_id) - if isinstance(driver, NASDriver): - return driver.list_qtrees(context) - else: - return [] + return driver.list_qtrees(context) def list_shares(self, context, storage_id): """List all shares from storage system.""" driver = self.driver_manager.get_driver(context, storage_id=storage_id) - if isinstance(driver, NASDriver): - return driver.list_shares(context) - else: - return [] + return driver.list_shares(context) def add_trap_config(self, context, storage_id, trap_config): """Config the trap receiver in storage system.""" diff --git a/delfin/drivers/driver.py b/delfin/drivers/driver.py index 9f2001980..9522f3f72 100644 --- a/delfin/drivers/driver.py +++ b/delfin/drivers/driver.py @@ -116,66 +116,17 @@ def clear_alert(self, context, sequence_number): """Clear alert from storage system.""" pass - -@six.add_metaclass(abc.ABCMeta) -class NASDriver(StorageDriver): - - def __init__(self, **kwargs): - """ - :param kwargs: A dictionary, include access information. Pay - attention that it's not safe to save username and password - in memory, so suggest each driver use them to get session - instead of save them in memory directly. 
- """ - super(NASDriver, self).__init__(**kwargs) - - @abc.abstractmethod def list_filesystems(self, context): """List all filesystems from storage system.""" - pass + raise NotImplementedError( + "Driver API list_filesystems() is not Implemented") - @abc.abstractmethod def list_qtrees(self, context): """List all qtrees from storage system.""" - pass + raise NotImplementedError( + "Driver API list_qtrees() is not Implemented") - @abc.abstractmethod def list_shares(self, context): """List all shares from storage system.""" - pass - - # @abc.abstractmethod - # def list_quotas(self, context): - # """List all quota from storage system.""" - # pass - - -@six.add_metaclass(abc.ABCMeta) -class SANDriver(StorageDriver): - - def __init__(self, **kwargs): - """ - :param kwargs: A dictionary, include access information. Pay - attention that it's not safe to save username and password - in memory, so suggest each driver use them to get session - instead of save them in memory directly. - """ - super(SANDriver, self).__init__(**kwargs) - - # @abc.abstractmethod - # def list_blocks(self, context): - # """List all blocks from storage system.""" - # pass - - -@six.add_metaclass(abc.ABCMeta) -class UnifiedStorageDriver(SANDriver, NASDriver): - - def __init__(self, **kwargs): - """ - :param kwargs: A dictionary, include access information. Pay - attention that it's not safe to save username and password - in memory, so suggest each driver use them to get session - instead of save them in memory directly. - """ - super(UnifiedStorageDriver, self).__init__(**kwargs) + raise NotImplementedError( + "Driver API list_shares() is not Implemented") diff --git a/delfin/drivers/fake_storage/__init__.py b/delfin/drivers/fake_storage/__init__.py index 1c1da83c2..0371087c9 100644 --- a/delfin/drivers/fake_storage/__init__.py +++ b/delfin/drivers/fake_storage/__init__.py @@ -83,7 +83,7 @@ def _wait(f, *a, **k): return _wait -class FakeStorageDriver(driver.UnifiedStorageDriver): +class FakeStorageDriver(driver.StorageDriver): """FakeStorageDriver shows how to implement the StorageDriver, it also plays a role as faker to fake data for being tested by clients. """ diff --git a/delfin/task_manager/tasks/resources.py b/delfin/task_manager/tasks/resources.py index a6b0dc953..ad15d3755 100644 --- a/delfin/task_manager/tasks/resources.py +++ b/delfin/task_manager/tasks/resources.py @@ -435,6 +435,9 @@ def sync(self): db.filesystems_create(self.context, add_list) except AttributeError as e: LOG.error(e) + except NotImplementedError: + # Ignore this exception because driver may not support it. + pass except Exception as e: msg = _('Failed to sync filesystems entry in DB: {0}' .format(e)) @@ -486,6 +489,9 @@ def sync(self): db.qtrees_create(self.context, add_list) except AttributeError as e: LOG.error(e) + except NotImplementedError: + # Ignore this exception because driver may not support it. + pass except Exception as e: msg = _('Failed to sync Qtrees entry in DB: {0}' .format(e)) @@ -537,6 +543,9 @@ def sync(self): db.shares_create(self.context, add_list) except AttributeError as e: LOG.error(e) + except NotImplementedError: + # Ignore this exception because driver may not support it. + pass except Exception as e: msg = _('Failed to sync Shares entry in DB: {0}' .format(e))