From 73143bb5eb3cc3946f0980f0ec015f3d6bd7e714 Mon Sep 17 00:00:00 2001 From: "Matthew G. Monteleone" Date: Fri, 26 Aug 2022 17:47:34 -0700 Subject: [PATCH 01/11] Fixed Typo for PR #118. Implemented first changes for #121.. First 3 tests passed, moving on to next group of tests. --- atlasapi/atlas.py | 59 ++++++++++++++-------------------------- atlasapi/network.py | 12 ++++---- atlasapi/settings.py | 3 +- atlasapi/specs.py | 1 + tests/test_events.py | 3 ++ tests/test_monitoring.py | 15 ++++++---- 6 files changed, 41 insertions(+), 52 deletions(-) diff --git a/atlasapi/atlas.py b/atlasapi/atlas.py index 237f514..f75eccf 100644 --- a/atlasapi/atlas.py +++ b/atlasapi/atlas.py @@ -25,7 +25,8 @@ from dateutil.relativedelta import relativedelta from atlasapi.specs import Host, ListOfHosts, DatabaseUsersUpdatePermissionsSpecs, DatabaseUsersPermissionsSpecs, \ ReplicaSetTypes -from atlasapi.measurements import AtlasMeasurementTypes, AtlasMeasurementValue, AtlasMeasurement, OptionalAtlasMeasurement +from atlasapi.measurements import AtlasMeasurementTypes, AtlasMeasurementValue, AtlasMeasurement, \ + OptionalAtlasMeasurement from typing import Union, Iterator, List, Optional from atlasapi.atlas_types import OptionalInt, OptionalBool, ListofDict from atlasapi.clusters import ClusterConfig, ShardedClusterConfig, AtlasBasicReplicaSet, \ @@ -446,52 +447,31 @@ def __init__(self, atlas): self.host_list_with_measurements: Optional[List[Host]] = list() self.host_list: Optional[List[Host]] = list() - def _get_all_hosts(self, pageNum=Settings.pageNum, - itemsPerPage=Settings.itemsPerPage, - iterable=False): + def _get_all_hosts(self): """Get All Hosts (actually processes) Internal use only, actual data retrieval comes from properties host_list and host_names - url: https://docs.atlas.mongodb.com/reference/api/alerts-get-all-alerts/ + url: https://www.mongodb.com/docs/atlas/reference/api/processes-get-all/ Keyword Args: - pageNum (int): Page number - itemsPerPage (int): Number of Users per Page - iterable (bool): To return an iterable high level object instead of a low level API response - Returns: - ListOfHosts or dict: Iterable object representing this function OR Response payload - Raises: - ErrPaginationLimits: Out of limits - :rtype: Union[ListOfHosts, dict] - :type iterable: OptionalBool - :type itemsPerPage: OptionalInt - :type pageNum: OptionalInt + Returns: + ListOfHosts: Iterable object representing this function """ + uri = Settings.api_resources["Monitoring and Logs"]["Get all processes for group"].format( + group_id=self.atlas.group) - # Check limits and raise an Exception if needed - ErrPaginationLimits.checkAndRaise(pageNum, itemsPerPage) - - if iterable: - item_list = list(HostsGetAll(self.atlas, pageNum, itemsPerPage)) - obj_list = list() - for item in item_list: - obj_list.append(Host(item)) - - return_val = obj_list - else: - uri = Settings.api_resources["Monitoring and Logs"]["Get all processes for group"].format( - group_id=self.atlas.group, - page_num=pageNum, - items_per_page=itemsPerPage) - - return_val = self.atlas.network.get(Settings.BASE_URL + uri) - - return return_val + try: + response = self.atlas.network.get(Settings.BASE_URL+uri) + for page in response: + for each_process in page.get("results"): + yield Host(each_process) + except Exception as e: + raise e - def fill_host_list(self, for_cluster: Optional[str] = None) -> List[Host]: + def fill_host_list(self, for_cluster: Optional[str] = None) -> Iterable[Host]: """ Fills the `self.hostname` property with the current hosts for 
the project/group. @@ -502,9 +482,9 @@ def fill_host_list(self, for_cluster: Optional[str] = None) -> List[Host]: for_cluster (str): The name of the cluster for filter the host list. Returns: - List[Host]: A lost of `Host` objects + Iterable[Host]: Yields `Host` objects """ - host_list = self._get_all_hosts(iterable=True) + host_list = self._get_all_hosts() if for_cluster: out_list = list() for host in host_list: @@ -514,7 +494,7 @@ def fill_host_list(self, for_cluster: Optional[str] = None) -> List[Host]: out_list.append(host) self.host_list = out_list else: - self.host_list = self._get_all_hosts(iterable=True) + self.host_list = list(self._get_all_hosts()) return self.host_list @@ -2105,6 +2085,7 @@ class HostsGetAll(AtlasPagination): def __init__(self, atlas: Atlas, pageNum: int, itemsPerPage: int): super().__init__(atlas, atlas.Hosts._get_all_hosts, pageNum, itemsPerPage) + class DatabaseUsersGetAll(AtlasPagination): """Pagination for Database User : Get All""" diff --git a/atlasapi/network.py b/atlasapi/network.py index a34ceb2..170db94 100644 --- a/atlasapi/network.py +++ b/atlasapi/network.py @@ -153,22 +153,22 @@ def _paginate(self, method , url, **kwargs): next_page = self.answer(request.status_code, request.json()) yield next_page except Exception as e: - logger.warning('Request: {}'.format(request.request.__dict__)) - logger.warning('Response: {}'.format(request.__dict__)) + #logger.warning('Request: {}'.format(request.__dict__)) + #logger.warning('Response: {}'.format(request.__dict__)) raise e finally: if session: session.close() - + def get(self, uri): """Get request - + Args: uri (str): URI - + Returns: Json: API response - + Raises: Exception: Network issue """ diff --git a/atlasapi/settings.py b/atlasapi/settings.py index 2cc8902..a9b99f6 100644 --- a/atlasapi/settings.py +++ b/atlasapi/settings.py @@ -34,8 +34,7 @@ class Settings: "Get One Project": URI_STUB + "/groups/{GROUP_ID}" }, "Monitoring and Logs": { - "Get all processes for group": "/api/atlas/v1.0/groups/{group_id}/processes?pageNum={" - "page_num}&itemsPerPage={items_per_page}", + "Get all processes for group": URI_STUB + "/groups/{group_id}/processes", "Get information for process in group": "/api/atlas/v1.0/groups/%s/processes/%s:&s?pageNum=%d" "&itemsPerPage=%d", "Get measurement for host": "/api/atlas/v1.0/groups/{group_id}/processes/{host}:{" diff --git a/atlasapi/specs.py b/atlasapi/specs.py index ecb442e..460c28f 100644 --- a/atlasapi/specs.py +++ b/atlasapi/specs.py @@ -331,6 +331,7 @@ def data_partition_stats(self, atlas_obj, granularity: Optional[AtlasGranulariti Hard codes the name of the partition to `data` and returns all metrics. Args: + period: atlas_obj: Instantiated Atlas instance to access the API granularity (Optional[AtlasGranularitues]): The granularity for the disk measurements. atlas_obj (atlasapi.atlas.Atlas): A configured Atlas instance to connect to the API with. diff --git a/tests/test_events.py b/tests/test_events.py index 82cbb1b..ce79d78 100644 --- a/tests/test_events.py +++ b/tests/test_events.py @@ -83,3 +83,6 @@ def test_04_CPS(self): verbose_logger.warning(f'The count of CPS Events is {len}') test_04_CPS.basic = True + + + #TODO: Add tests which confirm validity of the returned object. 
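The reworked `_get_all_hosts` above is now a generator that walks the paginated "Get all processes for group" response and yields `Host` objects, and `fill_host_list` materializes that generator into `self.host_list`. A minimal consumption sketch follows (illustration only, not part of any commit in this series; the credentials and the cluster name are placeholders):

    from atlasapi.atlas import Atlas

    atlas = Atlas("<user>", "<api_key>", "<group_id>")  # hypothetical credentials

    # _get_all_hosts() yields one Host per process entry, page by page.
    for host in atlas.Hosts._get_all_hosts():
        print(host.hostname)

    # fill_host_list() stores the hosts on atlas.Hosts.host_list,
    # optionally filtered to a single cluster.
    atlas.Hosts.fill_host_list(for_cluster="monitoringtest")
    print(len(atlas.Hosts.host_list))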
diff --git a/tests/test_monitoring.py b/tests/test_monitoring.py index 4501427..17f0831 100644 --- a/tests/test_monitoring.py +++ b/tests/test_monitoring.py @@ -25,20 +25,25 @@ # noinspection PyTypeChecker class MeasurementTests(BaseTests): + + def test_00_get_method_test(self): + for each_host in self.a.Hosts._get_all_hosts(): + self.assertIsInstance(each_host, Host) + + test_00_get_method_test.basic = True def test_00_get_hosts_count(self): atlas: Atlas = self.a atlas.Hosts.fill_host_list() + logger.warning("Found {len(atlas.Hosts.host_list)} hosts") self.assertGreater(len(atlas.Hosts.host_list), 2) test_00_get_hosts_count.basic = True def test_01_get_cluster_names(self): self.a.Hosts.fill_host_list() - cluster_list = self.a.Hosts.cluster_list - - # for each_cluster in cluster_list: - # pprint(each_cluster) - self.assertGreater(len(cluster_list), 0) + cluster_list = self.a.Hosts.host_names + for each_cluster in cluster_list: + self.assertIsInstance(each_cluster, str) test_01_get_cluster_names.basic = True From 9c67a6e86365adabf4772c09a2c784580ff237ba Mon Sep 17 00:00:00 2001 From: "Matthew G. Monteleone" Date: Fri, 26 Aug 2022 17:47:34 -0700 Subject: [PATCH 02/11] Implemented first changes for #121.. First 3 tests passed, moving on to next group of tests. --- atlasapi/atlas.py | 59 ++++++++++++++-------------------------- atlasapi/network.py | 12 ++++---- atlasapi/settings.py | 3 +- atlasapi/specs.py | 1 + tests/test_events.py | 3 ++ tests/test_monitoring.py | 15 ++++++---- 6 files changed, 41 insertions(+), 52 deletions(-) diff --git a/atlasapi/atlas.py b/atlasapi/atlas.py index 237f514..f75eccf 100644 --- a/atlasapi/atlas.py +++ b/atlasapi/atlas.py @@ -25,7 +25,8 @@ from dateutil.relativedelta import relativedelta from atlasapi.specs import Host, ListOfHosts, DatabaseUsersUpdatePermissionsSpecs, DatabaseUsersPermissionsSpecs, \ ReplicaSetTypes -from atlasapi.measurements import AtlasMeasurementTypes, AtlasMeasurementValue, AtlasMeasurement, OptionalAtlasMeasurement +from atlasapi.measurements import AtlasMeasurementTypes, AtlasMeasurementValue, AtlasMeasurement, \ + OptionalAtlasMeasurement from typing import Union, Iterator, List, Optional from atlasapi.atlas_types import OptionalInt, OptionalBool, ListofDict from atlasapi.clusters import ClusterConfig, ShardedClusterConfig, AtlasBasicReplicaSet, \ @@ -446,52 +447,31 @@ def __init__(self, atlas): self.host_list_with_measurements: Optional[List[Host]] = list() self.host_list: Optional[List[Host]] = list() - def _get_all_hosts(self, pageNum=Settings.pageNum, - itemsPerPage=Settings.itemsPerPage, - iterable=False): + def _get_all_hosts(self): """Get All Hosts (actually processes) Internal use only, actual data retrieval comes from properties host_list and host_names - url: https://docs.atlas.mongodb.com/reference/api/alerts-get-all-alerts/ + url: https://www.mongodb.com/docs/atlas/reference/api/processes-get-all/ Keyword Args: - pageNum (int): Page number - itemsPerPage (int): Number of Users per Page - iterable (bool): To return an iterable high level object instead of a low level API response - Returns: - ListOfHosts or dict: Iterable object representing this function OR Response payload - Raises: - ErrPaginationLimits: Out of limits - :rtype: Union[ListOfHosts, dict] - :type iterable: OptionalBool - :type itemsPerPage: OptionalInt - :type pageNum: OptionalInt + Returns: + ListOfHosts: Iterable object representing this function """ + uri = Settings.api_resources["Monitoring and Logs"]["Get all processes for group"].format( + 
group_id=self.atlas.group) - # Check limits and raise an Exception if needed - ErrPaginationLimits.checkAndRaise(pageNum, itemsPerPage) - - if iterable: - item_list = list(HostsGetAll(self.atlas, pageNum, itemsPerPage)) - obj_list = list() - for item in item_list: - obj_list.append(Host(item)) - - return_val = obj_list - else: - uri = Settings.api_resources["Monitoring and Logs"]["Get all processes for group"].format( - group_id=self.atlas.group, - page_num=pageNum, - items_per_page=itemsPerPage) - - return_val = self.atlas.network.get(Settings.BASE_URL + uri) - - return return_val + try: + response = self.atlas.network.get(Settings.BASE_URL+uri) + for page in response: + for each_process in page.get("results"): + yield Host(each_process) + except Exception as e: + raise e - def fill_host_list(self, for_cluster: Optional[str] = None) -> List[Host]: + def fill_host_list(self, for_cluster: Optional[str] = None) -> Iterable[Host]: """ Fills the `self.hostname` property with the current hosts for the project/group. @@ -502,9 +482,9 @@ def fill_host_list(self, for_cluster: Optional[str] = None) -> List[Host]: for_cluster (str): The name of the cluster for filter the host list. Returns: - List[Host]: A lost of `Host` objects + Iterable[Host]: Yields `Host` objects """ - host_list = self._get_all_hosts(iterable=True) + host_list = self._get_all_hosts() if for_cluster: out_list = list() for host in host_list: @@ -514,7 +494,7 @@ def fill_host_list(self, for_cluster: Optional[str] = None) -> List[Host]: out_list.append(host) self.host_list = out_list else: - self.host_list = self._get_all_hosts(iterable=True) + self.host_list = list(self._get_all_hosts()) return self.host_list @@ -2105,6 +2085,7 @@ class HostsGetAll(AtlasPagination): def __init__(self, atlas: Atlas, pageNum: int, itemsPerPage: int): super().__init__(atlas, atlas.Hosts._get_all_hosts, pageNum, itemsPerPage) + class DatabaseUsersGetAll(AtlasPagination): """Pagination for Database User : Get All""" diff --git a/atlasapi/network.py b/atlasapi/network.py index a34ceb2..170db94 100644 --- a/atlasapi/network.py +++ b/atlasapi/network.py @@ -153,22 +153,22 @@ def _paginate(self, method , url, **kwargs): next_page = self.answer(request.status_code, request.json()) yield next_page except Exception as e: - logger.warning('Request: {}'.format(request.request.__dict__)) - logger.warning('Response: {}'.format(request.__dict__)) + #logger.warning('Request: {}'.format(request.__dict__)) + #logger.warning('Response: {}'.format(request.__dict__)) raise e finally: if session: session.close() - + def get(self, uri): """Get request - + Args: uri (str): URI - + Returns: Json: API response - + Raises: Exception: Network issue """ diff --git a/atlasapi/settings.py b/atlasapi/settings.py index 2cc8902..a9b99f6 100644 --- a/atlasapi/settings.py +++ b/atlasapi/settings.py @@ -34,8 +34,7 @@ class Settings: "Get One Project": URI_STUB + "/groups/{GROUP_ID}" }, "Monitoring and Logs": { - "Get all processes for group": "/api/atlas/v1.0/groups/{group_id}/processes?pageNum={" - "page_num}&itemsPerPage={items_per_page}", + "Get all processes for group": URI_STUB + "/groups/{group_id}/processes", "Get information for process in group": "/api/atlas/v1.0/groups/%s/processes/%s:&s?pageNum=%d" "&itemsPerPage=%d", "Get measurement for host": "/api/atlas/v1.0/groups/{group_id}/processes/{host}:{" diff --git a/atlasapi/specs.py b/atlasapi/specs.py index ecb442e..460c28f 100644 --- a/atlasapi/specs.py +++ b/atlasapi/specs.py @@ -331,6 +331,7 @@ def 
data_partition_stats(self, atlas_obj, granularity: Optional[AtlasGranulariti Hard codes the name of the partition to `data` and returns all metrics. Args: + period: atlas_obj: Instantiated Atlas instance to access the API granularity (Optional[AtlasGranularitues]): The granularity for the disk measurements. atlas_obj (atlasapi.atlas.Atlas): A configured Atlas instance to connect to the API with. diff --git a/tests/test_events.py b/tests/test_events.py index 82cbb1b..ce79d78 100644 --- a/tests/test_events.py +++ b/tests/test_events.py @@ -83,3 +83,6 @@ def test_04_CPS(self): verbose_logger.warning(f'The count of CPS Events is {len}') test_04_CPS.basic = True + + + #TODO: Add tests which confirm validity of the returned object. diff --git a/tests/test_monitoring.py b/tests/test_monitoring.py index 4501427..17f0831 100644 --- a/tests/test_monitoring.py +++ b/tests/test_monitoring.py @@ -25,20 +25,25 @@ # noinspection PyTypeChecker class MeasurementTests(BaseTests): + + def test_00_get_method_test(self): + for each_host in self.a.Hosts._get_all_hosts(): + self.assertIsInstance(each_host, Host) + + test_00_get_method_test.basic = True def test_00_get_hosts_count(self): atlas: Atlas = self.a atlas.Hosts.fill_host_list() + logger.warning("Found {len(atlas.Hosts.host_list)} hosts") self.assertGreater(len(atlas.Hosts.host_list), 2) test_00_get_hosts_count.basic = True def test_01_get_cluster_names(self): self.a.Hosts.fill_host_list() - cluster_list = self.a.Hosts.cluster_list - - # for each_cluster in cluster_list: - # pprint(each_cluster) - self.assertGreater(len(cluster_list), 0) + cluster_list = self.a.Hosts.host_names + for each_cluster in cluster_list: + self.assertIsInstance(each_cluster, str) test_01_get_cluster_names.basic = True From 9b36e518a22f40eeb6cc1d1d0fec1bccddfead14 Mon Sep 17 00:00:00 2001 From: "Matthew G. Monteleone" Date: Sat, 27 Aug 2022 18:48:51 -0700 Subject: [PATCH 03/11] Progress on #121, tests up to test_21. Partitions and databases endpoints remain. --- atlasapi/atlas.py | 32 +++++++++++++++++--------------- atlasapi/settings.py | 11 ++++++----- atlasapi/specs.py | 27 ++++++++++----------------- tests/test_monitoring.py | 4 ++++ 4 files changed, 37 insertions(+), 37 deletions(-) diff --git a/atlasapi/atlas.py b/atlasapi/atlas.py index f75eccf..d33ffec 100644 --- a/atlasapi/atlas.py +++ b/atlasapi/atlas.py @@ -464,7 +464,7 @@ def _get_all_hosts(self): group_id=self.atlas.group) try: - response = self.atlas.network.get(Settings.BASE_URL+uri) + response = self.atlas.network.get(Settings.BASE_URL + uri) for page in response: for each_process in page.get("results"): yield Host(each_process) @@ -749,10 +749,8 @@ def get_logs_for_cluster(self, def _get_measurement_for_host(self, host_obj: Host, granularity: Optional[AtlasGranularities] = None, period: Optional[AtlasPeriods] = None, - measurement: Optional[AtlasMeasurementTypes] = None, - pageNum: int = Settings.pageNum, - itemsPerPage: int = Settings.itemsPerPage, - iterable: bool = True) -> Union[dict, Iterable[AtlasMeasurement]]: + measurement: Optional[AtlasMeasurementTypes] = None + ) -> Iterable[AtlasMeasurement]: """Get measurement(s) for a host Internal use only, should come from the host obj itself. 
@@ -783,9 +781,6 @@ def _get_measurement_for_host(self, host_obj: Host, ErrPaginationLimits: Out of limits :rtype: List[measurements.AtlasMeasurement] - :type iterable: OptionalBool - :type itemsPerPage: OptionalInt - :type pageNum: OptionalInt :type period: AtlasPeriods :type granularity: AtlasGranularities :type host_obj: Host @@ -793,15 +788,14 @@ def _get_measurement_for_host(self, host_obj: Host, """ - # Check limits and raise an Exception if needed - ErrPaginationLimits.checkAndRaise(pageNum, itemsPerPage) + # Set default measurement, period and granularity if none are sent if measurement is None: measurement = AtlasMeasurementTypes.Cache.dirty if period is None: period = AtlasPeriods.WEEKS_1 - if granularity is None: granularity = AtlasGranularities.HOUR + # Check to see if we received a leaf or branch of the measurements logger.debug(f'Measurement is: {measurement}') logger.debug(f'Measurement object type is {type(measurement)}') @@ -837,9 +831,18 @@ def _get_measurement_for_host(self, host_obj: Host, logger.debug(f'The URI used will be {uri}') # Build the request return_val = self.atlas.network.get(Settings.BASE_URL + uri) + for each_host in return_val: + try: + measurements = each_host.get('measurements') + except Exception as e: + logger.error(f"Error getting measurements from results") - if iterable: - measurements = return_val.get('measurements') + logger.error(e) + logger.error(f"The results look like {results}") + logger.error(f"The results have length {len(list(results))}") + for each in results: + logger.error(f"Results are: {each}") + raise e measurements_count = len(measurements) self.logger.info('There are {} measurements.'.format(measurements_count)) @@ -852,8 +855,7 @@ def _get_measurement_for_host(self, host_obj: Host, yield measurement_obj - else: - return return_val + class _Events: """Events API diff --git a/atlasapi/settings.py b/atlasapi/settings.py index a9b99f6..fd8caa1 100644 --- a/atlasapi/settings.py +++ b/atlasapi/settings.py @@ -25,7 +25,7 @@ class Settings: # Atlas APIs BASE_URL = getenv('BASE_URL', 'https://cloud.mongodb.com') URI_STUB = '/api/atlas/v1.0' - + # Pagination defaults ITEMS_PER_PAGE: int = int(os.getenv('ITEMS_PER_PAGE', 500)) @@ -35,10 +35,11 @@ class Settings: }, "Monitoring and Logs": { "Get all processes for group": URI_STUB + "/groups/{group_id}/processes", - "Get information for process in group": "/api/atlas/v1.0/groups/%s/processes/%s:&s?pageNum=%d" - "&itemsPerPage=%d", - "Get measurement for host": "/api/atlas/v1.0/groups/{group_id}/processes/{host}:{" - "port}/measurements?granularity={granularity}&period={period}&m={measurement}", + "Get information for process in group": URI_STUB + "/groups/%s/processes/%s:&s?pageNum=%d" + "&itemsPerPage=%d", + "Get measurement for host": URI_STUB + "/groups/{group_id}/processes/{host}:{" + "port}/measurements?granularity={granularity}&period={period}" + "&m={measurement}", "Get list of databases for host": "/api/atlas/v1.0/groups/{GROUP-ID}/processes/{HOST}:{PORT}/databases", "Get measurements of database for host.": "/api/atlas/v1.0/groups/{GROUP-ID}/processes/{HOST}:{" "PORT}/databases/{DATABASE-NAME}/measurements", diff --git a/atlasapi/specs.py b/atlasapi/specs.py index 460c28f..d610a67 100644 --- a/atlasapi/specs.py +++ b/atlasapi/specs.py @@ -39,7 +39,7 @@ import logging from future import standard_library from logging import Logger - +from pprint import pprint from atlasapi.measurements import AtlasMeasurementTypes, AtlasMeasurementValue, AtlasMeasurement 
standard_library.install_aliases() @@ -150,8 +150,8 @@ def __init__(self, data: dict) -> None: def get_measurement_for_host(self, atlas_obj, granularity: Optional[AtlasGranularities] = None, period: Optional[AtlasPeriods] = None, - measurement: Optional[AtlasMeasurementTypes] = None, - iterable: bool = True) -> Union[dict, Iterable[AtlasMeasurement]]: + measurement: Optional[AtlasMeasurementTypes] = None + ) -> Union[dict, Iterable[AtlasMeasurement]]: """Get measurement(s) for a host Returns measurements for the Host object. @@ -165,7 +165,7 @@ def get_measurement_for_host(self, atlas_obj, granularity: Optional[AtlasGranula /api/atlas/v1.0/groups/{GROUP-ID}/processes/{HOST}:{PORT}/measurements Keyword Args: - host_obj (Host): the host + Atlas obj (Atlas): the host granularity (AtlasGranularities): the desired granularity period (AtlasPeriods): The desired period measurement (AtlasMeasurementTypes) : The desired measurement or Measurement class @@ -214,24 +214,17 @@ def get_measurement_for_host(self, atlas_obj, granularity: Optional[AtlasGranula # Build the request return_val = atlas_obj.network.get(Settings.BASE_URL + uri) - measurement_obj = None - if iterable: - measurements = return_val.get('measurements') - measurements_count = len(measurements) - logger.info('There are {} measurements.'.format(measurements_count)) - - for each in measurements: - measurement_obj = AtlasMeasurement(name=each.get('name'), - units=each.get('units', None), + for each_response in return_val: + for each_measurement in each_response.get("measurements"): + measurement_obj = AtlasMeasurement(name=each_measurement.get('name'), + units=each_measurement.get('units', None), period=period, granularity=granularity) - for each_and_every in each.get('dataPoints'): + for each_and_every in each_measurement.get('dataPoints'): measurement_obj.measurements = AtlasMeasurementValue(each_and_every) - yield measurement_obj + yield measurement_obj - else: - return return_val def add_measurements(self, measurement) -> None: # TODO: Make measurements unique, use a set instead, but then how do we concat 2? 
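This patch also reworks `Host.get_measurement_for_host` (and the partition helpers) in specs.py into generators that yield `AtlasMeasurement` objects across all response pages. A minimal sketch of consuming it (illustration only, not part of the patch; the credentials and cluster name are placeholders, and the metric, period, and granularity are just example choices):

    from atlasapi.atlas import Atlas
    from atlasapi.lib import AtlasPeriods, AtlasGranularities
    from atlasapi.measurements import AtlasMeasurementTypes

    atlas = Atlas("<user>", "<api_key>", "<group_id>")  # hypothetical credentials
    atlas.Hosts.fill_host_list(for_cluster="monitoringtest")

    for host in atlas.Hosts.host_list:
        # get_measurement_for_host is now a generator of AtlasMeasurement objects.
        for measurement in host.get_measurement_for_host(
                atlas,
                measurement=AtlasMeasurementTypes.Network.bytes_out,
                period=AtlasPeriods.HOURS_1,
                granularity=AtlasGranularities.FIVE_MINUTE):
            print(host.hostname, measurement.name)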
diff --git a/tests/test_monitoring.py b/tests/test_monitoring.py index 17f0831..e339968 100644 --- a/tests/test_monitoring.py +++ b/tests/test_monitoring.py @@ -43,6 +43,7 @@ def test_01_get_cluster_names(self): self.a.Hosts.fill_host_list() cluster_list = self.a.Hosts.host_names for each_cluster in cluster_list: + print(f"Cluster name: {each_cluster}") self.assertIsInstance(each_cluster, str) test_01_get_cluster_names.basic = True @@ -52,6 +53,7 @@ def test_02_fill_measurement(self): self.assertGreaterEqual(len(self.a.Hosts.host_list), 2) self.a.Hosts.get_measurement_for_hosts() for each in self.a.Hosts.host_list_with_measurements: + logger.warning(f"Host: {each.__dict__}") self.assertIsInstance(each, Host) self.assertGreaterEqual(len(each.measurements), 1) for each_measurement in each.measurements: @@ -65,6 +67,7 @@ def test_02_fill_measurement(self): test_02_fill_measurement.basic = True def test_03_measurement_stats(self): + self.a.Hosts.fill_host_list() self.a.Hosts.get_measurement_for_hosts() print(f'For {self.a.Hosts.host_list_with_measurements.__len__()} hosts:') @@ -120,6 +123,7 @@ def test_06_measurement_stats_cache_bytes_into(self): print(f'THe original data type sent for measurement is {type(measurement)}') self.a.Hosts.get_measurement_for_hosts(measurement=measurement, granularity=granularity, period=period) print(f'For {self.a.Hosts.host_list_with_measurements.__len__()} hosts:') + print(f"There are {len(self.a.Hosts.host_list_with_measurements[0].measurements)}") for each in self.a.Hosts.host_list_with_measurements[0].measurements: print(f'For metric {each.name}') self.assertIsInstance(each.measurement_stats_friendly, atlasapi.measurements.StatisticalValuesFriendly) From 914222f33c15ac0cc218f7d8db47117bb6058a72 Mon Sep 17 00:00:00 2001 From: "Matthew G. Monteleone" Date: Sat, 27 Aug 2022 23:41:08 -0700 Subject: [PATCH 04/11] Completed all monitoring and metrics tests for #112. --- atlasapi/network.py | 23 ++++++++++++++++----- atlasapi/settings.py | 12 +++++------ atlasapi/specs.py | 48 ++++++++++++++++++-------------------------- 3 files changed, 42 insertions(+), 41 deletions(-) diff --git a/atlasapi/network.py b/atlasapi/network.py index 170db94..eb82bde 100644 --- a/atlasapi/network.py +++ b/atlasapi/network.py @@ -139,6 +139,7 @@ def _paginate(self, method , url, **kwargs): session = None try: + logger.debug(f"{method} - URI Being called is {url}") session = requests.Session() request = session.request(method=method, url=url, **kwargs) logger.debug("Request arguments: {}".format(str(kwargs))) @@ -147,23 +148,32 @@ def _paginate(self, method , url, **kwargs): total_count = first_page.get("totalCount", 0) items_per_page = Settings.itemsPerPage if total_count > items_per_page: + logger.warning(f"More than on page required, proceeding . . .") for page_number in range(2, ceil(total_count / items_per_page) + 1): - request = session.request(method=method, url=url, params={'pageNum':page_number}, **kwargs) + # Need to ensure that any params sent in kwargs are merged with the pageNum param. + if kwargs.get('params'): + existing_params: dict = kwargs.get('params') + logger.debug(f"Existing params are: {existing_params}") + existing_params.update(dict(pageNum=page_number)) + logger.debug(f"New params are {existing_params}") + kwargs["params"] = existing_params + logger.debug(f"Fully updated kwargs is now... 
{kwargs}") + request = session.request(method=method, url=url, **kwargs) logger.debug("Request arguments: {}".format(str(kwargs))) next_page = self.answer(request.status_code, request.json()) yield next_page except Exception as e: - #logger.warning('Request: {}'.format(request.__dict__)) - #logger.warning('Response: {}'.format(request.__dict__)) + logger.error('Error in Request: {}'.format(request.__dict__)) raise e finally: if session: session.close() - def get(self, uri): + def get(self, uri, **kwargs): """Get request Args: + call_params: uri (str): URI Returns: @@ -172,13 +182,16 @@ def get(self, uri): Raises: Exception: Network issue """ + if kwargs is not None: + logger.info(f"kwargs are: {kwargs}") yield from self._paginate( method='GET', url=uri, allow_redirects=True, timeout=Settings.requests_timeout, headers={}, - auth=self.auth_method(self.user, self.password)) + auth=self.auth_method(self.user, self.password), + **kwargs) def post(self, uri, payload): """Post request diff --git a/atlasapi/settings.py b/atlasapi/settings.py index fd8caa1..a3a4237 100644 --- a/atlasapi/settings.py +++ b/atlasapi/settings.py @@ -1,4 +1,4 @@ -# Copyright (c) 2021 Matthew G. Monteleone +# Copyright (c) 2022 Matthew G. Monteleone # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -49,14 +49,12 @@ class Settings: "DISK-NAME}/measurements", "Get the log file for a host in the cluster": "/api/atlas/v1.0/groups/{group_id}/clusters/{" "host}/logs/{logname}", - "Get Available Disks for Process": "/api/atlas/v1.0/groups/{group_id}/processes/" - "{host}:{port}/disks", - "Get Measurements of a Disk for Process": "/api/atlas/v1.0/groups/{group_id}/processes/{host}:{port}/disks/" + "Get Available Disks for Process": URI_STUB + "/groups/{group_id}/processes/{host}:{port}/disks", + "Get Measurements of a Disk for Process": URI_STUB + "/groups/{group_id}/processes/{host}:{port}/disks/" "{disk_name}/measurements", - "Get Measurements of a Database for Process": "/api/atlas/v1.0/groups/{group_id}/processes/{host}:{port}/" + "Get Measurements of a Database for Process": URI_STUB + "/groups/{group_id}/processes/{host}:{port}/" "databases/{database_name}/measurements", - "Get Available Databases for Process": "/api/atlas/v1.0/groups/{group_id}/processes/" - "{host}:{port}/databases" + "Get Available Databases for Process": URI_STUB + "/groups/{group_id}/processes/{host}:{port}/databases" }, "Events": { "Get All Project Events": URI_STUB + "/groups/{group_id}/events?includeRaw=true" + f"&itemsPerPage={ITEMS_PER_PAGE}", diff --git a/atlasapi/specs.py b/atlasapi/specs.py index d610a67..a8fb06c 100644 --- a/atlasapi/specs.py +++ b/atlasapi/specs.py @@ -41,6 +41,8 @@ from logging import Logger from pprint import pprint from atlasapi.measurements import AtlasMeasurementTypes, AtlasMeasurementValue, AtlasMeasurement +from requests.compat import urljoin +from urllib.parse import urlencode standard_library.install_aliases() logger: Logger = logging.getLogger('Atlas.specs') @@ -259,9 +261,10 @@ def get_partitions(self, atlas_obj) -> Iterable[str]: ) logger.info(f"The full URI being called is {Settings.BASE_URL + uri}") return_val = atlas_obj.network.get(Settings.BASE_URL + uri) - for each_partition in return_val.get("results"): - partition_name: str = each_partition.get('partitionName', None) - yield partition_name + for each_result in return_val: + for each_partition in each_result.get("results"): + partition_name: str = 
each_partition.get('partitionName', None) + yield partition_name def get_measurements_for_disk(self, atlas_obj, partition_name: str, granularity: Optional[AtlasGranularities] = None, @@ -297,15 +300,11 @@ def get_measurements_for_disk(self, atlas_obj, partition_name: str, disk_name=partition_name, ) logger.info(f"The full URI being called is {Settings.BASE_URL + uri}") - logger.info(f"We sent the following parameters: {parameters}") - return_val = atlas_obj.network.get_big(Settings.BASE_URL + uri, params=parameters) - + logger.info(f"We will send the following parameters: {parameters}") + return_val = atlas_obj.network.get(uri=Settings.BASE_URL + uri, params=parameters) measurement_obj = None - if iterable: - measurements = return_val.get('measurements') - measurements_count = len(measurements) - logger.warning('There are {} measurements.'.format(measurements_count)) - for each in measurements: + for each_page in return_val: + for each in each_page.get('measurements'): measurement_obj = AtlasMeasurement(name=each.get('name'), period=period, granularity=granularity, @@ -315,8 +314,6 @@ def get_measurements_for_disk(self, atlas_obj, partition_name: str, yield measurement_obj - return return_val - def data_partition_stats(self, atlas_obj, granularity: Optional[AtlasGranularities] = None, period: Optional[AtlasPeriods] = None, ) -> Iterable[AtlasMeasurement]: """Returns disk measurements for the data partition of the host. @@ -353,27 +350,26 @@ def get_databases(self, atlas_obj) -> Iterable[str]: ) logger.info(f"The full URI being called is {Settings.BASE_URL + uri}") return_val = atlas_obj.network.get(Settings.BASE_URL + uri) - for each_database in return_val.get("results"): - db_name = each_database.get('databaseName', None) - yield db_name + for each_page in return_val: + for each_database in each_page.get("results"): + db_name = each_database.get('databaseName', None) + yield db_name def get_measurements_for_database(self, atlas_obj, database_name: str, granularity: Optional[AtlasGranularities] = None, - period: Optional[AtlasPeriods] = None, iterable: bool = True) -> \ - Iterable[Union[AtlasMeasurement, Any]]: + period: Optional[AtlasPeriods] = None) -> Iterable[AtlasMeasurement]: """Returns All Metrics for a database, for a given period and granularity. Uses default granularity and period if not passed. Args: - iterable (bool): Defaults to true, if not true will return the raw response from API. database_name (str): The database name (local should always exist, and can be used for testing) period (Optional[AtlasPeriods]):The period for the disk measurements granularity (Optional[AtlasGranularitues]): The granularity for the disk measurements. atlas_obj (atlasapi.atlas.Atlas): A configured Atlas instance to connect to the API with. Returns: - Iterable[Union[AtlasMeasurement, Any]: Yields AtlasMeasirements or the original response. + Iterable[Union[AtlasMeasurement]: Yields AtlasMeasirements . 
""" if period is None: period = AtlasPeriods.WEEKS_1 @@ -392,14 +388,10 @@ def get_measurements_for_database(self, atlas_obj, database_name: str, ) logger.info(f"The full URI being called is {Settings.BASE_URL + uri}") logger.info(f"We sent the following parameters: {parameters}") - return_val = atlas_obj.network.get_big(Settings.BASE_URL + uri, params=parameters) - + return_val = atlas_obj.network.get(Settings.BASE_URL + uri, params=parameters) measurement_obj = None - if iterable: - measurements = return_val.get('measurements') - measurements_count = len(measurements) - logger.warning('There are {} measurements.'.format(measurements_count)) - for each in measurements: + for each_page in return_val: + for each in each_page.get("measurements"): measurement_obj = AtlasMeasurement(name=each.get('name'), period=period, granularity=granularity, @@ -409,8 +401,6 @@ def get_measurements_for_database(self, atlas_obj, database_name: str, yield measurement_obj - return return_val - def __hash__(self): return hash(self.hostname) From adb2392a747172c2aa73c59f764f81900abc9f79 Mon Sep 17 00:00:00 2001 From: "Matthew G. Monteleone" Date: Sat, 27 Aug 2022 23:49:18 -0700 Subject: [PATCH 05/11] Removed unused iterator class for #112 --- atlasapi/atlas.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/atlasapi/atlas.py b/atlasapi/atlas.py index d33ffec..d96de7e 100644 --- a/atlasapi/atlas.py +++ b/atlasapi/atlas.py @@ -2081,12 +2081,6 @@ def __init__(self, atlas, pageNum, itemsPerPage): # noinspection PyProtectedMember -class HostsGetAll(AtlasPagination): - """Pagination for Processes : Get All""" - - def __init__(self, atlas: Atlas, pageNum: int, itemsPerPage: int): - super().__init__(atlas, atlas.Hosts._get_all_hosts, pageNum, itemsPerPage) - class DatabaseUsersGetAll(AtlasPagination): """Pagination for Database User : Get All""" From 42ce1dc79fff99e7079f2060d9d47f923cfd4bca Mon Sep 17 00:00:00 2001 From: "Matthew G. Monteleone" Date: Sun, 28 Aug 2022 00:05:23 -0700 Subject: [PATCH 06/11] removed old monitoring test file. --- tests/monitoring_logs.py | 101 --------------------------------------- 1 file changed, 101 deletions(-) delete mode 100644 tests/monitoring_logs.py diff --git a/tests/monitoring_logs.py b/tests/monitoring_logs.py deleted file mode 100644 index f7b76c1..0000000 --- a/tests/monitoring_logs.py +++ /dev/null @@ -1,101 +0,0 @@ -""" -Stupid and simple smoke tests. - -Uses ENV vars to store user, key and group. 
- -TODO: Create real tests - - -""" - -from atlasapi.atlas import Atlas -from pprint import pprint -from os import environ, getenv - -from atlasapi.specs import ListOfHosts, Host - -USER = getenv('ATLAS_USER', None) -API_KEY = getenv('ATLAS_KEY', None) -GROUP_ID = getenv('ATLAS_GROUP', None) -from atlasapi.lib import AtlasPeriods, AtlasUnits, AtlasGranularities -from atlasapi.measurements import AtlasMeasurementTypes -import csv -if not USER or not API_KEY or not GROUP_ID: - raise EnvironmentError('In order to run this smoke test you need ATLAS_USER, AND ATLAS_KEY env variables' - 'your env variables are {}'.format(environ.__str__())) - -a = Atlas(USER, API_KEY, GROUP_ID) - -# Low level Api -# details = a.Hosts._get_all_hosts(pageNum=1, itemsPerPage=100) -# pprint(details) -# print('-----------------Now as iterable ------------------') -# Iterable -# for a_host in a.Hosts.host_names: -# print(a_host) - -pprint('----------MeasureMents') - -# a.Hosts._get_measurement_for_host(a.Hosts.host_list[0] -# ,measurement=AtlasMeasurementTypes.Memory.virtual,iterable=True -# ,period=AtlasPeriods.HOURS_24,granularity=AtlasGranularities.MINUTE) -# -# a.Hosts._get_measurement_for_host(a.Hosts.host_list[0] -# ,measurement=AtlasMeasurementTypes.Memory.resident,iterable=True -# ,period=AtlasPeriods.HOURS_24,granularity=AtlasGranularities.MINUTE) -# -# a.Hosts._get_measurement_for_host(a.Hosts.host_list[1] -# ,measurement=AtlasMeasurementTypes.Memory.virtual,iterable=True -# ,period=AtlasPeriods.HOURS_24,granularity=AtlasGranularities.MINUTE) -# -# a.Hosts._get_measurement_for_host(a.Hosts.host_list[0] -# ,measurement=AtlasMeasurementTypes.Memory.virtual,iterable=True -# ,period=AtlasPeriods.HOURS_24,granularity=AtlasGranularities.MINUTE) -# -# -# print(len(a.Hosts.host_list)) -# -# for each in a.Hosts.host_list: -# print('Hostname: {} - Measurements: {}'.format(each.hostname, each.measurements)) -# pprint('------------Test list of clusters-----------------') -# -# cluster_list = a.Hosts.cluster_list - -# for cluster in cluster_list: -# print('Cluster name {}'.format(cluster)) - - -pprint('------------Test get hosts by cluster-----------------') - -# hosts = a.Hosts.host_list_by_cluster('monitoringtest') - -print('-----------Test get metrics for a clusters hosts---------------') -a.Hosts.fill_host_list(for_cluster='monitoringtest') - -#a.Hosts.get_measurement_for_hosts(measurement=AtlasMeasurementTypes.Network.bytes_out -# , period=AtlasPeriods.HOURS_1, granularity=AtlasGranularities.FIVE_MINUTE) - -for hostObj in a.Hosts.host_list: - hostObj.get_measurement_for_host(measurement=AtlasMeasurementTypes.Network.bytes_out, - period=AtlasPeriods.HOURS_1, granularity=AtlasGranularities.FIVE_MINUTE) - - - -the = list() - -#for host in a.Hosts.host_list: -# hostname = host.hostname -# for each_measurement in host.measurements: -# name = each_measurement.name -# for each_point in each_measurement._measurements: -# timestamp = each_point.timestamp -# value = each_point.value -# the.append(dict(hostname=hostname,metric=name,value=value,timestamp=timestamp)) -# pprint(dict(hostname=hostname,metric=name,value=value,timestamp=timestamp)) -# -#with open('names.csv', 'w', newline='') as csvfile: -# fieldnames = ['hostname', 'metric', 'value', 'timestamp'] -# writer = csv.DictWriter(csvfile, fieldnames=fieldnames) -# writer.writeheader() -# for item in the: -# writer.writerow(item) \ No newline at end of file From b9585255ef32833e188f434f3a3dcc5a8a98ae12 Mon Sep 17 00:00:00 2001 From: "Matthew G. 
Monteleone" Date: Sun, 28 Aug 2022 00:11:24 -0700 Subject: [PATCH 07/11] Added refactor note. --- atlasapi/atlas.py | 1 + 1 file changed, 1 insertion(+) diff --git a/atlasapi/atlas.py b/atlasapi/atlas.py index d96de7e..fa137ba 100644 --- a/atlasapi/atlas.py +++ b/atlasapi/atlas.py @@ -647,6 +647,7 @@ def get_log_for_host(self, host_obj: Host, if date_to is None and date_from is None: logger.info('No dates passed so we are not going to send date params, API default will be used.') uri = Settings.BASE_URL + uri + # TODO: refator to use params instead of hand crafting the uri for the dates elif date_to is None and date_from is not None: logger.info('Received only a date_from, so sending only startDate') uri = Settings.BASE_URL + uri + f'?startDate={int(round(date_from.timestamp()))}' From 6f3d9ddf515dbc8dec8822f7cc355b3a8709b894 Mon Sep 17 00:00:00 2001 From: "Matthew G. Monteleone" Date: Sun, 28 Aug 2022 00:19:19 -0700 Subject: [PATCH 08/11] Small update to enable one more test in monitoring. --- tests/test_monitoring.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/test_monitoring.py b/tests/test_monitoring.py index e339968..95632a7 100644 --- a/tests/test_monitoring.py +++ b/tests/test_monitoring.py @@ -260,6 +260,8 @@ def test_16_issue_98_metric_name_write(self): print(f'👍Value is {each.measurement_stats_friendly.__dict__}') self.assertIsInstance(each.measurement_stats, atlasapi.measurements.StatisticalValues) + test_16_issue_98_metric_name_write.basic = True + def test_17_return_multiple_metrics(self): self.a.Hosts.fill_host_list() for each_host in self.a.Hosts.host_list_secondaries: From 7af13b25bda442937828e0a892d03a8e33773ff6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=A9my=20Pinsolle?= <82467106+jpinsolle-betclic@users.noreply.github.com> Date: Sun, 28 Aug 2022 09:38:33 +0200 Subject: [PATCH 09/11] Add FEDERATED_DATABASE events (#120) --- atlasapi/events_event_types.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/atlasapi/events_event_types.py b/atlasapi/events_event_types.py index 8392913..024e6e3 100644 --- a/atlasapi/events_event_types.py +++ b/atlasapi/events_event_types.py @@ -648,6 +648,9 @@ class AtlasEventTypes(Enum): FEDERATION_SETTINGS_CREATED = 'Federation Settings Created' FEDERATION_SETTINGS_DELETED = 'Federation Settings Deleted' FEDERATION_SETTINGS_UPDATED = 'Federation Settings Updated' + FEDERATED_DATABASE_CREATED = 'Federated Database Created' + FEDERATED_DATABASE_UPDATED = 'Federated Database Updated' + FEDERATED_DATABASE_REMOVED = 'Federated Database Removed' IDENTITY_PROVIDER_CREATED = 'Identity Provider Created' IDENTITY_PROVIDER_UPDATED = 'Identity Provider Updated' IDENTITY_PROVIDER_DELETED = 'Identity Provider Deleted' From 0a74f02aadf60e0b391875d5c0edb1ca5b94470b Mon Sep 17 00:00:00 2001 From: Julian Torres Date: Sun, 28 Aug 2022 09:01:58 +0100 Subject: [PATCH 10/11] fix(settings): allow `URI_STUB` to be set in the environment to set its value on-the-fly (#118) This change would allow a user to do something like the following, just before making the API call, allowing them to specify the `URI_STUB` manually for endpoints that accept something other than v1.5 (for example: https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#operation/returnOneAdvancedClusterFromOneProject) --- atlasapi/__init__.py | 2 +- atlasapi/settings.py | 2 +- setup.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/atlasapi/__init__.py b/atlasapi/__init__.py index ed90048..71e9b6a 100644 --- a/atlasapi/__init__.py +++ 
b/atlasapi/__init__.py @@ -15,4 +15,4 @@ # __init__.py # Version of the realpython-reader package -__version__ = "2.0.1" \ No newline at end of file +__version__ = "2.0.2" diff --git a/atlasapi/settings.py b/atlasapi/settings.py index 1723f5c..2d51591 100644 --- a/atlasapi/settings.py +++ b/atlasapi/settings.py @@ -24,7 +24,7 @@ class Settings: # Atlas APIs BASE_URL = getenv('BASE_URL', 'https://cloud.mongodb.com') - URI_STUB = '/api/atlas/v1.0' + URI_STUB = getenv('URI_STUB', '/api/atlas/v1.0')) api_resources = { "Project": { diff --git a/setup.py b/setup.py index c84467e..f0ca0ae 100644 --- a/setup.py +++ b/setup.py @@ -1,9 +1,9 @@ #!/usr/bin/env python3 -from setuptools import setup, find_packages +from setuptools import find_packages, setup setup( name='atlasapi', - version='2.0.1', + version='2.0.2', python_requires='>=3.7', packages=find_packages(exclude=("tests",)), install_requires=['requests', 'python-dateutil', 'isodate', 'future', 'pytz','coolname', 'humanfriendly', 'nose'], From fbc0ce90d542ee6af8cc3df83340477821e8a1d8 Mon Sep 17 00:00:00 2001 From: "Matthew G. Monteleone" Date: Sun, 28 Aug 2022 01:18:09 -0700 Subject: [PATCH 11/11] Further updates to the events list. Small cleanup of settings file. --- atlasapi/events_event_types.py | 41 +++++++++++++++++++++++++++++++--- atlasapi/settings.py | 19 ++++++++-------- 2 files changed, 47 insertions(+), 13 deletions(-) diff --git a/atlasapi/events_event_types.py b/atlasapi/events_event_types.py index 024e6e3..446e446 100644 --- a/atlasapi/events_event_types.py +++ b/atlasapi/events_event_types.py @@ -648,9 +648,6 @@ class AtlasEventTypes(Enum): FEDERATION_SETTINGS_CREATED = 'Federation Settings Created' FEDERATION_SETTINGS_DELETED = 'Federation Settings Deleted' FEDERATION_SETTINGS_UPDATED = 'Federation Settings Updated' - FEDERATED_DATABASE_CREATED = 'Federated Database Created' - FEDERATED_DATABASE_UPDATED = 'Federated Database Updated' - FEDERATED_DATABASE_REMOVED = 'Federated Database Removed' IDENTITY_PROVIDER_CREATED = 'Identity Provider Created' IDENTITY_PROVIDER_UPDATED = 'Identity Provider Updated' IDENTITY_PROVIDER_DELETED = 'Identity Provider Deleted' @@ -837,3 +834,41 @@ class AtlasEventTypes(Enum): ONLINE_ARCHIVE_DATA_EXPIRATION_RESOLVED = 'Online Archive Data Expiration Resolved' LOG_FORWARDER_FAILURE = 'Log Forwarder Failure' CLUSTER_UNLINKED_FROM_VERCEL = 'Cluster Unlinked From Vercel' + INGESTION_PIPELINE_DESTROYED = 'Ingestion Pipeline Destroyed' + TENANT_ENDPOINT_INITIATING = 'Tenant Endpoint Initiating' + ATLAS_SCHEDULED_MAINTENANCE_DELAYED = 'Atlas Scheduled Maintenance Delayed' + ATLAS_SCHEDULED_MAINTENANCE_COMPLETED = 'Atlas Scheduled Maintenance Completed' + FEDERATED_DATABASE_CREATED = 'Federated Database Created' + FEDERATED_DATABASE_UPDATED = 'Federated Database Updated' + FEDERATED_DATABASE_REMOVED = 'Federated Database Removed' + FEDERATED_DATABASE_QUERY_LOGS_DOWNLOADED = 'Federated Database Query Logs Downloaded' + INSIDE_REALM_METRIC_THRESHOLD = 'Inside Realm Metric Threshold' + OUTSIDE_REALM_METRIC_THRESHOLD = 'Outside Realm Metric Threshold' + TENANT_ENDPOINT_EXPIRED = 'Tenant Endpoint Expired' + DEVICE_SYNC_DEBUG_ACCESS_GRANTED = 'Device Sync Debug Access Granted' + DEVICE_SYNC_DEBUG_ACCESS_REVOKED = 'Device Sync Debug Access Revoked' + CHARGE_PROCESSING = 'Charge Processing' + LEGACY_2FA_RESET_EMAIL_SENT_AUDIT = 'Legacy 2Fa Reset Email Sent Audit' + LEGACY_2FA_RESET_AUDIT = 'Legacy 2Fa Reset Audit' + LEGACY_2FA_UPDATED_AUDIT = 'Legacy 2Fa Updated Audit' + DEVICE_SYNC_DEBUG_X509_CERT_CREATED = 
'Device Sync Debug X509 Cert Created'
+    ONLINE_ARCHIVE_MAX_CONSECUTIVE_OFFLOAD_WINDOWS_CHECK = 'Online Archive Max Consecutive Offload Windows Check'
+    SERVERLESS_PROXIES_REPORTING = 'Serverless Proxies Reporting'
+    SERVERLESS_PROXIES_STOPPED_REPORTING = 'Serverless Proxies Stopped Reporting'
+    SUFFICIENT_APP_DB_FREE_SPACE = 'Sufficient App Db Free Space'
+    LOW_APP_DB_FREE_SPACE_PERCENT = 'Low App Db Free Space Percent'
+    SUFFICIENT_BLOCKSTORE_FREE_SPACE = 'Sufficient Blockstore Free Space'
+    LOW_BLOCKSTORE_FREE_SPACE_PERCENT = 'Low Blockstore Free Space Percent'
+    SUFFICIENT_S3_BLOCKSTORE_METADATA_DB_FREE_SPACE = 'Sufficient S3 Blockstore Metadata Db Free Space'
+    LOW_S3_BLOCKSTORE_METADATA_DB_FREE_SPACE_PERCENT = 'Low S3 Blockstore Metadata Db Free Space Percent'
+    SUFFICIENT_OPLOGSTORE_FREE_SPACE = 'Sufficient Oplogstore Free Space'
+    LOW_OPLOGSTORE_FREE_SPACE_PERCENT = 'Low Oplogstore Free Space Percent'
+    SUFFICIENT_S3_OPLOGSTORE_METADATA_DB_FREE_SPACE = 'Sufficient S3 Oplogstore Metadata Db Free Space'
+    LOW_S3_OPLOGSTORE_METADATA_DB_FREE_SPACE_PERCENT = 'Low S3 Oplogstore Metadata Db Free Space Percent'
+    PROJECT_LIVE_IMPORT_OVERRIDES_ADDED = 'Project Live Import Overrides Added'
+    PROJECT_LIVE_IMPORT_OVERRIDES_UPDATED = 'Project Live Import Overrides Updated'
+    PROJECT_LIVE_IMPORT_OVERRIDES_DELETED = 'Project Live Import Overrides Deleted'
+    CLUSTER_FORCE_PLANNED = 'Cluster Force Planned'
+    SUFFICIENT_FILESYSTEM_STORE_FREE_SPACE = 'Sufficient Filesystem Store Free Space'
+    LOW_FILESYSTEM_STORE_FREE_SPACE_PERCENT = 'Low Filesystem Store Free Space Percent'
+    PENDING_INDEXES_CANCELED = 'Pending Indexes Canceled'
diff --git a/atlasapi/settings.py b/atlasapi/settings.py
index 2d51591..ce5122e 100644
--- a/atlasapi/settings.py
+++ b/atlasapi/settings.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Matthew G. Monteleone
+# Copyright (c) 2022 Matthew G. Monteleone
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ class Settings:
 
     # Atlas APIs
     BASE_URL = getenv('BASE_URL', 'https://cloud.mongodb.com')
-    URI_STUB = getenv('URI_STUB', '/api/atlas/v1.0'))
+    URI_STUB = getenv('URI_STUB', '/api/atlas/v1.0')
 
     api_resources = {
         "Project": {
@@ -63,14 +63,13 @@ class Settings:
                                       "min_date}"
         },
         "Clusters": {
-            "Get All Clusters": "/api/atlas/v1.0/groups/%s/clusters?pageNum=%d&itemsPerPage=%d",
-            "Get a Single Cluster": "/api/atlas/v1.0/groups/%s/clusters/%s",
-            "Delete a Cluster": "/api/atlas/v1.0/groups/%s/clusters/%s",
-            "Create a Cluster": "/api/atlas/v1.0/groups/{GROUP_ID}/clusters/",
-            "Modify a Cluster": "/api/atlas/v1.0/groups/{GROUP_ID}/clusters/{CLUSTER_NAME}",
-            "Test Failover": "/api/atlas/v1.0/groups/{GROUP_ID}/clusters/{CLUSTER_NAME}/restartPrimaries",
-            "Advanced Configuration Options": "/api/atlas/v1.0/groups/{GROUP_ID}/clusters/{"
-                                              "CLUSTER_NAME}/processArgs",
+            "Get All Clusters": URI_STUB + "/groups/%s/clusters?pageNum=%d&itemsPerPage=%d",
+            "Get a Single Cluster": URI_STUB + "/groups/%s/clusters/%s",
+            "Delete a Cluster": URI_STUB + "/groups/%s/clusters/%s",
+            "Create a Cluster": URI_STUB + "/groups/{GROUP_ID}/clusters/",
+            "Modify a Cluster": URI_STUB + "/groups/{GROUP_ID}/clusters/{CLUSTER_NAME}",
+            "Test Failover": URI_STUB + "/groups/{GROUP_ID}/clusters/{CLUSTER_NAME}/restartPrimaries",
+            "Advanced Configuration Options": URI_STUB + "/groups/{GROUP_ID}/clusters/{CLUSTER_NAME}/processArgs",
         },
         "Database Users": {
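Patches 10 and 11 together make the API version stub overridable from the environment: Settings.URI_STUB now falls back to '/api/atlas/v1.0' only when the URI_STUB variable is unset. The commit message for patch 10 alludes to usage "like the following"; a minimal sketch of that idea (illustration only, not part of the series; the v1.5 value is just an example, and note that URI_STUB is read once, when atlasapi.settings is first imported, so it must be set before the import):

    import os

    # Must be set before atlasapi is imported: Settings.URI_STUB is evaluated at
    # class-definition time via getenv('URI_STUB', '/api/atlas/v1.0').
    os.environ['URI_STUB'] = '/api/atlas/v1.5'  # example alternate API version

    from atlasapi.atlas import Atlas

    atlas = Atlas("<user>", "<api_key>", "<group_id>")  # hypothetical credentials
    # Endpoints built from URI_STUB in Settings.api_resources (for example the
    # Clusters and Monitoring entries) are now rooted at /api/atlas/v1.5.

The same override works from the shell, e.g. `URI_STUB=/api/atlas/v1.5 python my_script.py`.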