Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion atlasapi/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,4 +15,4 @@
# __init__.py

# Version of the realpython-reader package
__version__ = "2.0.1"
__version__ = "2.0.2"
94 changes: 36 additions & 58 deletions atlasapi/atlas.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,8 @@
from dateutil.relativedelta import relativedelta
from atlasapi.specs import Host, ListOfHosts, DatabaseUsersUpdatePermissionsSpecs, DatabaseUsersPermissionsSpecs, \
ReplicaSetTypes
from atlasapi.measurements import AtlasMeasurementTypes, AtlasMeasurementValue, AtlasMeasurement, OptionalAtlasMeasurement
from atlasapi.measurements import AtlasMeasurementTypes, AtlasMeasurementValue, AtlasMeasurement, \
OptionalAtlasMeasurement
from typing import Union, Iterator, List, Optional
from atlasapi.atlas_types import OptionalInt, OptionalBool, ListofDict
from atlasapi.clusters import ClusterConfig, ShardedClusterConfig, AtlasBasicReplicaSet, \
Expand Down Expand Up @@ -446,52 +447,31 @@ def __init__(self, atlas):
self.host_list_with_measurements: Optional[List[Host]] = list()
self.host_list: Optional[List[Host]] = list()

def _get_all_hosts(self, pageNum=Settings.pageNum,
itemsPerPage=Settings.itemsPerPage,
iterable=False):
def _get_all_hosts(self):
"""Get All Hosts (actually processes)

Internal use only, actual data retrieval comes from properties host_list and host_names
url: https://docs.atlas.mongodb.com/reference/api/alerts-get-all-alerts/
url: https://www.mongodb.com/docs/atlas/reference/api/processes-get-all/

Keyword Args:
pageNum (int): Page number
itemsPerPage (int): Number of Users per Page
iterable (bool): To return an iterable high level object instead of a low level API response

Returns:
ListOfHosts or dict: Iterable object representing this function OR Response payload

Raises:
ErrPaginationLimits: Out of limits
:rtype: Union[ListOfHosts, dict]
:type iterable: OptionalBool
:type itemsPerPage: OptionalInt
:type pageNum: OptionalInt
Returns:
ListOfHosts: Iterable object representing this function

"""
uri = Settings.api_resources["Monitoring and Logs"]["Get all processes for group"].format(
group_id=self.atlas.group)

# Check limits and raise an Exception if needed
ErrPaginationLimits.checkAndRaise(pageNum, itemsPerPage)

if iterable:
item_list = list(HostsGetAll(self.atlas, pageNum, itemsPerPage))
obj_list = list()
for item in item_list:
obj_list.append(Host(item))

return_val = obj_list
else:
uri = Settings.api_resources["Monitoring and Logs"]["Get all processes for group"].format(
group_id=self.atlas.group,
page_num=pageNum,
items_per_page=itemsPerPage)

return_val = self.atlas.network.get(Settings.BASE_URL + uri)

return return_val
try:
response = self.atlas.network.get(Settings.BASE_URL + uri)
for page in response:
for each_process in page.get("results"):
yield Host(each_process)
except Exception as e:
raise e

def fill_host_list(self, for_cluster: Optional[str] = None) -> List[Host]:
def fill_host_list(self, for_cluster: Optional[str] = None) -> Iterable[Host]:
"""
Fills the `self.hostname` property with the current hosts for the project/group.

Expand All @@ -502,9 +482,9 @@ def fill_host_list(self, for_cluster: Optional[str] = None) -> List[Host]:
for_cluster (str): The name of the cluster for filter the host list.

Returns:
List[Host]: A lost of `Host` objects
Iterable[Host]: Yields `Host` objects
"""
host_list = self._get_all_hosts(iterable=True)
host_list = self._get_all_hosts()
if for_cluster:
out_list = list()
for host in host_list:
Expand All @@ -514,7 +494,7 @@ def fill_host_list(self, for_cluster: Optional[str] = None) -> List[Host]:
out_list.append(host)
self.host_list = out_list
else:
self.host_list = self._get_all_hosts(iterable=True)
self.host_list = list(self._get_all_hosts())

return self.host_list

Expand Down Expand Up @@ -667,6 +647,7 @@ def get_log_for_host(self, host_obj: Host,
if date_to is None and date_from is None:
logger.info('No dates passed so we are not going to send date params, API default will be used.')
uri = Settings.BASE_URL + uri
# TODO: refactor to use params instead of hand crafting the uri for the dates
elif date_to is None and date_from is not None:
logger.info('Received only a date_from, so sending only startDate')
uri = Settings.BASE_URL + uri + f'?startDate={int(round(date_from.timestamp()))}'
Expand Down Expand Up @@ -769,10 +750,8 @@ def get_logs_for_cluster(self,
def _get_measurement_for_host(self, host_obj: Host,
granularity: Optional[AtlasGranularities] = None,
period: Optional[AtlasPeriods] = None,
measurement: Optional[AtlasMeasurementTypes] = None,
pageNum: int = Settings.pageNum,
itemsPerPage: int = Settings.itemsPerPage,
iterable: bool = True) -> Union[dict, Iterable[AtlasMeasurement]]:
measurement: Optional[AtlasMeasurementTypes] = None
) -> Iterable[AtlasMeasurement]:
"""Get measurement(s) for a host

Internal use only, should come from the host obj itself.
Expand Down Expand Up @@ -803,25 +782,21 @@ def _get_measurement_for_host(self, host_obj: Host,
ErrPaginationLimits: Out of limits

:rtype: List[measurements.AtlasMeasurement]
:type iterable: OptionalBool
:type itemsPerPage: OptionalInt
:type pageNum: OptionalInt
:type period: AtlasPeriods
:type granularity: AtlasGranularities
:type host_obj: Host
:type measurement: measurements.AtlasMeasurementTypes

"""

# Check limits and raise an Exception if needed
ErrPaginationLimits.checkAndRaise(pageNum, itemsPerPage)
# Set default measurement, period and granularity if none are sent
if measurement is None:
measurement = AtlasMeasurementTypes.Cache.dirty
if period is None:
period = AtlasPeriods.WEEKS_1

if granularity is None:
granularity = AtlasGranularities.HOUR

# Check to see if we received a leaf or branch of the measurements
logger.debug(f'Measurement is: {measurement}')
logger.debug(f'Measurement object type is {type(measurement)}')
Expand Down Expand Up @@ -857,9 +832,18 @@ def _get_measurement_for_host(self, host_obj: Host,
logger.debug(f'The URI used will be {uri}')
# Build the request
return_val = self.atlas.network.get(Settings.BASE_URL + uri)
for each_host in return_val:
try:
measurements = each_host.get('measurements')
except Exception as e:
logger.error(f"Error getting measurements from results")

if iterable:
measurements = return_val.get('measurements')
logger.error(e)
logger.error(f"The results look like {results}")
logger.error(f"The results have length {len(list(results))}")
for each in results:
logger.error(f"Results are: {each}")
raise e
measurements_count = len(measurements)
self.logger.info('There are {} measurements.'.format(measurements_count))

Expand All @@ -872,8 +856,7 @@ def _get_measurement_for_host(self, host_obj: Host,

yield measurement_obj

else:
return return_val


class _Events:
"""Events API
Expand Down Expand Up @@ -2099,11 +2082,6 @@ def __init__(self, atlas, pageNum, itemsPerPage):


# noinspection PyProtectedMember
class HostsGetAll(AtlasPagination):
"""Pagination for Processes : Get All"""

def __init__(self, atlas: Atlas, pageNum: int, itemsPerPage: int):
super().__init__(atlas, atlas.Hosts._get_all_hosts, pageNum, itemsPerPage)

class DatabaseUsersGetAll(AtlasPagination):
"""Pagination for Database User : Get All"""
Expand Down
38 changes: 38 additions & 0 deletions atlasapi/events_event_types.py
Original file line number Diff line number Diff line change
Expand Up @@ -834,3 +834,41 @@ class AtlasEventTypes(Enum):
ONLINE_ARCHIVE_DATA_EXPIRATION_RESOLVED = 'Online Archive Data Expiration Resolved'
LOG_FORWARDER_FAILURE = 'Log Forwarder Failure'
CLUSTER_UNLINKED_FROM_VERCEL = 'Cluster Unlinked From Vercel'
INGESTION_PIPELINE_DESTROYED = 'Ingestion Pipeline Destroyed'
TENANT_ENDPOINT_INITIATING = 'Tenant Endpoint Initiating'
ATLAS_SCHEDULED_MAINTENANCE_DELAYED = 'Atlas Scheduled Maintenance Delayed'
ATLAS_SCHEDULED_MAINTENANCE_COMPLETED = 'Atlas Scheduled Maintenance Completed'
FEDERATED_DATABASE_CREATED = 'Federated Database Created'
FEDERATED_DATABASE_UPDATED = 'Federated Database Updated'
FEDERATED_DATABASE_REMOVED = 'Federated Database Removed'
FEDERATED_DATABASE_QUERY_LOGS_DOWNLOADED = 'Federated Database Query Logs Downloaded'
INSIDE_REALM_METRIC_THRESHOLD = 'Inside Realm Metric Threshold'
OUTSIDE_REALM_METRIC_THRESHOLD = 'Outside Realm Metric Threshold'
TENANT_ENDPOINT_EXPIRED = 'Tenant Endpoint Expired'
DEVICE_SYNC_DEBUG_ACCESS_GRANTED = 'Device Sync Debug Access Granted'
DEVICE_SYNC_DEBUG_ACCESS_REVOKED = 'Device Sync Debug Access Revoked'
CHARGE_PROCESSING = 'Charge Processing'
LEGACY_2FA_RESET_EMAIL_SENT_AUDIT = 'Legacy 2Fa Reset Email Sent Audit'
LEGACY_2FA_RESET_AUDIT = 'Legacy 2Fa Reset Audit'
LEGACY_2FA_UPDATED_AUDIT = 'Legacy 2Fa Updated Audit'
DEVICE_SYNC_DEBUG_X509_CERT_CREATED = 'Device Sync Debug X509 Cert Created'
ONLINE_ARCHIVE_MAX_CONSECUTIVE_OFFLOAD_WINDOWS_CHECK = 'Online Archive Max Consecutive Offload Windows Check'
SERVERLESS_PROXIES_REPORTING = 'Serverless Proxies Reporting'
SERVERLESS_PROXIES_STOPPED_REPORTING = 'Serverless Proxies Stopped Reporting'
SUFFICIENT_APP_DB_FREE_SPACE = 'Sufficient App Db Free Space'
LOW_APP_DB_FREE_SPACE_PERCENT = 'Low App Db Free Space Percent'
SUFFICIENT_BLOCKSTORE_FREE_SPACE = 'Sufficient Blockstore Free Space'
LOW_BLOCKSTORE_FREE_SPACE_PERCENT = 'Low Blockstore Free Space Percent'
SUFFICIENT_S3_BLOCKSTORE_METADATA_DB_FREE_SPACE = 'Sufficient S3 Blockstore Metadata Db Free Space'
LOW_S3_BLOCKSTORE_METADATA_DB_FREE_SPACE_PERCENT = 'Low S3 Blockstore Metadata Db Free Space Percent'
SUFFICIENT_OPLOGSTORE_FREE_SPACE = 'Sufficient Oplogstore Free Space'
LOW_OPLOGSTORE_FREE_SPACE_PERCENT = 'Low Oplogstore Free Space Percent'
SUFFICIENT_S3_OPLOGSTORE_METADATA_DB_FREE_SPACE = 'Sufficient S3 Oplogstore Metadata Db Free Space'
LOW_S3_OPLOGSTORE_METADATA_DB_FREE_SPACE_PERCENT = 'Low S3 Oplogstore Metadata Db Free Space Percent'
PROJECT_LIVE_IMPORT_OVERRIDES_ADDED = 'Project Live Import Overrides Added'
PROJECT_LIVE_IMPORT_OVERRIDES_UPDATED = 'Project Live Import Overrides Updated'
PROJECT_LIVE_IMPORT_OVERRIDES_DELETED = 'Project Live Import Overrides Deleted'
CLUSTER_FORCE_PLANNED = 'Cluster Force Planned'
SUFFICIENT_FILESYSTEM_STORE_FREE_SPACE = 'Sufficient Filesystem Store Free Space'
LOW_FILESYSTEM_STORE_FREE_SPACE_PERCENT = 'Low Filesystem Store Free Space Percent'
PENDING_INDEXES_CANCELED = 'Pending Indexes Canceled'
31 changes: 22 additions & 9 deletions atlasapi/network.py
Original file line number Diff line number Diff line change
Expand Up @@ -139,6 +139,7 @@ def _paginate(self, method , url, **kwargs):
session = None

try:
logger.debug(f"{method} - URI Being called is {url}")
session = requests.Session()
request = session.request(method=method, url=url, **kwargs)
logger.debug("Request arguments: {}".format(str(kwargs)))
Expand All @@ -147,38 +148,50 @@ def _paginate(self, method , url, **kwargs):
total_count = first_page.get("totalCount", 0)
items_per_page = Settings.itemsPerPage
if total_count > items_per_page:
logger.warning(f"More than on page required, proceeding . . .")
for page_number in range(2, ceil(total_count / items_per_page) + 1):
request = session.request(method=method, url=url, params={'pageNum':page_number}, **kwargs)
# Need to ensure that any params sent in kwargs are merged with the pageNum param.
if kwargs.get('params'):
existing_params: dict = kwargs.get('params')
logger.debug(f"Existing params are: {existing_params}")
existing_params.update(dict(pageNum=page_number))
logger.debug(f"New params are {existing_params}")
kwargs["params"] = existing_params
logger.debug(f"Fully updated kwargs is now... {kwargs}")
request = session.request(method=method, url=url, **kwargs)
logger.debug("Request arguments: {}".format(str(kwargs)))
next_page = self.answer(request.status_code, request.json())
yield next_page
except Exception as e:
logger.warning('Request: {}'.format(request.request.__dict__))
logger.warning('Response: {}'.format(request.__dict__))
logger.error('Error in Request: {}'.format(request.__dict__))
raise e
finally:
if session:
session.close()
def get(self, uri):

def get(self, uri, **kwargs):
"""Get request

Args:
**kwargs: Additional request parameters forwarded to the paginated request.
uri (str): URI

Returns:
Json: API response

Raises:
Exception: Network issue
"""
if kwargs is not None:
logger.info(f"kwargs are: {kwargs}")
yield from self._paginate(
method='GET',
url=uri,
allow_redirects=True,
timeout=Settings.requests_timeout,
headers={},
auth=self.auth_method(self.user, self.password))
auth=self.auth_method(self.user, self.password),
**kwargs)

def post(self, uri, payload):
"""Post request
Expand Down
Loading