diff --git a/.travis.yml b/.travis.yml
index a38188cc..44bd894c 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -20,12 +20,15 @@ env:
- TOXENV=pypy3-versioncheck PIP_DOWNLOAD_CACHE=$HOME/.pip-cache
- TOXENV=docs PIP_DOWNLOAD_CACHE=$HOME/.pip-cache
- TOXENV=integration
+- TOXENV=integration3
install:
+- virtualenv --version; test $TOXENV = "py32-unit" -o $TOXENV = "py32-versioncheck" -o $TOXENV = "pypy3-unit" -o $TOXENV = "pypy3-versioncheck" && pip install --upgrade virtualenv==13.1.2 || /bin/true
- git config --global user.email "travisci@jasonantman.com"
- git config --global user.name "travisci"
- pip install tox
- pip install codecov
- pip freeze
+- virtualenv --version
script:
- tox -r
after_success:
diff --git a/README.rst b/README.rst
index d3af1203..8f71296c 100644
--- a/README.rst
+++ b/README.rst
@@ -57,7 +57,7 @@ Develop:
:target: https://readthedocs.org/projects/awslimitchecker/?badge=develop
:alt: sphinx documentation for develop branch
-A script and python module to check your AWS service limits and usage using `boto `_.
+A script and python module to check your AWS service limits and usage using `boto3 <http://boto3.readthedocs.org/>`_.
Users building out scalable services in Amazon AWS often run into AWS' `service limits `_ -
often at the least convenient time (i.e. mid-deploy or when autoscaling fails). Amazon's `Trusted Advisor `_
@@ -71,8 +71,11 @@ Full project documentation is available at `http://awslimitchecker.readthedocs.o
Status
------
-This project is currently in very early development. At this time please consider it beta code and not fully tested in all situations;
-furthermore its API may be changing rapidly. I hope to have this stabilized soon.
+This project has just undergone a relatively major refactor to migrate from
+`boto <http://boto.readthedocs.org>`_ to `boto3 <http://boto3.readthedocs.org/>`_,
+along with a refactor of much of the connection and usage gathering code. Until
+it's been running in production for a while, please consider this to be "beta"
+and make every effort to manually confirm the results for your environment.
What It Does
------------
@@ -91,10 +94,9 @@ What It Does
Requirements
------------
-* Python 2.6 through 3.4. Python 2.x is recommended, as `boto `_ (the AWS client library) currently has
- incomplete Python3 support. See the `boto documentation `_ for a list of AWS services that are Python3-compatible.
+* Python 2.6 through 3.5.
* Python `VirtualEnv `_ and ``pip`` (recommended installation method; your OS/distribution should have packages for these)
-* `boto `_ >= 2.32.0
+* `boto3 <http://boto3.readthedocs.org/>`_ >= 1.2.3
Installation
------------
@@ -114,7 +116,7 @@ Credentials
Aside from STS, awslimitchecker does nothing with AWS credentials, it leaves that to boto itself.
You must either have your credentials configured in one of boto's supported config
files, or set as environment variables. See
-`boto config `_
+`boto3 config `_
and
`this project's documentation `_
for further information.
diff --git a/awslimitchecker/checker.py b/awslimitchecker/checker.py
index 432a45e7..f9ce1c2d 100644
--- a/awslimitchecker/checker.py
+++ b/awslimitchecker/checker.py
@@ -194,7 +194,7 @@ def get_service_names(self):
def find_usage(self, service=None, use_ta=True):
"""
For each limit in the specified service (or all services if
- ``service`` is ``None``), query the AWS API via :py:mod:`boto`
+ ``service`` is ``None``), query the AWS API via ``boto3``
and find the current usage amounts for that limit.
This method updates the ``current_usage`` attribute of the
diff --git a/awslimitchecker/connectable.py b/awslimitchecker/connectable.py
index acabd1bf..61a0b1ba 100644
--- a/awslimitchecker/connectable.py
+++ b/awslimitchecker/connectable.py
@@ -38,11 +38,29 @@
"""
import logging
-import boto.sts
+import boto3
logger = logging.getLogger(__name__)
+class ConnectableCredentials(object):
+ """
+ boto's (2.x) :py:meth:`boto.sts.STSConnection.assume_role` returns a
+ :py:class:`boto.sts.credentials.Credentials` object, but boto3's
+ `boto3.sts.STSConnection.assume_role `_ just returns
+ a dict. This class provides a compatible interface for boto3.
+ """
+
+ def __init__(self, creds_dict):
+ self.access_key = creds_dict['Credentials']['AccessKeyId']
+ self.secret_key = creds_dict['Credentials']['SecretAccessKey']
+ self.session_token = creds_dict['Credentials']['SessionToken']
+ self.expiration = creds_dict['Credentials']['Expiration']
+ self.assumed_role_id = creds_dict['AssumedRoleUser']['AssumedRoleId']
+ self.assumed_role_arn = creds_dict['AssumedRoleUser']['Arn']
+
+
class Connectable(object):
"""
@@ -53,62 +71,97 @@ class Connectable(object):
# Class attribute to reuse credentials between calls
credentials = None
- def connect_via(self, driver):
+ @property
+ def _boto3_connection_kwargs(self):
"""
- Connect to an AWS API and return the connection object. If
- ``self.account_id`` is None, call ``driver(self.region)``. Otherwise,
- call :py:meth:`~._get_sts_token` to get STS token credentials using
- :py:meth:`boto.sts.STSConnection.assume_role` and call ``driver()`` with
- those credentials to use an assumed role.
-
- :param driver: the connect_to_region() function of the boto
- submodule to use to create this connection
- :type driver: :py:obj:`function`
- :returns: connected boto service class instance
+ Generate keyword arguments for boto3 connection functions.
+ If ``self.account_id`` is None, this will just include
+ ``region_name=self.region``. Otherwise, call
+ :py:meth:`~._get_sts_token_boto3` to get STS token credentials using
+ `boto3.STS.Client.assume_role `_ and include
+ those credentials in the return value.
+
+ :return: keyword arguments for boto3 connection functions
+ :rtype: dict
"""
+ kwargs = {'region_name': self.region}
if self.account_id is not None:
if Connectable.credentials is None:
- logger.debug("Connecting to %s for account %s (STS; %s)",
- self.service_name, self.account_id, self.region)
- Connectable.credentials = self._get_sts_token()
+ logger.debug("Connecting for account %s role '%s' with STS "
+ "(region: %s)", self.account_id, self.account_role,
+ self.region)
+ Connectable.credentials = self._get_sts_token_boto3()
else:
logger.debug("Reusing previous STS credentials for account %s",
self.account_id)
-
- conn = driver(
- self.region,
- aws_access_key_id=Connectable.credentials.access_key,
- aws_secret_access_key=Connectable.credentials.secret_key,
- security_token=Connectable.credentials.session_token)
+ kwargs['aws_access_key_id'] = Connectable.credentials.access_key
+ kwargs['aws_secret_access_key'] = Connectable.credentials.secret_key
+ kwargs['aws_session_token'] = Connectable.credentials.session_token
else:
- logger.debug("Connecting to %s (%s)",
- self.service_name, self.region)
- conn = driver(self.region)
- logger.info("Connected to %s", self.service_name)
- return conn
+ logger.debug("Connecting to region %s", self.region)
+ return kwargs
- def _get_sts_token(self):
+ def connect(self):
+ """
+ Connect to an AWS API via boto3 low-level client and set ``self.conn``
+ to the `boto3.client `_ object
+ (a ``botocore.client.*`` instance). If ``self.conn`` is not None,
+ do nothing. This connects to the API name given by ``self.api_name``.
+
+ :returns: None
+ """
+ if self.conn is not None:
+ return
+ kwargs = self._boto3_connection_kwargs
+ self.conn = boto3.client(self.api_name, **kwargs)
+ logger.info("Connected to %s in region %s", self.api_name,
+ self.conn._client_config.region_name)
+
+ def connect_resource(self):
+ """
+ Connect to an AWS API via boto3 high-level resource connection and set
+ ``self.resource_conn`` to the `boto3.resource `_ object
+ (a ``boto3.resources.factory.*.ServiceResource`` instance).
+ If ``self.resource_conn`` is not None,
+ do nothing. This connects to the API name given by ``self.api_name``.
+
+ :returns: None
+ """
+ if self.resource_conn is not None:
+ return
+ kwargs = self._boto3_connection_kwargs
+ self.resource_conn = boto3.resource(self.api_name, **kwargs)
+ logger.info("Connected to %s (resource) in region %s", self.api_name,
+ self.resource_conn.meta.client._client_config.region_name)
+
+ def _get_sts_token_boto3(self):
"""
Assume a role via STS and return the credentials.
- First connect to STS via :py:func:`boto.sts.connect_to_region`, then
- assume a role using :py:meth:`boto.sts.STSConnection.assume_role`
+ First connect to STS via :py:func:`boto3.client`, then
+ assume a role using `boto3.STS.Client.assume_role `_
using ``self.account_id`` and ``self.account_role`` (and optionally
``self.external_id``, ``self.mfa_serial_number``, ``self.mfa_token``).
- Return the resulting :py:class:`boto.sts.credentials.Credentials`
+ Return the resulting :py:class:`~.ConnectableCredentials`
object.
:returns: STS assumed role credentials
- :rtype: :py:class:`boto.sts.credentials.Credentials`
+ :rtype: :py:class:`~.ConnectableCredentials`
"""
logger.debug("Connecting to STS in region %s", self.region)
- sts = boto.sts.connect_to_region(self.region)
+ sts = boto3.client('sts', region_name=self.region)
arn = "arn:aws:iam::%s:role/%s" % (self.account_id, self.account_role)
logger.debug("STS assume role for %s", arn)
- role = sts.assume_role(arn, "awslimitchecker",
- external_id=self.external_id,
- mfa_serial_number=self.mfa_serial_number,
- mfa_token=self.mfa_token)
+ role = sts.assume_role(RoleArn=arn,
+ RoleSessionName="awslimitchecker",
+ ExternalId=self.external_id,
+ SerialNumber=self.mfa_serial_number,
+ TokenCode=self.mfa_token)
+ creds = ConnectableCredentials(role)
logger.debug("Got STS credentials for role; access_key_id=%s",
- role.credentials.access_key)
- return role.credentials
+ creds.access_key)
+ return creds
diff --git a/awslimitchecker/runner.py b/awslimitchecker/runner.py
index 96807747..19221be7 100644
--- a/awslimitchecker/runner.py
+++ b/awslimitchecker/runner.py
@@ -50,10 +50,15 @@
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger()
-# suppress boto internal logging below WARNING level
-boto_log = logging.getLogger("boto")
-boto_log.setLevel(logging.WARNING)
-boto_log.propagate = True
+# suppress boto3 internal logging below WARNING level
+boto3_log = logging.getLogger("boto3")
+boto3_log.setLevel(logging.WARNING)
+boto3_log.propagate = True
+
+# suppress botocore internal logging below WARNING level
+botocore_log = logging.getLogger("botocore")
+botocore_log.setLevel(logging.WARNING)
+botocore_log.propagate = True
class Runner(object):
@@ -73,7 +78,7 @@ def parse_args(self, argv):
:returns: parsed arguments
:rtype: :py:class:`argparse.Namespace`
"""
- desc = 'Report on AWS service limits and usage via boto, optionally ' \
+ desc = 'Report on AWS service limits and usage via boto3, optionally ' \
'warn about any services with usage nearing or exceeding their' \
' limits. For further help, see ' \
''
diff --git a/awslimitchecker/services/autoscaling.py b/awslimitchecker/services/autoscaling.py
index c963b3aa..603bf54e 100644
--- a/awslimitchecker/services/autoscaling.py
+++ b/awslimitchecker/services/autoscaling.py
@@ -38,8 +38,6 @@
"""
import abc # noqa
-import boto
-import boto.ec2.autoscale
import logging
from .base import _AwsService
@@ -52,15 +50,7 @@
class _AutoscalingService(_AwsService):
service_name = 'AutoScaling'
-
- def connect(self):
- """Connect to API if not already connected; set self.conn."""
- if self.conn is not None:
- return
- elif self.region:
- self.conn = self.connect_via(boto.ec2.autoscale.connect_to_region)
- else:
- self.conn = boto.connect_autoscale()
+ api_name = 'autoscaling'
def find_usage(self):
"""
@@ -75,7 +65,12 @@ def find_usage(self):
self.limits['Auto Scaling groups']._add_current_usage(
len(
- boto_query_wrapper(self.conn.get_all_groups)
+ boto_query_wrapper(
+ self.conn.describe_auto_scaling_groups,
+ alc_marker_path=['NextToken'],
+ alc_data_path=['AutoScalingGroups'],
+ alc_marker_param='NextToken'
+ )['AutoScalingGroups']
),
aws_type='AWS::AutoScaling::AutoScalingGroup',
)
@@ -83,8 +78,11 @@ def find_usage(self):
self.limits['Launch configurations']._add_current_usage(
len(
boto_query_wrapper(
- self.conn.get_all_launch_configurations
- )
+ self.conn.describe_launch_configurations,
+ alc_marker_path=['NextToken'],
+ alc_data_path=['LaunchConfigurations'],
+ alc_marker_param='NextToken'
+ )['LaunchConfigurations']
),
aws_type='AWS::AutoScaling::LaunchConfiguration',
)
@@ -145,9 +143,8 @@ def _update_limits_from_api(self):
"""
self.connect()
logger.info("Querying EC2 DescribeAccountAttributes for limits")
- lims = boto_query_wrapper(self.conn.get_account_limits)
+ lims = boto_query_wrapper(self.conn.describe_account_limits)
self.limits['Auto Scaling groups']._set_api_limit(
- lims.max_autoscaling_groups)
+ lims['MaxNumberOfAutoScalingGroups'])
self.limits['Launch configurations']._set_api_limit(
- lims.max_launch_configurations
- )
+ lims['MaxNumberOfLaunchConfigurations'])
diff --git a/awslimitchecker/services/base.py b/awslimitchecker/services/base.py
index 2054a6d2..e718fab7 100644
--- a/awslimitchecker/services/base.py
+++ b/awslimitchecker/services/base.py
@@ -48,6 +48,7 @@ class _AwsService(Connectable):
__metaclass__ = abc.ABCMeta
service_name = 'baseclass'
+ api_name = 'baseclass'
def __init__(self, warning_threshold, critical_threshold, account_id=None,
account_role=None, region=None, external_id=None,
@@ -104,27 +105,9 @@ def __init__(self, warning_threshold, critical_threshold, account_id=None,
self.limits = {}
self.limits = self.get_limits()
self.conn = None
+ self.resource_conn = None
self._have_usage = False
- @abc.abstractmethod
- def connect(self):
- """
- If not already done, establish a connection to the relevant AWS service
- and save as ``self.conn``. If ``self.region`` is defined, call
- ``self.connect_via()`` (:py:meth:`~.Connectable.connect_via`)
- passing the appripriate boto ``connect_to_region()`` function as the
- argument, else call the boto.connect_SERVICE_NAME() method directly.
- """
- """
- if self.conn is not None:
- return
- elif self.region:
- self.conn = self.connect_via(boto.ec2.connect_to_region)
- else:
- self.conn = boto.connect_ec2()
- """
- raise NotImplementedError('abstract base class')
-
@abc.abstractmethod
def find_usage(self):
"""
diff --git a/awslimitchecker/services/ebs.py b/awslimitchecker/services/ebs.py
index 70f6b9ba..1ad2dc11 100644
--- a/awslimitchecker/services/ebs.py
+++ b/awslimitchecker/services/ebs.py
@@ -38,8 +38,6 @@
"""
import abc # noqa
-import boto
-import boto.ec2
import logging
from .base import _AwsService
@@ -52,15 +50,7 @@
class _EbsService(_AwsService):
service_name = 'EBS'
-
- def connect(self):
- """Connect to API if not already connected; set self.conn."""
- if self.conn is not None:
- return
- elif self.region:
- self.conn = self.connect_via(boto.ec2.connect_to_region)
- else:
- self.conn = boto.connect_ec2()
+ api_name = 'ec2'
def find_usage(self):
"""
@@ -85,21 +75,27 @@ def _find_usage_ebs(self):
gp_gb = 0
mag_gb = 0
logger.debug("Getting usage for EBS volumes")
- for vol in boto_query_wrapper(self.conn.get_all_volumes):
+ results = boto_query_wrapper(
+ self.conn.describe_volumes,
+ alc_marker_path=['NextToken'],
+ alc_data_path=['Volumes'],
+ alc_marker_param='NextToken'
+ )
+ for vol in results['Volumes']:
vols += 1
- if vol.type == 'io1':
- piops_gb += vol.size
- piops += vol.iops
- elif vol.type == 'gp2':
- gp_gb += vol.size
- elif vol.type == 'standard':
- mag_gb += vol.size
+ if vol['VolumeType'] == 'io1':
+ piops_gb += vol['Size']
+ piops += vol['Iops']
+ elif vol['VolumeType'] == 'gp2':
+ gp_gb += vol['Size']
+ elif vol['VolumeType'] == 'standard':
+ mag_gb += vol['Size']
else:
logger.error(
"ERROR - unknown volume type '%s' for volume %s;"
" not counting",
- vol.type,
- vol.id)
+ vol['VolumeType'],
+ vol['VolumeId'])
self.limits['Provisioned IOPS']._add_current_usage(
piops,
aws_type='AWS::EC2::Volume'
@@ -127,9 +123,15 @@ def _find_usage_ebs(self):
def _find_usage_snapshots(self):
"""find snapshot usage"""
logger.debug("Getting usage for EBS snapshots")
- snaps = boto_query_wrapper(self.conn.get_all_snapshots, owner='self')
+ snaps = boto_query_wrapper(
+ self.conn.describe_snapshots,
+ OwnerIds=['self'],
+ alc_marker_path=['NextToken'],
+ alc_data_path=['Snapshots'],
+ alc_marker_param='NextToken'
+ )
self.limits['Active snapshots']._add_current_usage(
- len(snaps),
+ len(snaps['Snapshots']),
aws_type='AWS::EC2::VolumeSnapshot'
)
diff --git a/awslimitchecker/services/ec2.py b/awslimitchecker/services/ec2.py
index a4e6ed7c..39b6712d 100644
--- a/awslimitchecker/services/ec2.py
+++ b/awslimitchecker/services/ec2.py
@@ -38,7 +38,6 @@
"""
import abc # noqa
-import boto
import logging
from collections import defaultdict
from copy import deepcopy
@@ -53,15 +52,7 @@
class _Ec2Service(_AwsService):
service_name = 'EC2'
-
- def connect(self):
- """Connect to API if not already connected; set self.conn."""
- if self.conn is not None:
- return
- elif self.region:
- self.conn = self.connect_via(boto.ec2.connect_to_region)
- else:
- self.conn = boto.connect_ec2()
+ api_name = 'ec2'
def find_usage(self):
"""
@@ -71,6 +62,7 @@ def find_usage(self):
"""
logger.debug("Checking usage for service %s", self.service_name)
self.connect()
+ self.connect_resource()
for lim in self.limits.values():
lim._reset_usage()
self._find_usage_instances()
@@ -130,15 +122,19 @@ def _get_reserved_instance_count(self):
reservations = defaultdict(int)
az_to_res = {}
logger.debug("Getting reserved instance information")
- res = boto_query_wrapper(self.conn.get_all_reserved_instances)
- for x in res:
- if x.state != 'active':
+ res = boto_query_wrapper(
+ self.conn.describe_reserved_instances,
+ alc_no_paginate=True
+ )
+ for x in res['ReservedInstances']:
+ if x['State'] != 'active':
logger.debug("Skipping ReservedInstance %s with state %s",
- x.id, x.state)
+ x['ReservedInstancesId'], x['State'])
continue
- if x.availability_zone not in az_to_res:
- az_to_res[x.availability_zone] = deepcopy(reservations)
- az_to_res[x.availability_zone][x.instance_type] += x.instance_count
+ if x['AvailabilityZone'] not in az_to_res:
+ az_to_res[x['AvailabilityZone']] = deepcopy(reservations)
+ az_to_res[x['AvailabilityZone']][
+ x['InstanceType']] += x['InstanceCount']
# flatten and return
for x in az_to_res:
az_to_res[x] = dict(az_to_res[x])
@@ -159,24 +155,25 @@ def _instance_usage(self):
ondemand[t] = 0
az_to_inst = {}
logger.debug("Getting usage for on-demand instances")
- for res in boto_query_wrapper(self.conn.get_all_reservations):
- for inst in res.instances:
- if inst.spot_instance_request_id:
- logger.warning("Spot instance found (%s); awslimitchecker "
- "does not yet support spot "
- "instances.", inst.id)
- continue
- if inst.state in ['stopped', 'terminated']:
- logger.debug("Ignoring instance %s in state %s", inst.id,
- inst.state)
- continue
- if inst.placement not in az_to_inst:
- az_to_inst[inst.placement] = deepcopy(ondemand)
- try:
- az_to_inst[inst.placement][inst.instance_type] += 1
- except KeyError:
- logger.error("ERROR - unknown instance type '%s'; not "
- "counting", inst.instance_type)
+ for inst in self.resource_conn.instances.all():
+ if inst.spot_instance_request_id:
+ logger.warning("Spot instance found (%s); awslimitchecker "
+ "does not yet support spot "
+ "instances.", inst.id)
+ continue
+ if inst.state['Name'] in ['stopped', 'terminated']:
+ logger.debug("Ignoring instance %s in state %s", inst.id,
+ inst.state['Name'])
+ continue
+ if inst.placement['AvailabilityZone'] not in az_to_inst:
+ az_to_inst[
+ inst.placement['AvailabilityZone']] = deepcopy(ondemand)
+ try:
+ az_to_inst[
+ inst.placement['AvailabilityZone']][inst.instance_type] += 1
+ except KeyError:
+ logger.error("ERROR - unknown instance type '%s'; not "
+ "counting", inst.instance_type)
return az_to_inst
def get_limits(self):
@@ -201,11 +198,13 @@ def _update_limits_from_api(self):
with the quotas returned. Updates ``self.limits``.
"""
self.connect()
+ self.connect_resource()
logger.info("Querying EC2 DescribeAccountAttributes for limits")
- attribs = boto_query_wrapper(self.conn.describe_account_attributes)
- for attrib in attribs:
- aname = attrib.attribute_name
- val = attrib.attribute_values[0]
+ # no need to paginate
+ attribs = self.conn.describe_account_attributes()
+ for attrib in attribs['AccountAttributes']:
+ aname = attrib['AttributeName']
+ val = attrib['AttributeValues'][0]['AttributeValue']
lname = None
if aname == 'max-elastic-ips':
lname = 'Elastic IP addresses (EIPs)'
@@ -281,10 +280,10 @@ def _find_usage_networking_sgs(self):
logger.debug("Getting usage for EC2 VPC resources")
sgs_per_vpc = defaultdict(int)
rules_per_sg = defaultdict(int)
- for sg in boto_query_wrapper(self.conn.get_all_security_groups):
+ for sg in self.resource_conn.security_groups.all():
if sg.vpc_id is not None:
sgs_per_vpc[sg.vpc_id] += 1
- rules_per_sg[sg.id] = len(sg.rules)
+ rules_per_sg[sg.id] = len(sg.ip_permissions)
# set usage
for vpc_id, count in sgs_per_vpc.items():
self.limits['Security groups per VPC']._add_current_usage(
@@ -301,22 +300,25 @@ def _find_usage_networking_sgs(self):
def _find_usage_networking_eips(self):
logger.debug("Getting usage for EC2 EIPs")
- addrs = boto_query_wrapper(self.conn.get_all_addresses)
+ vpc_addrs = self.resource_conn.vpc_addresses.all()
self.limits['VPC Elastic IP addresses (EIPs)']._add_current_usage(
- sum(1 for a in addrs if a.domain == 'vpc'),
+ sum(1 for a in vpc_addrs if a.domain == 'vpc'),
aws_type='AWS::EC2::EIP',
)
# the EC2 limits screen calls this 'EC2-Classic Elastic IPs'
# but Trusted Advisor just calls it 'Elastic IP addresses (EIPs)'
+ classic_addrs = self.resource_conn.classic_addresses.all()
self.limits['Elastic IP addresses (EIPs)']._add_current_usage(
- sum(1 for a in addrs if a.domain == 'standard'),
+ sum(1 for a in classic_addrs if a.domain == 'standard'),
aws_type='AWS::EC2::EIP',
)
def _find_usage_networking_eni_sg(self):
logger.debug("Getting usage for EC2 Network Interfaces")
- ints = boto_query_wrapper(self.conn.get_all_network_interfaces)
+ ints = self.resource_conn.network_interfaces.all()
for iface in ints:
+ if iface.vpc is None:
+ continue
self.limits['VPC security groups per elastic network '
'interface']._add_current_usage(
len(iface.groups),
diff --git a/awslimitchecker/services/elasticache.py b/awslimitchecker/services/elasticache.py
index ee4dd1fe..051fc549 100644
--- a/awslimitchecker/services/elasticache.py
+++ b/awslimitchecker/services/elasticache.py
@@ -38,14 +38,11 @@
"""
import abc # noqa
-import boto.elasticache
-from boto.elasticache.layer1 import ElastiCacheConnection
-from boto.exception import BotoServerError
+from botocore.exceptions import ClientError
import logging
from .base import _AwsService
from ..limit import AwsLimit
-from ..utils import boto_query_wrapper
logger = logging.getLogger(__name__)
@@ -53,15 +50,7 @@
class _ElastiCacheService(_AwsService):
service_name = 'ElastiCache'
-
- def connect(self):
- """Connect to API if not already connected; set self.conn."""
- if self.conn is not None:
- return
- elif self.region:
- self.conn = self.connect_via(boto.elasticache.connect_to_region)
- else:
- self.conn = ElastiCacheConnection()
+ api_name = 'elasticache'
def find_usage(self):
"""
@@ -83,40 +72,29 @@ def find_usage(self):
def _find_usage_nodes(self):
"""find usage for cache nodes"""
nodes = 0
- clusters = boto_query_wrapper(
- self.conn.describe_cache_clusters,
- show_cache_node_info=True,
- alc_marker_path=[
- 'DescribeCacheClustersResponse',
- 'DescribeCacheClustersResult',
- 'Marker'
- ],
- alc_data_path=[
- 'DescribeCacheClustersResponse',
- 'DescribeCacheClustersResult',
- 'CacheClusters'
- ],
- alc_marker_param='marker'
- )[
- 'DescribeCacheClustersResponse']['DescribeCacheClustersResult'][
- 'CacheClusters']
- for cluster in clusters:
- try:
- num_nodes = len(cluster['CacheNodes'])
- except (IndexError, TypeError):
- # sometimes CacheNodes is None...
- logger.debug("Cache Cluster '%s' returned dict with CacheNodes "
- "None", cluster['CacheClusterId'])
- num_nodes = cluster['NumCacheNodes']
- nodes += num_nodes
- self.limits['Nodes per Cluster']._add_current_usage(
- num_nodes,
- aws_type='AWS::ElastiCache::CacheCluster',
- resource_id=cluster['CacheClusterId'],
- )
+ num_clusters = 0
+ # this boto3 class has a paginator, so no need for boto_query_wrapper
+ paginator = self.conn.get_paginator('describe_cache_clusters')
+ for page in paginator.paginate(ShowCacheNodeInfo=True):
+ for cluster in page['CacheClusters']:
+ try:
+ num_nodes = len(cluster['CacheNodes'])
+ except (IndexError, TypeError):
+ # sometimes CacheNodes is None...
+ logger.debug(
+ "Cache Cluster '%s' returned dict with CacheNodes "
+ "None", cluster['CacheClusterId'])
+ num_nodes = cluster['NumCacheNodes']
+ nodes += num_nodes
+ num_clusters += 1
+ self.limits['Nodes per Cluster']._add_current_usage(
+ num_nodes,
+ aws_type='AWS::ElastiCache::CacheCluster',
+ resource_id=cluster['CacheClusterId'],
+ )
self.limits['Clusters']._add_current_usage(
- len(clusters),
+ num_clusters,
aws_type='AWS::ElastiCache::CacheCluster'
)
self.limits['Nodes']._add_current_usage(
@@ -126,85 +104,54 @@ def _find_usage_nodes(self):
def _find_usage_subnet_groups(self):
"""find usage for elasticache subnet groups"""
- groups = boto_query_wrapper(
- self.conn.describe_cache_subnet_groups,
- alc_marker_path=[
- 'DescribeCacheSubnetGroupsResponse',
- 'DescribeCacheSubnetGroupsResult',
- 'Marker'
- ],
- alc_data_path=[
- 'DescribeCacheSubnetGroupsResponse',
- 'DescribeCacheSubnetGroupsResult',
- 'CacheSubnetGroups'
- ],
- alc_marker_param='marker'
- )[
- 'DescribeCacheSubnetGroupsResponse'][
- 'DescribeCacheSubnetGroupsResult'][
- 'CacheSubnetGroups']
+ num_groups = 0
+ # this boto3 class has a paginator, so no need for boto_query_wrapper
+ paginator = self.conn.get_paginator('describe_cache_subnet_groups')
+ for page in paginator.paginate():
+ for group in page['CacheSubnetGroups']:
+ num_groups += 1
self.limits['Subnet Groups']._add_current_usage(
- len(groups),
+ num_groups,
aws_type='AWS::ElastiCache::SubnetGroup'
)
def _find_usage_parameter_groups(self):
"""find usage for elasticache parameter groups"""
- groups = boto_query_wrapper(
- self.conn.describe_cache_parameter_groups,
- alc_marker_path=[
- 'DescribeCacheParameterGroupsResponse',
- 'DescribeCacheParameterGroupsResult',
- 'Marker'
- ],
- alc_data_path=[
- 'DescribeCacheParameterGroupsResponse',
- 'DescribeCacheParameterGroupsResult',
- 'CacheParameterGroups'
- ],
- alc_marker_param='marker'
- )[
- 'DescribeCacheParameterGroupsResponse'][
- 'DescribeCacheParameterGroupsResult'][
- 'CacheParameterGroups']
+ num_groups = 0
+ # this boto3 class has a paginator, so no need for boto_query_wrapper
+ paginator = self.conn.get_paginator('describe_cache_parameter_groups')
+ for page in paginator.paginate():
+ for group in page['CacheParameterGroups']:
+ num_groups += 1
self.limits['Parameter Groups']._add_current_usage(
- len(groups),
+ num_groups,
aws_type='AWS::ElastiCache::ParameterGroup'
)
def _find_usage_security_groups(self):
"""find usage for elasticache security groups"""
+ num_groups = 0
+ # If EC2-Classic isn't available (e.g., a new account)
+ # this method will fail with:
+ # Code: "InvalidParameterValue"
+ # Message: "Use of cache security groups is not permitted in
+ # this API version for your account."
+ # Type: "Sender"
try:
- # If EC2-Classic isn't available (e.g., a new account)
- # this method will fail with:
- # Code: "InvalidParameterValue"
- # Message: "Use of cache security groups is not permitted in
- # this API version for your account."
- # Type: "Sender"
- groups = boto_query_wrapper(
- self.conn.describe_cache_security_groups,
- alc_marker_path=[
- 'DescribeCacheSecurityGroupsResponse',
- 'DescribeCacheSecurityGroupsResult',
- 'Marker'
- ],
- alc_data_path=[
- 'DescribeCacheSecurityGroupsResponse',
- 'DescribeCacheSecurityGroupsResult',
- 'CacheSecurityGroups'
- ],
- alc_marker_param='marker'
- )[
- 'DescribeCacheSecurityGroupsResponse'][
- 'DescribeCacheSecurityGroupsResult'][
- 'CacheSecurityGroups']
- except BotoServerError:
- logger.debug("caught BotoServerError checking ElastiCache security "
+ # this boto3 class has a paginator, no need for boto_query_wrapper
+ paginator = self.conn.get_paginator(
+ 'describe_cache_security_groups')
+ for page in paginator.paginate():
+ for secgroup in page['CacheSecurityGroups']:
+ num_groups += 1
+ except ClientError as ex:
+ if ex.response['Error']['Code'] != 'InvalidParameterValue':
+ raise ex
+ logger.debug("caught ClientError checking ElastiCache security "
"groups (account without EC2-Classic?)")
- groups = []
self.limits['Security Groups']._add_current_usage(
- len(groups),
+ num_groups,
aws_type='WS::ElastiCache::SecurityGroup'
)
diff --git a/awslimitchecker/services/elb.py b/awslimitchecker/services/elb.py
index d7912ec7..0ee6b197 100644
--- a/awslimitchecker/services/elb.py
+++ b/awslimitchecker/services/elb.py
@@ -38,8 +38,6 @@
"""
import abc # noqa
-import boto
-import boto.ec2.elb
import logging
from .base import _AwsService
@@ -52,15 +50,7 @@
class _ElbService(_AwsService):
service_name = 'ELB'
-
- def connect(self):
- """Connect to API if not already connected; set self.conn."""
- if self.conn is not None:
- return
- elif self.region:
- self.conn = self.connect_via(boto.ec2.elb.connect_to_region)
- else:
- self.conn = boto.connect_elb()
+ api_name = 'elb'
def find_usage(self):
"""
@@ -72,16 +62,21 @@ def find_usage(self):
self.connect()
for lim in self.limits.values():
lim._reset_usage()
- lbs = boto_query_wrapper(self.conn.get_all_load_balancers)
+ lbs = boto_query_wrapper(
+ self.conn.describe_load_balancers,
+ alc_marker_path=['NextMarker'],
+ alc_data_path=['LoadBalancerDescriptions'],
+ alc_marker_param='Marker'
+ )
self.limits['Active load balancers']._add_current_usage(
- len(lbs),
+ len(lbs['LoadBalancerDescriptions']),
aws_type='AWS::ElasticLoadBalancing::LoadBalancer',
)
- for lb in lbs:
+ for lb in lbs['LoadBalancerDescriptions']:
self.limits['Listeners per load balancer']._add_current_usage(
- len(lb.listeners),
+ len(lb['ListenerDescriptions']),
aws_type='AWS::ElasticLoadBalancing::LoadBalancer',
- resource_id=lb.name,
+ resource_id=lb['LoadBalancerName'],
)
self._have_usage = True
logger.debug("Done checking usage.")
diff --git a/awslimitchecker/services/newservice.py.example b/awslimitchecker/services/newservice.py.example
index be5529ae..7eaec78c 100644
--- a/awslimitchecker/services/newservice.py.example
+++ b/awslimitchecker/services/newservice.py.example
@@ -38,7 +38,6 @@ Jason Antman
"""
import abc # noqa
-import boto
import logging
from .base import _AwsService
@@ -51,19 +50,7 @@ logger = logging.getLogger(__name__)
class _XXNewServiceXXService(_AwsService):
service_name = 'XXNewServiceXX'
-
- def connect(self):
- """Connect to API if not already connected; set self.conn."""
- if self.conn is not None:
- return
- # TODO: set this to the correct connection methods:
- elif self.region:
- self.conn = self.connect_via(boto.XXnewserviceXX.connect_to_region)
- else:
- logger.debug("Connecting to %s (no region specified)",
- self.service_name)
- self.conn = boto.connect_XXnewserviceXX()
- logger.info("Connected to %s", self.service_name)
+ api_name = 'XXnewserviceXX' # AWS API name to connect to (boto3.client)
def find_usage(self):
"""
@@ -78,6 +65,9 @@ class _XXNewServiceXXService(_AwsService):
# TODO: update your usage here, i.e.:
"""
u = boto_query_wrapper(self.conn.some_method) # count of something, from boto
+ # be sure to set the pagination parameters per
+ # awslimitchecker.utils.boto_query_wrapper and
+ # awslimitchecker.utils.paginate_query
u_id = (resource id from AWS)
self.limits['Number of u']._add_current_usage(u, aws_type='U', id=u_id)
"""
diff --git a/awslimitchecker/services/rds.py b/awslimitchecker/services/rds.py
index 48d41b85..87ad8a02 100644
--- a/awslimitchecker/services/rds.py
+++ b/awslimitchecker/services/rds.py
@@ -38,13 +38,10 @@
"""
import abc # noqa
-import boto
-import boto.rds2
import logging
from .base import _AwsService
from ..limit import AwsLimit
-from ..utils import boto_query_wrapper
logger = logging.getLogger(__name__)
@@ -52,15 +49,7 @@
class _RDSService(_AwsService):
service_name = 'RDS'
-
- def connect(self):
- """Connect to API if not already connected; set self.conn."""
- if self.conn is not None:
- return
- elif self.region:
- self.conn = self.connect_via(boto.rds2.connect_to_region)
- else:
- self.conn = boto.connect_rds2()
+ api_name = 'rds'
def find_usage(self):
"""
@@ -85,40 +74,25 @@ def find_usage(self):
def _find_usage_instances(self):
"""find usage for DB Instances and related limits"""
- # instance count
- instances = boto_query_wrapper(
- self.conn.describe_db_instances,
- alc_marker_path=[
- 'DescribeDBInstancesResponse',
- 'DescribeDBInstancesResult',
- 'Marker'
- ],
- alc_data_path=[
- 'DescribeDBInstancesResponse',
- 'DescribeDBInstancesResult',
- 'DBInstances'
- ],
- alc_marker_param='marker'
- )
- instances = instances[
- 'DescribeDBInstancesResponse'][
- 'DescribeDBInstancesResult']['DBInstances']
+ count = 0
+ allocated_gb = 0
+ # this boto3 class has a paginator, so no need for boto_query_wrapper
+ paginator = self.conn.get_paginator('describe_db_instances')
+ for page in paginator.paginate():
+ for instance in page['DBInstances']:
+ count += 1
+ allocated_gb += instance['AllocatedStorage']
+ self.limits['Read replicas per master']._add_current_usage(
+ len(instance['ReadReplicaDBInstanceIdentifiers']),
+ aws_type='AWS::RDS::DBInstance',
+ resource_id=instance['DBInstanceIdentifier']
+ )
+
self.limits['DB instances']._add_current_usage(
- len(instances),
+ count,
aws_type='AWS::RDS::DBInstance'
)
- # per-instance limits
- allocated_gb = 0
- for i in instances:
- allocated_gb += i['AllocatedStorage']
- self.limits['Read replicas per master']._add_current_usage(
- len(i['ReadReplicaDBInstanceIdentifiers']),
- aws_type='AWS::RDS::DBInstance',
- resource_id=i['DBInstanceIdentifier']
- )
-
- # overall storage quota
self.limits['Storage quota (GB)']._add_current_usage(
allocated_gb,
aws_type='AWS::RDS::DBInstance'
@@ -126,108 +100,57 @@ def _find_usage_instances(self):
def _find_usage_reserved_instances(self):
"""find usage for reserved instances"""
- reserved = boto_query_wrapper(
- self.conn.describe_reserved_db_instances,
- alc_marker_path=[
- 'DescribeReservedDBInstancesResponse',
- 'DescribeReservedDBInstancesResult',
- "Marker"
- ],
- alc_data_path=[
- 'DescribeReservedDBInstancesResponse',
- 'DescribeReservedDBInstancesResult',
- 'ReservedDBInstances'
- ],
- alc_marker_param='marker'
- )[
- 'DescribeReservedDBInstancesResponse'][
- 'DescribeReservedDBInstancesResult'][
- 'ReservedDBInstances']
+ count = 0
+ # this boto3 class has a paginator, so no need for boto_query_wrapper
+ paginator = self.conn.get_paginator('describe_reserved_db_instances')
+ for page in paginator.paginate():
+ for inst in page['ReservedDBInstances']:
+ count += 1
self.limits['Reserved Instances']._add_current_usage(
- len(reserved),
+ count,
aws_type='AWS::RDS::DBInstance'
)
def _find_usage_snapshots(self):
"""find usage for (manual) DB snapshots"""
- snaps = boto_query_wrapper(
- self.conn.describe_db_snapshots,
- alc_marker_path=[
- "DescribeDBSnapshotsResponse",
- "DescribeDBSnapshotsResult",
- 'Marker'
- ],
- alc_data_path=[
- "DescribeDBSnapshotsResponse",
- "DescribeDBSnapshotsResult",
- "DBSnapshots"
- ],
- alc_marker_param='marker'
- )
- snaps = snaps[
- "DescribeDBSnapshotsResponse"]["DescribeDBSnapshotsResult"][
- "DBSnapshots"]
- num_manual_snaps = 0
- for snap in snaps:
- if snap['SnapshotType'] == 'manual':
- num_manual_snaps += 1
+ count = 0
+ # this boto3 class has a paginator, so no need for boto_query_wrapper
+ paginator = self.conn.get_paginator('describe_db_snapshots')
+ for page in paginator.paginate():
+ for snap in page['DBSnapshots']:
+ if snap['SnapshotType'] == 'manual':
+ count += 1
self.limits['DB snapshots per user']._add_current_usage(
- num_manual_snaps,
+ count,
aws_type='AWS::RDS::DBSnapshot'
)
def _find_usage_param_groups(self):
"""find usage for parameter groups"""
- params = boto_query_wrapper(
- self.conn.describe_db_parameter_groups,
- alc_marker_path=[
- "DescribeDBParameterGroupsResponse",
- "DescribeDBParameterGroupsResult",
- 'Marker'
- ],
- alc_data_path=[
- "DescribeDBParameterGroupsResponse",
- "DescribeDBParameterGroupsResult",
- "DBParameterGroups"
- ],
- alc_marker_param='marker'
- )
- params = params[
- "DescribeDBParameterGroupsResponse"][
- "DescribeDBParameterGroupsResult"][
- "DBParameterGroups"]
+ count = 0
+ # this boto3 class has a paginator, so no need for boto_query_wrapper
+ paginator = self.conn.get_paginator('describe_db_parameter_groups')
+ for page in paginator.paginate():
+ for group in page['DBParameterGroups']:
+ count += 1
self.limits['DB parameter groups']._add_current_usage(
- len(params),
+ count,
aws_type='AWS::RDS::DBParameterGroup'
)
def _find_usage_subnet_groups(self):
"""find usage for subnet groups"""
- groups = boto_query_wrapper(
- self.conn.describe_db_subnet_groups,
- alc_marker_path=[
- "DescribeDBSubnetGroupsResponse",
- "DescribeDBSubnetGroupsResult",
- "Marker"
- ],
- alc_data_path=[
- "DescribeDBSubnetGroupsResponse",
- "DescribeDBSubnetGroupsResult",
- "DBSubnetGroups"
- ],
- alc_marker_param='marker'
- )[
- "DescribeDBSubnetGroupsResponse"][
- "DescribeDBSubnetGroupsResult"][
- "DBSubnetGroups"]
count = 0
- for group in groups:
- count += 1
- self.limits['Subnets per Subnet Group']._add_current_usage(
- len(group['Subnets']),
- aws_type='AWS::RDS::DBSubnetGroup',
- resource_id=group["DBSubnetGroupName"],
- )
+ # this boto3 class has a paginator, so no need for boto_query_wrapper
+ paginator = self.conn.get_paginator('describe_db_subnet_groups')
+ for page in paginator.paginate():
+ for group in page['DBSubnetGroups']:
+ count += 1
+ self.limits['Subnets per Subnet Group']._add_current_usage(
+ len(group['Subnets']),
+ aws_type='AWS::RDS::DBSubnetGroup',
+ resource_id=group["DBSubnetGroupName"],
+ )
self.limits['Subnet Groups']._add_current_usage(
count,
aws_type='AWS::RDS::DBSubnetGroup',
@@ -235,82 +158,47 @@ def _find_usage_subnet_groups(self):
def _find_usage_option_groups(self):
"""find usage for option groups"""
- groups = boto_query_wrapper(
- self.conn.describe_option_groups,
- alc_marker_path=[
- "DescribeOptionGroupsResponse",
- "DescribeOptionGroupsResult",
- "Marker"
- ],
- alc_data_path=[
- "DescribeOptionGroupsResponse",
- "DescribeOptionGroupsResult",
- "OptionGroupsList"
- ],
- alc_marker_param='marker'
- )[
- "DescribeOptionGroupsResponse"][
- "DescribeOptionGroupsResult"]["OptionGroupsList"]
+ count = 0
+ # this boto3 class has a paginator, so no need for boto_query_wrapper
+ paginator = self.conn.get_paginator('describe_option_groups')
+ for page in paginator.paginate():
+ for group in page['OptionGroupsList']:
+ count += 1
self.limits['Option Groups']._add_current_usage(
- len(groups),
+ count,
aws_type='AWS::RDS::DBOptionGroup',
)
def _find_usage_event_subscriptions(self):
"""find usage for event subscriptions"""
- subs = boto_query_wrapper(
- self.conn.describe_event_subscriptions,
- alc_marker_path=[
- "DescribeEventSubscriptionsResponse",
- "DescribeEventSubscriptionsResult",
- "Marker"
- ],
- alc_data_path=[
- "DescribeEventSubscriptionsResponse",
- "DescribeEventSubscriptionsResult",
- "EventSubscriptionsList"
- ],
- alc_marker_param='marker'
- )[
- "DescribeEventSubscriptionsResponse"][
- "DescribeEventSubscriptionsResult"][
- "EventSubscriptionsList"]
+ count = 0
+ # this boto3 class has a paginator, so no need for boto_query_wrapper
+ paginator = self.conn.get_paginator('describe_event_subscriptions')
+ for page in paginator.paginate():
+ for group in page['EventSubscriptionsList']:
+ count += 1
self.limits['Event Subscriptions']._add_current_usage(
- len(subs),
+ count,
aws_type='AWS::RDS::EventSubscription',
)
def _find_usage_security_groups(self):
"""find usage for security groups"""
- groups = boto_query_wrapper(
- self.conn.describe_db_security_groups,
- alc_marker_path=[
- "DescribeDBSecurityGroupsResponse",
- "DescribeDBSecurityGroupsResult",
- "Marker"
- ],
- alc_data_path=[
- "DescribeDBSecurityGroupsResponse",
- "DescribeDBSecurityGroupsResult",
- "DBSecurityGroups"
- ],
- alc_marker_param='marker'
- )[
- "DescribeDBSecurityGroupsResponse"][
- "DescribeDBSecurityGroupsResult"][
- "DBSecurityGroups"]
vpc_count = 0
classic_count = 0
- for group in groups:
- if group['VpcId'] is None:
- classic_count += 1
- else:
- vpc_count += 1
- self.limits['Max auths per security group']._add_current_usage(
- len(group["EC2SecurityGroups"]) + len(group["IPRanges"]),
- aws_type='AWS::RDS::DBSecurityGroup',
- resource_id=group['DBSecurityGroupName']
- )
+ # this boto3 class has a paginator, so no need for boto_query_wrapper
+ paginator = self.conn.get_paginator('describe_db_security_groups')
+ for page in paginator.paginate():
+ for group in page['DBSecurityGroups']:
+ if 'VpcId' not in group or group['VpcId'] is None:
+ classic_count += 1
+ else:
+ vpc_count += 1
+ self.limits['Max auths per security group']._add_current_usage(
+ len(group["EC2SecurityGroups"]) + len(group["IPRanges"]),
+ aws_type='AWS::RDS::DBSecurityGroup',
+ resource_id=group['DBSecurityGroupName']
+ )
self.limits['DB security groups']._add_current_usage(
classic_count,
diff --git a/awslimitchecker/services/vpc.py b/awslimitchecker/services/vpc.py
index 33d5dcce..85046814 100644
--- a/awslimitchecker/services/vpc.py
+++ b/awslimitchecker/services/vpc.py
@@ -38,8 +38,6 @@
"""
import abc # noqa
-import boto
-import boto.vpc
import logging
from collections import defaultdict
@@ -53,15 +51,7 @@
class _VpcService(_AwsService):
service_name = 'VPC'
-
- def connect(self):
- """Connect to API if not already connected; set self.conn."""
- if self.conn is not None:
- return
- elif self.region:
- self.conn = self.connect_via(boto.vpc.connect_to_region)
- else:
- self.conn = boto.connect_vpc()
+ api_name = 'ec2'
def find_usage(self):
"""
@@ -84,9 +74,10 @@ def find_usage(self):
def _find_usage_vpcs(self):
"""find usage for VPCs"""
# overall number of VPCs
- vpcs = boto_query_wrapper(self.conn.get_all_vpcs)
+ vpcs = boto_query_wrapper(self.conn.describe_vpcs,
+ alc_no_paginate=True)
self.limits['VPCs']._add_current_usage(
- len(vpcs),
+ len(vpcs['Vpcs']),
aws_type='AWS::EC2::VPC'
)
@@ -94,9 +85,9 @@ def _find_usage_subnets(self):
"""find usage for Subnets"""
# subnets per VPC
subnets = defaultdict(int)
- for subnet in boto_query_wrapper(self.conn.get_all_subnets):
- # boto.vpc.subnet.Subnet
- subnets[subnet.vpc_id] += 1
+ for subnet in boto_query_wrapper(self.conn.describe_subnets,
+ alc_no_paginate=True)['Subnets']:
+ subnets[subnet['VpcId']] += 1
for vpc_id in subnets:
self.limits['Subnets per VPC']._add_current_usage(
subnets[vpc_id],
@@ -108,14 +99,15 @@ def _find_usage_ACLs(self):
"""find usage for ACLs"""
# Network ACLs per VPC
acls = defaultdict(int)
- for acl in boto_query_wrapper(self.conn.get_all_network_acls):
- # boto.vpc.networkacl.NetworkAcl
- acls[acl.vpc_id] += 1
+ result = boto_query_wrapper(self.conn.describe_network_acls,
+ alc_no_paginate=True)
+ for acl in result['NetworkAcls']:
+ acls[acl['VpcId']] += 1
# Rules per network ACL
self.limits['Rules per network ACL']._add_current_usage(
- len(acl.network_acl_entries),
+ len(acl['Entries']),
aws_type='AWS::EC2::NetworkAcl',
- resource_id=acl.id
+ resource_id=acl['NetworkAclId']
)
for vpc_id in acls:
self.limits['Network ACLs per VPC']._add_current_usage(
@@ -128,14 +120,15 @@ def _find_usage_route_tables(self):
"""find usage for route tables"""
# Route tables per VPC
tables = defaultdict(int)
- for table in boto_query_wrapper(self.conn.get_all_route_tables):
- # boto.vpc.routetable.RouteTable
- tables[table.vpc_id] += 1
+ result = boto_query_wrapper(self.conn.describe_route_tables,
+ alc_no_paginate=True)
+ for table in result['RouteTables']:
+ tables[table['VpcId']] += 1
# Entries per route table
self.limits['Entries per route table']._add_current_usage(
- len(table.routes),
+ len(table['Routes']),
aws_type='AWS::EC2::RouteTable',
- resource_id=table.id
+ resource_id=table['RouteTableId']
)
for vpc_id in tables:
self.limits['Route tables per VPC']._add_current_usage(
@@ -147,9 +140,10 @@ def _find_usage_route_tables(self):
def _find_usage_gateways(self):
"""find usage for Internet Gateways"""
# Internet gateways
- gws = boto_query_wrapper(self.conn.get_all_internet_gateways)
+ gws = boto_query_wrapper(self.conn.describe_internet_gateways,
+ alc_no_paginate=True)
self.limits['Internet gateways']._add_current_usage(
- len(gws),
+ len(gws['InternetGateways']),
aws_type='AWS::EC2::InternetGateway',
)
diff --git a/awslimitchecker/tests/services/result_fixtures.py b/awslimitchecker/tests/services/result_fixtures.py
new file mode 100644
index 00000000..7a8e7915
--- /dev/null
+++ b/awslimitchecker/tests/services/result_fixtures.py
@@ -0,0 +1,1596 @@
+"""
+awslimitchecker/tests/services/result_fixtures.py
+
+The latest version of this package is available at:
+
+
+################################################################################
+Copyright 2015 Jason Antman
+
+ This file is part of awslimitchecker, also known as awslimitchecker.
+
+ awslimitchecker is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ awslimitchecker is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with awslimitchecker. If not, see .
+
+The Copyright and Authors attributions contained herein may not be removed or
+otherwise altered, except to add the Author attribution of a contributor to
+this work. (Additional Terms pursuant to Section 7b of the AGPL v3)
+################################################################################
+While not legally required, I sincerely request that anyone who finds
+bugs please submit them at or
+to me via email, and that you send any contributions or improvements
+either as a pull request on GitHub, or to me via email.
+################################################################################
+
+AUTHORS:
+Jason Antman
+################################################################################
+"""
+
+import sys
+from datetime import datetime
+import boto3
+from boto3.utils import ServiceContext
+
+# https://code.google.com/p/mock/issues/detail?id=249
+# py>=3.4 should use unittest.mock not the mock package on pypi
+if (
+ sys.version_info[0] < 3 or
+ sys.version_info[0] == 3 and sys.version_info[1] < 4
+):
+ from mock import Mock
+else:
+ from unittest.mock import Mock
+
+# boto3 response fixtures
+
+
+def get_boto3_resource_model(service_name, resource_name):
+ """
+ Return a boto3 resource model class for the given service_name and
+ resource_name (type).
+
+ NOTE that when the boto3.session.Session object is instantiated, the
+ underlying botocore Session will attempt HTTP requests to 169.254.169.254
+ to retrieve Instance Metadata and an IAM Role. In order to prevent this,
+ you should simply export some bogus AWS credential environment variables.
+
+ :param service_name: name of the service
+ :type service_name: str
+ :param resource_name: name of the resource type/model to get
+ :type resource_name: str
+ :return: boto3 resource model class
+ """
+ session = boto3.session.Session(region_name='us-east-1')
+ loader = session._session.get_component('data_loader')
+ json_resource_model = loader.load_service_model(service_name,
+ 'resources-1')
+ service_resource = session.resource(service_name)
+ service_model = service_resource.meta.client.meta.service_model
+
+ resource_model = json_resource_model['resources'][resource_name]
+ resource_cls = session.resource_factory.load_from_definition(
+ resource_name=resource_name,
+ single_resource_json_definition=resource_model,
+ service_context=ServiceContext(
+ service_name=service_name,
+ resource_json_definitions=json_resource_model['resources'],
+ service_model=service_model,
+ service_waiter_model=None
+ )
+ )
+ return resource_cls
+
+# get some resource models for specs...
+Instance = get_boto3_resource_model('ec2', 'Instance')
+SecurityGroup = get_boto3_resource_model('ec2', 'SecurityGroup')
+ClassicAddress = get_boto3_resource_model('ec2', 'ClassicAddress')
+VpcAddress = get_boto3_resource_model('ec2', 'VpcAddress')
+NetworkInterface = get_boto3_resource_model('ec2', 'NetworkInterface')
+
+
+class EBS(object):
+
+ test_find_usage_ebs = {
+ 'Volumes': [
+ # 500G magnetic
+ {
+ 'VolumeId': 'vol-1',
+ 'Size': 500,
+ 'VolumeType': 'standard',
+ 'Iops': None,
+ # boilerplate sample response
+ 'SnapshotId': 'string',
+ 'AvailabilityZone': 'string',
+ 'State': 'available',
+ 'CreateTime': datetime(2015, 1, 1),
+ 'Attachments': [
+ {
+ 'VolumeId': 'string',
+ 'InstanceId': 'string',
+ 'Device': 'string',
+ 'State': 'attached',
+ 'AttachTime': datetime(2015, 1, 1),
+ 'DeleteOnTermination': True
+ },
+ ],
+ 'Tags': [
+ {
+ 'Key': 'string',
+ 'Value': 'string'
+ },
+ ],
+ 'Encrypted': False,
+ 'KmsKeyId': 'string'
+ },
+ # 8G magnetic
+ {
+ 'VolumeId': 'vol-2',
+ 'Size': 8,
+ 'VolumeType': 'standard',
+ 'Iops': None,
+ },
+ # 15G general purpose SSD, 45 IOPS
+ {
+ 'VolumeId': 'vol-3',
+ 'Size': 15,
+ 'VolumeType': 'gp2',
+ 'Iops': 45,
+ },
+ # 30G general purpose SSD, 90 IOPS
+ {
+ 'VolumeId': 'vol-4',
+ 'Size': 30,
+ 'VolumeType': 'gp2',
+ 'Iops': 90,
+ },
+ # 400G PIOPS, 700 IOPS
+ {
+ 'VolumeId': 'vol-5',
+ 'Size': 400,
+ 'VolumeType': 'io1',
+ 'Iops': 700,
+ },
+ # 100G PIOPS, 300 IOPS
+ {
+ 'VolumeId': 'vol-6',
+ 'Size': 100,
+ 'VolumeType': 'io1',
+ 'Iops': 300,
+ },
+ # othertype
+ {
+ 'VolumeId': 'vol-7',
+ 'VolumeType': 'othertype',
+ },
+ ]
+ }
+
+ test_find_usage_snapshots = {
+ 'Snapshots': [
+ {
+ 'SnapshotId': 'snap-1',
+ 'VolumeId': 'string',
+ 'State': 'completed',
+ 'StateMessage': 'string',
+ 'StartTime': datetime(2015, 1, 1),
+ 'Progress': 'string',
+ 'OwnerId': 'string',
+ 'Description': 'string',
+ 'VolumeSize': 123,
+ 'OwnerAlias': 'string',
+ 'Tags': [
+ {
+ 'Key': 'string',
+ 'Value': 'string'
+ },
+ ],
+ 'Encrypted': False,
+ 'KmsKeyId': 'string',
+ 'DataEncryptionKeyId': 'string'
+ },
+ {'SnapshotId': 'snap-2'},
+ {'SnapshotId': 'snap-3'},
+ ]
+ }
+
+
+class VPC(object):
+ test_find_usage_vpcs = {
+ 'Vpcs': [
+ {
+ 'VpcId': 'vpc-1',
+ 'State': 'available',
+ 'CidrBlock': 'string',
+ 'DhcpOptionsId': 'string',
+ 'Tags': [
+ {
+ 'Key': 'fooTag',
+ 'Value': 'fooVal'
+ },
+ ],
+ 'InstanceTenancy': 'default',
+ 'IsDefault': False
+ },
+ {'VpcId': 'vpc-2'},
+ ]
+ }
+
+ test_find_usage_subnets = {
+ 'Subnets': [
+ {
+ 'SubnetId': 'string',
+ 'State': 'available',
+ 'VpcId': 'vpc-1',
+ 'CidrBlock': 'string',
+ 'AvailableIpAddressCount': 123,
+ 'AvailabilityZone': 'string',
+ 'DefaultForAz': False,
+ 'MapPublicIpOnLaunch': True,
+ 'Tags': [
+ {
+ 'Key': 'tagKey',
+ 'Value': 'tagVal'
+ },
+ ]
+ },
+ {'VpcId': 'vpc-1'},
+ {'VpcId': 'vpc-2'},
+ ]
+ }
+
+ test_find_usage_acls = {
+ 'NetworkAcls': [
+ {
+ 'NetworkAclId': 'acl-1',
+ 'VpcId': 'vpc-1',
+ 'IsDefault': True,
+ 'Entries': [
+ {
+ 'RuleNumber': 123,
+ 'Protocol': 'string',
+ 'RuleAction': 'allow',
+ 'Egress': True,
+ 'CidrBlock': 'string',
+ 'IcmpTypeCode': {
+ 'Type': 123,
+ 'Code': 123
+ },
+ 'PortRange': {
+ 'From': 123,
+ 'To': 123
+ }
+ },
+ {
+ 'RuleNumber': 124,
+ 'Protocol': 'string',
+ 'RuleAction': 'allow',
+ 'Egress': False,
+ 'CidrBlock': 'string',
+ 'IcmpTypeCode': {
+ 'Type': 123,
+ 'Code': 123
+ },
+ 'PortRange': {
+ 'From': 124,
+ 'To': 124
+ }
+ },
+ {
+ 'RuleNumber': 125,
+ 'Protocol': 'string',
+ 'RuleAction': 'deny',
+ 'Egress': False,
+ 'CidrBlock': 'string',
+ 'IcmpTypeCode': {
+ 'Type': 123,
+ 'Code': 123
+ },
+ 'PortRange': {
+ 'From': 125,
+ 'To': 125
+ }
+ },
+ ],
+ 'Associations': [
+ {
+ 'NetworkAclAssociationId': 'string',
+ 'NetworkAclId': 'string',
+ 'SubnetId': 'string'
+ },
+ ],
+ 'Tags': [
+ {
+ 'Key': 'tagKey',
+ 'Value': 'tagVal'
+ },
+ ]
+ },
+ {
+ 'NetworkAclId': 'acl-2',
+ 'VpcId': 'vpc-1',
+ 'Entries': [1],
+ },
+ {
+ 'NetworkAclId': 'acl-3',
+ 'VpcId': 'vpc-2',
+ 'Entries': [1, 2, 3, 4, 5],
+ },
+ ]
+ }
+
+ test_find_usage_route_tables = {
+ 'RouteTables': [
+ {
+ 'RouteTableId': 'rt-1',
+ 'VpcId': 'vpc-1',
+ 'Routes': [
+ {
+ 'DestinationCidrBlock': 'string',
+ 'DestinationPrefixListId': 'string',
+ 'GatewayId': 'string',
+ 'InstanceId': 'string',
+ 'InstanceOwnerId': 'string',
+ 'NetworkInterfaceId': 'string',
+ 'VpcPeeringConnectionId': 'string',
+ 'NatGatewayId': 'string',
+ 'State': 'active',
+ 'Origin': 'CreateRouteTable'
+ },
+ {'foo': 'bar', 'baz': 'blam'},
+ {'foo': 'bar', 'baz': 'blam'},
+ ],
+ 'Associations': [
+ {
+ 'RouteTableAssociationId': 'string',
+ 'RouteTableId': 'string',
+ 'SubnetId': 'string',
+ 'Main': True
+ },
+ ],
+ 'Tags': [
+ {
+ 'Key': 'tagKey',
+ 'Value': 'tagVal'
+ },
+ ],
+ 'PropagatingVgws': [
+ {
+ 'GatewayId': 'string'
+ },
+ ]
+ },
+ {
+ 'RouteTableId': 'rt-2',
+ 'VpcId': 'vpc-1',
+ 'Routes': [
+ {'foo': 'bar', 'baz': 'blam'},
+ ],
+ },
+ {
+ 'RouteTableId': 'rt-3',
+ 'VpcId': 'vpc-2',
+ 'Routes': [
+ {'foo': 'bar', 'baz': 'blam'},
+ {'foo': 'bar', 'baz': 'blam'},
+ {'foo': 'bar', 'baz': 'blam'},
+ {'foo': 'bar', 'baz': 'blam'},
+ {'foo': 'bar', 'baz': 'blam'},
+ ],
+ }
+ ]
+ }
+
+ test_find_usage_internet_gateways = {
+ 'InternetGateways': [
+ {
+ 'InternetGatewayId': 'gw-1',
+ 'Attachments': [
+ {
+ 'VpcId': 'string',
+ 'State': 'attached'
+ },
+ ],
+ 'Tags': [
+ {
+ 'Key': 'tagKey',
+ 'Value': 'tagVal'
+ },
+ ]
+ },
+ {'InternetGatewayId': 'gw-2'}
+ ]
+ }
+
+
+class RDS(object):
+ test_find_usage_instances = []
+ # first result page
+ test_find_usage_instances.append({
+ 'DBInstances': [
+ {
+ 'PubliclyAccessible': False,
+ 'MasterUsername': 'myuser',
+ 'LicenseModel': 'general-public-license',
+ 'VpcSecurityGroups': [
+ {
+ 'Status': 'active',
+ 'VpcSecurityGroupId': 'sg-aaaaaaaa'
+ }
+ ],
+ 'InstanceCreateTime': 1429910904.366,
+ 'OptionGroupMemberships': [
+ {
+ 'Status': 'in-sync',
+ 'OptionGroupName': 'default:mysql-5-6'
+ }
+ ],
+ 'PendingModifiedValues': {
+ 'MultiAZ': None,
+ 'MasterUserPassword': None,
+ 'Port': None,
+ 'Iops': None,
+ 'AllocatedStorage': None,
+ 'EngineVersion': None,
+ 'BackupRetentionPeriod': None,
+ 'DBInstanceClass': None,
+ 'DBInstanceIdentifier': None
+ },
+ 'Engine': 'mysql',
+ 'MultiAZ': True,
+ 'LatestRestorableTime': 1435966800.0,
+ 'DBSecurityGroups': [
+ {
+ 'Status': 'active',
+ 'DBSecurityGroupName': 'mydb-dbsecuritygroup-aaaa'
+ }
+ ],
+ 'DBParameterGroups': [
+ {
+ 'DBParameterGroupName': 'default.mysql5.6',
+ 'ParameterApplyStatus': 'in-sync'
+ }
+ ],
+ 'ReadReplicaSourceDBInstanceIdentifier': None,
+ 'AutoMinorVersionUpgrade': True,
+ 'PreferredBackupWindow': '07:00-08:00',
+ 'DBSubnetGroup': {
+ 'VpcId': 'vpc-abcdef01',
+ 'Subnets': [
+ {
+ 'SubnetStatus': 'Active',
+ 'SubnetIdentifier': 'subnet-aaaaaaaa',
+ 'SubnetAvailabilityZone': {
+ 'Name': 'us-east-1d',
+ 'ProvisionedIopsCapable': False
+ }
+ },
+ {
+ 'SubnetStatus': 'Active',
+ 'SubnetIdentifier': 'subnet-22222222',
+ 'SubnetAvailabilityZone': {
+ 'Name': 'us-east-1a',
+ 'ProvisionedIopsCapable': False
+ }
+ }
+ ],
+ 'DBSubnetGroupName': 'mydb-dbsubnetgroup-abcd',
+ 'SubnetGroupStatus': 'Complete',
+ 'DBSubnetGroupDescription': 'Subnet group for RDS instance'
+ },
+ 'SecondaryAvailabilityZone': 'us-east-1a',
+ 'ReadReplicaDBInstanceIdentifiers': [],
+ 'AllocatedStorage': 200,
+ 'BackupRetentionPeriod': 7,
+ 'DBName': 'wordpress',
+ 'PreferredMaintenanceWindow': 'tue:08:00-tue:09:00',
+ 'Endpoint': {
+ 'Port': 3306,
+ 'Address': 'foo.bar.us-east-1.rds.amazonaws.com'
+ },
+ 'DBInstanceStatus': 'available',
+ 'StatusInfos': None,
+ 'EngineVersion': '5.6.22',
+ 'CharacterSetName': None,
+ 'AvailabilityZone': 'us-east-1d',
+ 'Iops': None,
+ 'DBInstanceClass': 'db.t2.small',
+ 'DBInstanceIdentifier': 'foo'
+ },
+ ],
+ 'NextToken': 'string'
+ })
+ # second result page
+ test_find_usage_instances.append({
+ 'DBInstances': [
+ {
+ 'PubliclyAccessible': False,
+ 'MasterUsername': 'myuser2',
+ 'LicenseModel': 'postgresql-license',
+ 'VpcSecurityGroups': [
+ {
+ 'Status': 'active',
+ 'VpcSecurityGroupId': 'sg-12345678'
+ }
+ ],
+ 'InstanceCreateTime': 1432238504.239,
+ 'OptionGroupMemberships': [
+ {
+ 'Status': 'in-sync',
+ 'OptionGroupName': 'default:postgres-9-3'
+ }
+ ],
+ 'PendingModifiedValues': {
+ 'MultiAZ': None,
+ 'MasterUserPassword': None,
+ 'Port': None,
+ 'Iops': None,
+ 'AllocatedStorage': None,
+ 'EngineVersion': None,
+ 'BackupRetentionPeriod': None,
+ 'DBInstanceClass': None,
+ 'DBInstanceIdentifier': None
+ },
+ 'Engine': 'postgres',
+ 'MultiAZ': False,
+ 'LatestRestorableTime': 1435966550.0,
+ 'DBSecurityGroups': [
+ {
+ 'Status': 'active',
+ 'DBSecurityGroupName': 'sg1234-dbsecuritygroup-abcd'
+ }
+ ],
+ 'DBParameterGroups': [
+ {
+ 'DBParameterGroupName': 'default.postgres9.3',
+ 'ParameterApplyStatus': 'in-sync'
+ }
+ ],
+ 'ReadReplicaSourceDBInstanceIdentifier': None,
+ 'AutoMinorVersionUpgrade': True,
+ 'PreferredBackupWindow': '03:09-03:39',
+ 'DBSubnetGroup': {
+ 'VpcId': 'vpc-87654321',
+ 'Subnets': [
+ {
+ 'SubnetStatus': 'Active',
+ 'SubnetIdentifier': 'subnet-a1234567',
+ 'SubnetAvailabilityZone': {
+ 'Name': 'us-east-1e',
+ 'ProvisionedIopsCapable': False
+ }
+ },
+ {
+ 'SubnetStatus': 'Active',
+ 'SubnetIdentifier': 'subnet-b1234567',
+ 'SubnetAvailabilityZone': {
+ 'Name': 'us-east-1a',
+ 'ProvisionedIopsCapable': False
+ }
+ },
+ {
+ 'SubnetStatus': 'Active',
+ 'SubnetIdentifier': 'subnet-c1234567',
+ 'SubnetAvailabilityZone': {
+ 'Name': 'us-east-1d',
+ 'ProvisionedIopsCapable': False
+ }
+ }
+ ],
+ 'DBSubnetGroupName': 'mydb-dbsubnetgroup-abcdef',
+ 'SubnetGroupStatus': 'Complete',
+ 'DBSubnetGroupDescription': 'Subnet group for RDS instance'
+ },
+ 'SecondaryAvailabilityZone': None,
+ 'ReadReplicaDBInstanceIdentifiers': ['db-123', 'db-456'],
+ 'AllocatedStorage': 50,
+ 'BackupRetentionPeriod': 1,
+ 'DBName': 'mydbname',
+ 'PreferredMaintenanceWindow': 'mon:05:11-mon:05:41',
+ 'Endpoint': {
+ 'Port': 5432,
+ 'Address': 'baz.blam.us-east-1.rds.amazonaws.com'
+ },
+ 'DBInstanceStatus': 'available',
+ 'StatusInfos': None,
+ 'EngineVersion': '9.3.6',
+ 'CharacterSetName': None,
+ 'AvailabilityZone': 'us-east-1a',
+ 'Iops': None,
+ 'DBInstanceClass': 'db.t2.small',
+ 'DBInstanceIdentifier': 'baz'
+ }
+ ]
+ })
+
+ test_find_usage_snapshots = []
+ # first result page
+ test_find_usage_snapshots.append({
+ "DBSnapshots": [
+ {
+ "AllocatedStorage": 100,
+ "AvailabilityZone": "us-east-1a",
+ "DBInstanceIdentifier": "foo-db",
+ "DBSnapshotIdentifier": "foo-db-final-snapshot",
+ "Engine": "postgres",
+ "EngineVersion": "9.3.3",
+ "InstanceCreateTime": 1408035263.101,
+ "Iops": 1000,
+ "LicenseModel": "postgresql-license",
+ "MasterUsername": "dbfoouser",
+ "OptionGroupName": "default:postgres-9-3",
+ "PercentProgress": 100,
+ "Port": 5432,
+ "SnapshotCreateTime": 1408454469.536,
+ "SnapshotType": "manual",
+ "SourceRegion": None,
+ "Status": "available",
+ "VpcId": None
+ },
+ {
+ "AllocatedStorage": 50,
+ "AvailabilityZone": "us-east-1d",
+ "DBInstanceIdentifier": "bd1t3lf90p3lqdx",
+ "DBSnapshotIdentifier":
+ "rds:bd1t3lf90p3lqdx-2015-06-29-07-02",
+ "Engine": "mysql",
+ "EngineVersion": "5.6.22",
+ "InstanceCreateTime": 1429910904.366,
+ "Iops": None,
+ "LicenseModel": "general-public-license",
+ "MasterUsername": "dbuser3",
+ "OptionGroupName": "default:mysql-5-6",
+ "PercentProgress": 100,
+ "Port": 3306,
+ "SnapshotCreateTime": 1435561349.441,
+ "SnapshotType": "automated",
+ "SourceRegion": None,
+ "Status": "available",
+ "VpcId": "vpc-1ee8937b"
+ },
+ ],
+ 'NextToken': 'string'
+ })
+ test_find_usage_snapshots.append({
+ 'DBSnapshots': [
+ {
+ "AllocatedStorage": 25,
+ "AvailabilityZone": "us-east-1d",
+ "DBInstanceIdentifier": "md1e8qwtegkjdgy",
+ "DBSnapshotIdentifier":
+ "rds:md1e8qwtegkjdgy-2015-06-29-07-06",
+ "Engine": "postgres",
+ "EngineVersion": "9.3.6",
+ "InstanceCreateTime": 1433883813.314,
+ "Iops": None,
+ "LicenseModel": "postgresql-license",
+ "MasterUsername": "dbuser4",
+ "OptionGroupName": "default:postgres-9-3",
+ "PercentProgress": 100,
+ "Port": 5432,
+ "SnapshotCreateTime": 1435561593.669,
+ "SnapshotType": "automated",
+ "SourceRegion": None,
+ "Status": "available",
+ "VpcId": "vpc-1ee8937b"
+ },
+ ],
+ })
+
+ test_find_usage_param_groups = []
+ test_find_usage_param_groups.append({
+ "DBParameterGroups": [
+ {
+ "DBParameterGroupFamily": "mysql5.6",
+ "DBParameterGroupName": "default.mysql5.6",
+ "Description":
+ "Default parameter group for mysql5.6"
+ },
+ ],
+ 'NextToken': 'string'
+ })
+ test_find_usage_param_groups.append({
+ 'DBParameterGroups': [
+ {
+ "DBParameterGroupFamily": "postgres9.3",
+ "DBParameterGroupName": "default.postgres9.3",
+ "Description":
+ "Default parameter group for postgres9.3"
+ }
+ ]
+ })
+
+ test_find_usage_subnet_groups = []
+ test_find_usage_subnet_groups.append({
+ "DBSubnetGroups": [
+ {
+ "DBSubnetGroupDescription":
+ "Subnet group for CloudFormation RDS instance",
+ "DBSubnetGroupName":
+ "SubnetGroup1",
+ "SubnetGroupStatus": "Complete",
+ "Subnets": [
+ {
+ "SubnetAvailabilityZone": {
+ "Name": "us-east-1d",
+ "ProvisionedIopsCapable": False
+ },
+ "SubnetIdentifier": "subnet-38e87861",
+ "SubnetStatus": "Active"
+ },
+ {
+ "SubnetAvailabilityZone": {
+ "Name": "us-east-1a",
+ "ProvisionedIopsCapable": False
+ },
+ "SubnetIdentifier": "subnet-4f027f38",
+ "SubnetStatus": "Active"
+ }
+ ],
+ "VpcId": "vpc-1ee8937b"
+ },
+ ],
+ 'NextToken': 'string'
+ })
+ test_find_usage_subnet_groups.append({
+ 'DBSubnetGroups': [
+ {
+ "DBSubnetGroupDescription":
+ "Created from the RDS Management Console",
+ "DBSubnetGroupName": "default",
+ "SubnetGroupStatus": "Complete",
+ "Subnets": [
+ {
+ "SubnetAvailabilityZone": {
+ "Name": "us-east-1e",
+ "ProvisionedIopsCapable": False
+ },
+ "SubnetIdentifier": "subnet-49071f61",
+ "SubnetStatus": "Active"
+ },
+ {
+ "SubnetAvailabilityZone": {
+ "Name": "us-east-1a",
+ "ProvisionedIopsCapable": False
+ },
+ "SubnetIdentifier": "subnet-6fe23c18",
+ "SubnetStatus": "Active"
+ },
+ {
+ "SubnetAvailabilityZone": {
+ "Name": "us-east-1d",
+ "ProvisionedIopsCapable": False
+ },
+ "SubnetIdentifier": "subnet-a9b54df0",
+ "SubnetStatus": "Active"
+ }
+ ],
+ "VpcId": "vpc-c300b9a6"
+ },
+ {
+ "DBSubnetGroupDescription":
+ "Subnet group for CloudFormation RDS instance",
+ "DBSubnetGroupName":
+ "SubnetGroup2",
+ "SubnetGroupStatus": "Complete",
+ "Subnets": [
+ {
+ "SubnetAvailabilityZone": {
+ "Name": "us-east-1a",
+ "ProvisionedIopsCapable": False
+ },
+ "SubnetIdentifier": "subnet-0b037e7c",
+ "SubnetStatus": "Active"
+ }
+ ],
+ "VpcId": "vpc-73ec9716"
+ },
+ ],
+ })
+
+ test_find_usage_option_groups = []
+ test_find_usage_option_groups.append({
+ "OptionGroupsList": [
+ {
+ "AllowsVpcAndNonVpcInstanceMemberships": True,
+ "EngineName": "mysql",
+ "MajorEngineVersion": "5.6",
+ "OptionGroupDescription":
+ "Default option group for mysql 5.6",
+ "OptionGroupName": "default:mysql-5-6",
+ "Options": [],
+ "VpcId": None
+ },
+ ],
+ 'NextToken': 'string'
+ })
+ test_find_usage_option_groups.append({
+ 'OptionGroupsList': [
+ {
+ "AllowsVpcAndNonVpcInstanceMemberships": True,
+ "EngineName": "postgres",
+ "MajorEngineVersion": "9.3",
+ "OptionGroupDescription":
+ "Default option group for postgres 9.3",
+ "OptionGroupName": "default:postgres-9-3",
+ "Options": [],
+ "VpcId": None
+ }
+ ]
+ })
+
+ test_find_usage_event_subscriptions = []
+ test_find_usage_event_subscriptions.append({
+ "EventSubscriptionsList": [
+ {
+ 'CustomerAwsId': 'string',
+ 'CustSubscriptionId': 'string',
+ 'SnsTopicArn': 'string',
+ 'Status': 'string',
+ 'SubscriptionCreationTime': 'string',
+ 'SourceType': 'string',
+ 'SourceIdsList': [
+ 'string',
+ ],
+ 'EventCategoriesList': [
+ 'string',
+ ],
+ 'Enabled': True
+ }
+ ],
+ "NextToken": 'string'
+ })
+ test_find_usage_event_subscriptions.append({
+ "EventSubscriptionsList": [
+ {
+ 'CustomerAwsId': 'string',
+ 'CustSubscriptionId': 'string',
+ 'SnsTopicArn': 'string',
+ 'Status': 'string',
+ 'SubscriptionCreationTime': 'string',
+ 'SourceType': 'string',
+ 'SourceIdsList': [
+ 'string',
+ ],
+ 'EventCategoriesList': [
+ 'string',
+ ],
+ 'Enabled': False
+ }
+ ]
+ })
+
+ test_find_usage_security_groups = []
+ test_find_usage_security_groups.append({
+ "DBSecurityGroups": [
+ {
+ "DBSecurityGroupDescription": "Frontend Access",
+ "DBSecurityGroupName":
+ "SecurityGroup1",
+ "EC2SecurityGroups": [
+ {
+ "EC2SecurityGroupId": "sg-c6dd95a2",
+ "EC2SecurityGroupName":
+ "EC2SG1",
+ "EC2SecurityGroupOwnerId": None,
+ "Status": "authorized"
+ }
+ ],
+ "IPRanges": [],
+ "OwnerId": "123456789012",
+ "VpcId": None
+ },
+ {
+ "DBSecurityGroupDescription":
+ "default:vpc-a926c2cc",
+ "DBSecurityGroupName": "default:vpc-a926c2cc",
+ "EC2SecurityGroups": [],
+ "IPRanges": [],
+ "OwnerId": "123456789012",
+ "VpcId": "vpc-a926c2cc"
+ },
+ ],
+ 'NextToken': 'string'
+ })
+ test_find_usage_security_groups.append({
+ 'DBSecurityGroups': [
+ {
+ "DBSecurityGroupDescription": "Frontend Access",
+ "DBSecurityGroupName": "SecurityGroup2",
+ "EC2SecurityGroups": [
+ {
+ "EC2SecurityGroupId": "sg-aaaaaaaa",
+ "EC2SecurityGroupName": "SGName-aaaaaaaa",
+ "EC2SecurityGroupOwnerId": None,
+ "Status": "authorized"
+ },
+ {
+ "EC2SecurityGroupId": "sg-bbbbbbbb",
+ "EC2SecurityGroupName": "SGName-bbbbbbbb",
+ "EC2SecurityGroupOwnerId": None,
+ "Status": "authorized"
+ },
+ {
+ "EC2SecurityGroupId": "sg-cccccccc",
+ "EC2SecurityGroupName": "SGName-cccccccc",
+ "EC2SecurityGroupOwnerId": None,
+ "Status": "authorized"
+ },
+ ],
+ "IPRanges": [],
+ "OwnerId": "123456789012",
+ "VpcId": "vpc-73ec9716"
+ },
+ {
+ 'VpcId': None,
+ 'DBSecurityGroupDescription':
+ 'awslimitchecker test',
+ 'IPRanges': [
+ {
+ 'Status': 'authorized',
+ 'CIDRIP': '76.122.124.15/32'
+ },
+ {
+ 'Status': 'authorized',
+ 'CIDRIP': '66.6.152.59/32'
+ }
+ ],
+ 'OwnerId': '123456789012',
+ 'EC2SecurityGroups': [],
+ 'DBSecurityGroupName': 'alctest'
+ }
+ ],
+ })
+
+ test_find_usage_reserved_instances = []
+ test_find_usage_reserved_instances.append({
+ 'ReservedDBInstances': [
+ {
+ 'ReservedDBInstanceId': 'string',
+ 'ReservedDBInstancesOfferingId': 'string',
+ 'DBInstanceClass': 'string',
+ 'StartTime': datetime(2015, 1, 1),
+ 'Duration': 123,
+ 'FixedPrice': 123.0,
+ 'UsagePrice': 123.0,
+ 'CurrencyCode': 'string',
+ 'DBInstanceCount': 123,
+ 'ProductDescription': 'string',
+ 'OfferingType': 'string',
+ 'MultiAZ': False,
+ 'State': 'string',
+ 'RecurringCharges': [
+ {
+ 'RecurringChargeAmount': 123.0,
+ 'RecurringChargeFrequency': 'string'
+ },
+ ]
+ },
+ ],
+ 'NextToken': 'string'
+ })
+ test_find_usage_reserved_instances.append({
+ 'ReservedDBInstances': [
+ {
+ 'ReservedDBInstanceId': 'string',
+ 'ReservedDBInstancesOfferingId': 'string',
+ 'DBInstanceClass': 'string',
+ 'StartTime': datetime(2015, 1, 1),
+ 'Duration': 123,
+ 'FixedPrice': 123.0,
+ 'UsagePrice': 123.0,
+ 'CurrencyCode': 'string',
+ 'DBInstanceCount': 123,
+ 'ProductDescription': 'string',
+ 'OfferingType': 'string',
+ 'MultiAZ': True,
+ 'State': 'string',
+ 'RecurringCharges': [
+ {
+ 'RecurringChargeAmount': 123.0,
+ 'RecurringChargeFrequency': 'string'
+ },
+ ]
+ },
+ ],
+ })
+
+
+class ELB(object):
+
+ test_find_usage = {
+ # this is a subset of response items
+ 'LoadBalancerDescriptions': [
+ {
+ 'LoadBalancerName': 'elb-1',
+ 'ListenerDescriptions': [
+ {'foo': 'bar'},
+ ],
+ },
+ {
+ 'LoadBalancerName': 'elb-2',
+ 'ListenerDescriptions': [
+ {'foo': 'bar'},
+ {'foo': 'bar'},
+ ],
+ },
+ {
+ 'LoadBalancerName': 'elb-3',
+ 'ListenerDescriptions': [
+ {'foo': 'bar'},
+ {'foo': 'bar'},
+ {'foo': 'bar'},
+ ],
+ },
+ {
+ 'LoadBalancerName': 'elb-4',
+ 'ListenerDescriptions': [
+ {'foo': 'bar'},
+ {'foo': 'bar'},
+ {'foo': 'bar'},
+ {'foo': 'bar'},
+ {'foo': 'bar'},
+ {'foo': 'bar'},
+ ],
+ },
+ ],
+ }
+
+
+class ElastiCache(object):
+ test_find_usage_nodes = []
+ # first page of results
+ test_find_usage_nodes.append({
+ 'CacheClusters': [
+ {
+ 'Engine': 'memcached',
+ 'CacheParameterGroup': {
+ 'CacheNodeIdsToReboot': [],
+ 'CacheParameterGroupName': 'default.memcached1.4',
+ 'ParameterApplyStatus': 'in-sync'
+ },
+ 'CacheClusterId': 'memcached1',
+ 'CacheSecurityGroups': [],
+ 'ConfigurationEndpoint': {
+ 'Port': 11211,
+ 'Address': 'memcached1.vfavzi.cfg.use1.'
+ 'cache.amazonaws.com'
+ },
+ 'CacheClusterCreateTime': 1431109646.755,
+ 'ReplicationGroupId': None,
+ 'AutoMinorVersionUpgrade': True,
+ 'CacheClusterStatus': 'available',
+ 'NumCacheNodes': 1,
+ 'PreferredAvailabilityZone': 'us-east-1d',
+ 'SecurityGroups': [
+ {
+ 'Status': 'active',
+ 'SecurityGroupId': 'sg-11111111'
+ }
+ ],
+ 'CacheSubnetGroupName': 'csg-memcached1',
+ 'EngineVersion': '1.4.14',
+ 'PendingModifiedValues': {
+ 'NumCacheNodes': None,
+ 'EngineVersion': None,
+ 'CacheNodeIdsToRemove': None
+ },
+ 'CacheNodeType': 'cache.t2.small',
+ 'NotificationConfiguration': None,
+ 'PreferredMaintenanceWindow': 'mon:05:30-mon:06:30',
+ 'CacheNodes': [
+ {
+ 'CacheNodeId': '0001',
+ 'Endpoint': {
+ 'Port': 11211,
+ 'Address': 'memcached1.vfavzi.0001.'
+ 'use1.cache.amazonaws.com'
+ },
+ 'CacheNodeStatus': 'available',
+ 'ParameterGroupStatus': 'in-sync',
+ 'CacheNodeCreateTime': 1431109646.755,
+ 'SourceCacheNodeId': None
+ }
+ ]
+ },
+ {
+ 'Engine': 'redis',
+ 'CacheParameterGroup': {
+ 'CacheNodeIdsToReboot': [],
+ 'CacheParameterGroupName': 'default.redis2.8',
+ 'ParameterApplyStatus': 'in-sync'
+ },
+ 'CacheClusterId': 'redis1',
+ 'CacheSecurityGroups': [
+ {
+ 'Status': 'active',
+ 'CacheSecurityGroupName': 'csg-redis1'
+ }
+ ],
+ 'ConfigurationEndpoint': None,
+ 'CacheClusterCreateTime': 1412253787.914,
+ 'ReplicationGroupId': None,
+ 'AutoMinorVersionUpgrade': True,
+ 'CacheClusterStatus': 'available',
+ 'NumCacheNodes': 2,
+ 'PreferredAvailabilityZone': 'us-east-1a',
+ 'SecurityGroups': None,
+ 'CacheSubnetGroupName': None,
+ 'EngineVersion': '2.8.6',
+ 'PendingModifiedValues': {
+ 'NumCacheNodes': None,
+ 'EngineVersion': None,
+ 'CacheNodeIdsToRemove': None
+ },
+ 'CacheNodeType': 'cache.m3.medium',
+ 'NotificationConfiguration': None,
+ 'PreferredMaintenanceWindow': 'mon:05:30-mon:06:30',
+ 'CacheNodes': [
+ {
+ 'CacheNodeId': '0001',
+ 'Endpoint': {
+ 'Port': 6379,
+ 'Address': 'redis1.vfavzi.0001.use1.cache.'
+ 'amazonaws.com'
+ },
+ 'CacheNodeStatus': 'available',
+ 'ParameterGroupStatus': 'in-sync',
+ 'CacheNodeCreateTime': 1412253787.914,
+ 'SourceCacheNodeId': None
+ },
+ {
+ 'CacheNodeId': '0002',
+ 'Endpoint': {
+ 'Port': 6379,
+ 'Address': 'redis1.vfavzi.0002.use1.cache.'
+ 'amazonaws.com'
+ },
+ 'CacheNodeStatus': 'available',
+ 'ParameterGroupStatus': 'in-sync',
+ 'CacheNodeCreateTime': 1412253787.914,
+ 'SourceCacheNodeId': None
+ }
+ ]
+ }
+ ],
+ 'NextToken': 'string',
+ })
+ # second page of results
+ test_find_usage_nodes.append({
+ 'CacheClusters': [
+ {
+ 'Engine': 'redis',
+ 'CacheParameterGroup': {
+ 'CacheNodeIdsToReboot': [],
+ 'CacheParameterGroupName': 'default.redis2.8',
+ 'ParameterApplyStatus': 'in-sync'
+ },
+ 'CacheClusterId': 'redis2',
+ 'CacheSecurityGroups': [
+ {
+ 'Status': 'active',
+ 'CacheSecurityGroupName': 'csg-redis2'
+ }
+ ],
+ 'ConfigurationEndpoint': None,
+ 'CacheClusterCreateTime': 1412253787.123,
+ 'ReplicationGroupId': None,
+ 'AutoMinorVersionUpgrade': True,
+ 'CacheClusterStatus': 'available',
+ 'NumCacheNodes': 4,
+ 'PreferredAvailabilityZone': 'us-east-1a',
+ 'SecurityGroups': None,
+ 'CacheSubnetGroupName': None,
+ 'EngineVersion': '2.8.6',
+ 'PendingModifiedValues': {
+ 'NumCacheNodes': None,
+ 'EngineVersion': None,
+ 'CacheNodeIdsToRemove': None
+ },
+ 'CacheNodeType': 'cache.m3.medium',
+ 'NotificationConfiguration': None,
+ 'PreferredMaintenanceWindow': 'mon:05:30-mon:06:30',
+ 'CacheNodes': None,
+ },
+ ],
+ })
+
+ test_find_usage_subnet_groups = []
+ # first page of results
+ test_find_usage_subnet_groups.append({
+ 'CacheSubnetGroups': [
+ {
+ 'Subnets': [
+ {
+ 'SubnetIdentifier': 'subnet-62e8783b',
+ 'SubnetAvailabilityZone': {
+ 'Name': 'us-east-1d'}
+ },
+ {
+ 'SubnetIdentifier': 'subnet-0b037e7c',
+ 'SubnetAvailabilityZone': {
+ 'Name': 'us-east-1a'
+ }
+ }
+ ],
+ 'CacheSubnetGroupName': 'break-memca-135tjabqoyywd',
+ 'VpcId': 'vpc-73ec9716',
+ 'CacheSubnetGroupDescription': 'memcached'
+ },
+ {
+ 'Subnets': [
+ {
+ 'SubnetIdentifier': 'subnet-38e87861',
+ 'SubnetAvailabilityZone': {
+ 'Name': 'us-east-1d'
+ }
+ },
+ {
+ 'SubnetIdentifier': 'subnet-4f027f38',
+ 'SubnetAvailabilityZone': {
+ 'Name': 'us-east-1a'
+ }
+ }
+ ],
+ 'CacheSubnetGroupName': 'break-memca-6yi6axon9ol9',
+ 'VpcId': 'vpc-1ee8937b',
+ 'CacheSubnetGroupDescription': 'memcached'
+ },
+ ],
+ 'NextToken': 'str'
+ })
+ # second page of results
+ test_find_usage_subnet_groups.append({
+ 'CacheSubnetGroups': [
+ {
+ 'Subnets': [
+ {
+ 'SubnetIdentifier': 'subnet-49071f61',
+ 'SubnetAvailabilityZone': {
+ 'Name': 'us-east-1e'
+ }
+ },
+ {
+ 'SubnetIdentifier': 'subnet-6fe23c18',
+ 'SubnetAvailabilityZone': {
+ 'Name': 'us-east-1a'
+ }
+ },
+ {
+ 'SubnetIdentifier': 'subnet-a9b54df0',
+ 'SubnetAvailabilityZone': {
+ 'Name': 'us-east-1d'
+ }
+ }
+ ],
+ 'CacheSubnetGroupName': 'lsp-d-redis-14d9407dl05er',
+ 'VpcId': 'vpc-c300b9a6',
+ 'CacheSubnetGroupDescription': 'redis'
+ },
+ ],
+ })
+
+ test_find_usage_parameter_groups = []
+ # first page of results
+ test_find_usage_parameter_groups.append({
+ 'CacheParameterGroups': [
+ {
+ 'CacheParameterGroupName': 'default.memcached1.4',
+ 'CacheParameterGroupFamily': 'memcached1.4',
+ 'Description': 'Default for memcached1.4'
+ },
+ {
+ 'CacheParameterGroupName': 'default.redis2.6',
+ 'CacheParameterGroupFamily': 'redis2.6',
+ 'Description': 'Default for redis2.6'
+ },
+ ],
+ 'NextToken': 'foo'
+ })
+ # second page of results
+ test_find_usage_parameter_groups.append({
+ 'CacheParameterGroups': [
+ {
+ 'CacheParameterGroupName': 'default.redis2.8',
+ 'CacheParameterGroupFamily': 'redis2.8',
+ 'Description': 'Default for redis2.8'
+ }
+ ],
+ })
+
+ test_find_usage_security_groups = []
+ # first page of results
+ test_find_usage_security_groups.append({
+ 'CacheSecurityGroups': [
+ {
+ 'OwnerId': '123456789012',
+ 'CacheSecurityGroupName': 'default',
+ 'Description': 'default',
+ 'EC2SecurityGroups': []
+ },
+ ],
+ 'NextToken': 'foo'
+ })
+ # second page of results
+ test_find_usage_security_groups.append({
+ 'CacheSecurityGroups': [
+ {
+ 'OwnerId': '123456789012',
+ 'CacheSecurityGroupName': 'csg1',
+ 'Description': 'foo bar',
+ 'EC2SecurityGroups': [
+ {
+ 'EC2SecurityGroupName': 'ec2-sg1',
+ 'Status': 'authorized',
+ 'EC2SecurityGroupOwnerId': '123456789012'
+ }
+ ]
+ }
+ ]
+ })
+
+
+class EC2(object):
+
+ @property
+ def test_instance_usage(self):
+ mock_inst1A = Mock(spec_set=Instance)
+ type(mock_inst1A).id = '1A'
+ type(mock_inst1A).instance_type = 't2.micro'
+ type(mock_inst1A).spot_instance_request_id = None
+ type(mock_inst1A).placement = {'AvailabilityZone': 'az1a'}
+ type(mock_inst1A).state = {'Code': 16, 'Name': 'running'}
+
+ mock_inst1B = Mock(spec_set=Instance)
+ type(mock_inst1B).id = '1B'
+ type(mock_inst1B).instance_type = 'r3.2xlarge'
+ type(mock_inst1B).spot_instance_request_id = None
+ type(mock_inst1B).placement = {'AvailabilityZone': 'az1a'}
+ type(mock_inst1B).state = {'Code': 0, 'Name': 'pending'}
+
+ mock_inst2A = Mock(spec_set=Instance)
+ type(mock_inst2A).id = '2A'
+ type(mock_inst2A).instance_type = 'c4.4xlarge'
+ type(mock_inst2A).spot_instance_request_id = None
+ type(mock_inst2A).placement = {'AvailabilityZone': 'az1a'}
+ type(mock_inst2A).state = {'Code': 32, 'Name': 'shutting-down'}
+
+ mock_inst2B = Mock(spec_set=Instance)
+ type(mock_inst2B).id = '2B'
+ type(mock_inst2B).instance_type = 't2.micro'
+ type(mock_inst2B).spot_instance_request_id = '1234'
+ type(mock_inst2B).placement = {'AvailabilityZone': 'az1a'}
+ type(mock_inst2B).state = {'Code': 64, 'Name': 'stopping'}
+
+ mock_inst2C = Mock(spec_set=Instance)
+ type(mock_inst2C).id = '2C'
+ type(mock_inst2C).instance_type = 'm4.8xlarge'
+ type(mock_inst2C).spot_instance_request_id = None
+ type(mock_inst2C).placement = {'AvailabilityZone': 'az1a'}
+ type(mock_inst2C).state = {'Code': 16, 'Name': 'running'}
+
+ mock_instStopped = Mock(spec_set=Instance)
+ type(mock_instStopped).id = '2C'
+ type(mock_instStopped).instance_type = 'm4.8xlarge'
+ type(mock_instStopped).spot_instance_request_id = None
+ type(mock_instStopped).placement = {'AvailabilityZone': 'az1a'}
+ type(mock_instStopped).state = {'Code': 80, 'Name': 'stopped'}
+
+ mock_instTerm = Mock(spec_set=Instance)
+ type(mock_instTerm).id = '2C'
+ type(mock_instTerm).instance_type = 'm4.8xlarge'
+ type(mock_instTerm).spot_instance_request_id = None
+ type(mock_instTerm).placement = {'AvailabilityZone': 'az1a'}
+ type(mock_instTerm).state = {'Code': 48, 'Name': 'terminated'}
+
+ return_value = [
+ mock_inst1A,
+ mock_inst1B,
+ mock_inst2A,
+ mock_inst2B,
+ mock_inst2C,
+ mock_instStopped,
+ mock_instTerm
+ ]
+ return return_value
+
+ @property
+ def test_instance_usage_key_error(self):
+ mock_inst1A = Mock(spec_set=Instance)
+ type(mock_inst1A).id = '1A'
+ type(mock_inst1A).instance_type = 'foobar'
+ type(mock_inst1A).spot_instance_request_id = None
+ type(mock_inst1A).placement = {'AvailabilityZone': 'az1a'}
+ type(mock_inst1A).state = {'Code': 16, 'Name': 'running'}
+ return [mock_inst1A]
+
+ @property
+ def test_find_usage_networking_sgs(self):
+ mock_sg1 = Mock(spec_set=SecurityGroup)
+ type(mock_sg1).id = 'sg-1'
+ type(mock_sg1).vpc_id = 'vpc-aaa'
+ type(mock_sg1).ip_permissions = []
+ type(mock_sg1).ip_permissions_egress = []
+ mock_sg2 = Mock(spec_set=SecurityGroup)
+ type(mock_sg2).id = 'sg-2'
+ type(mock_sg2).vpc_id = None
+ type(mock_sg2).ip_permissions = [1, 2, 3, 4, 5, 6]
+ type(mock_sg2).ip_permissions_egress = [8, 9, 10]
+ mock_sg3 = Mock(spec_set=SecurityGroup)
+ type(mock_sg3).id = 'sg-3'
+ type(mock_sg3).vpc_id = 'vpc-bbb'
+ type(mock_sg3).ip_permissions = [1, 2, 3, 4, 5, 6, 7, 8, 9]
+ type(mock_sg3).ip_permissions_egress = [6, 7, 8, 9]
+ mock_sg4 = Mock(spec_set=SecurityGroup)
+ type(mock_sg4).id = 'sg-4'
+ type(mock_sg4).vpc_id = 'vpc-aaa'
+ type(mock_sg4).ip_permissions = [1, 2, 3]
+ type(mock_sg4).ip_permissions_egress = [21, 22, 23, 24]
+
+ return_value = [
+ mock_sg1,
+ mock_sg2,
+ mock_sg3,
+ mock_sg4,
+ ]
+ return return_value
+
+ test_get_reserved_instance_count = {
+ 'ReservedInstances': [
+ {
+ 'ReservedInstancesId': 'res1',
+ 'InstanceType': 'it1',
+ 'AvailabilityZone': 'az1',
+ 'Start': datetime(2015, 1, 1),
+ 'End': datetime(2015, 1, 1),
+ 'Duration': 123,
+ 'UsagePrice': 12,
+ 'FixedPrice': 14,
+ 'InstanceCount': 1,
+ 'ProductDescription': 'Linux/UNIX',
+ 'State': 'active',
+ 'Tags': [
+ {
+ 'Key': 'tagKey',
+ 'Value': 'tagVal'
+ },
+ ],
+ 'InstanceTenancy': 'default',
+ 'CurrencyCode': 'USD',
+ 'OfferingType': 'Heavy Utilization',
+ 'RecurringCharges': [
+ {
+ 'Frequency': 'Hourly',
+ 'Amount': 123.0
+ },
+ ]
+ },
+ {
+ 'ReservedInstancesId': 'res2',
+ 'InstanceType': 'it2',
+ 'AvailabilityZone': 'az1',
+ 'InstanceCount': 1,
+ 'State': 'retired',
+ },
+ {
+ 'ReservedInstancesId': 'res3',
+ 'InstanceType': 'it1',
+ 'AvailabilityZone': 'az1',
+ 'InstanceCount': 9,
+ 'State': 'active',
+ },
+ {
+ 'ReservedInstancesId': 'res4',
+ 'InstanceType': 'it2',
+ 'AvailabilityZone': 'az2',
+ 'InstanceCount': 98,
+ 'State': 'active',
+ },
+ ]
+ }
+
+ @property
+ def test_find_usage_networking_eips(self):
+ mock_addr1 = Mock(spec_set=VpcAddress)
+ type(mock_addr1).domain = 'vpc'
+ mock_addr2 = Mock(spec_set=VpcAddress)
+ type(mock_addr2).domain = 'vpc'
+ mock_addr3 = Mock(spec_set=ClassicAddress)
+ type(mock_addr3).domain = 'standard'
+ return {
+ 'Classic': [mock_addr3],
+ 'Vpc': [mock_addr1, mock_addr2]
+ }
+
+ @property
+ def test_find_usage_networking_eni_sg(self):
+ mock_if1 = Mock(spec_set=NetworkInterface)
+ type(mock_if1).id = 'if-1'
+ type(mock_if1).groups = []
+ type(mock_if1).vpc = Mock()
+
+ mock_if2 = Mock(spec_set=NetworkInterface)
+ type(mock_if2).id = 'if-2'
+ type(mock_if2).groups = [1, 2, 3]
+ type(mock_if2).vpc = Mock()
+
+ mock_if3 = Mock(spec_set=NetworkInterface)
+ type(mock_if3).id = 'if-3'
+ type(mock_if3).groups = [1, 2, 3, 4, 5, 6, 7, 8]
+ type(mock_if3).vpc = Mock()
+
+ mock_if4 = Mock(spec_set=NetworkInterface)
+ type(mock_if4).id = 'if-4'
+ type(mock_if4).groups = [1, 2, 3, 4, 5, 6, 7, 8]
+ type(mock_if4).vpc = None
+ return [mock_if1, mock_if2, mock_if3, mock_if4]
+
+ test_update_limits_from_api = {
+ 'ResponseMetadata': {
+ 'HTTPStatusCode': 200,
+ 'RequestId': '16b85906-ab0d-4134-b8bb-df3e6120c6c7'
+ },
+ 'AccountAttributes': [
+ {
+ 'AttributeName': 'supported-platforms',
+ 'AttributeValues': [
+ {
+ 'AttributeValue': 'EC2'
+ },
+ {
+ 'AttributeValue': 'VPC'
+ }
+ ]
+ },
+ {
+ 'AttributeName': 'vpc-max-security-groups-per-interface',
+ 'AttributeValues': [
+ {
+ 'AttributeValue': '5'
+ }
+ ]
+ },
+ {
+ 'AttributeName': 'max-elastic-ips',
+ 'AttributeValues': [
+ {
+ 'AttributeValue': '40'
+ }
+ ]
+ },
+ {
+ 'AttributeName': 'max-instances',
+ 'AttributeValues': [
+ {
+ 'AttributeValue': '400'
+ }
+ ]
+ },
+ {
+ 'AttributeName': 'vpc-max-elastic-ips',
+ 'AttributeValues': [
+ {
+ 'AttributeValue': '200'
+ }
+ ]
+ },
+ {
+ 'AttributeName': 'default-vpc',
+ 'AttributeValues': [
+ {
+ 'AttributeValue': 'none'
+ }
+ ]
+ }
+ ]
+ }
diff --git a/awslimitchecker/tests/services/test_autoscaling.py b/awslimitchecker/tests/services/test_autoscaling.py
index 42a42858..1d223d54 100644
--- a/awslimitchecker/tests/services/test_autoscaling.py
+++ b/awslimitchecker/tests/services/test_autoscaling.py
@@ -38,8 +38,6 @@
"""
import sys
-from boto.ec2.autoscale import AutoScaleConnection, connect_to_region
-from boto.ec2.autoscale.limits import AccountLimits
from awslimitchecker.services.autoscaling import _AutoscalingService
# https://code.google.com/p/mock/issues/detail?id=249
@@ -67,50 +65,6 @@ def test_init(self):
assert cls.warning_threshold == 21
assert cls.critical_threshold == 43
- def test_connect(self):
- """test connect()"""
- mock_conn = Mock()
- mock_conn_via = Mock()
- cls = _AutoscalingService(21, 43)
- with patch('%s.boto.connect_autoscale' % self.pbm) as mock_autoscaling:
- with patch('%s.connect_via' % self.pb) as mock_connect_via:
- mock_autoscaling.return_value = mock_conn
- mock_connect_via.return_value = mock_conn_via
- cls.connect()
- assert mock_autoscaling.mock_calls == [call()]
- assert mock_connect_via.mock_calls == []
- assert mock_conn.mock_calls == []
- assert cls.conn == mock_conn
-
- def test_connect_region(self):
- """test connect()"""
- mock_conn = Mock()
- mock_conn_via = Mock()
- cls = _AutoscalingService(21, 43, region='myreg')
- with patch('%s.boto.connect_autoscale' % self.pbm) as mock_autoscaling:
- with patch('%s.connect_via' % self.pb) as mock_connect_via:
- mock_autoscaling.return_value = mock_conn
- mock_connect_via.return_value = mock_conn_via
- cls.connect()
- assert mock_autoscaling.mock_calls == []
- assert mock_connect_via.mock_calls == [
- call(connect_to_region)
- ]
- assert mock_conn.mock_calls == []
- assert cls.conn == mock_conn_via
-
- def test_connect_again(self):
- """make sure we re-use the connection"""
- mock_conn = Mock()
- cls = _AutoscalingService(21, 43)
- cls.conn = mock_conn
- with patch('awslimitchecker.services.autoscaling.boto.connect_'
- 'autoscale') as mock_autoscaling:
- mock_autoscaling.return_value = mock_conn
- cls.connect()
- assert mock_autoscaling.mock_calls == []
- assert mock_conn.mock_calls == []
-
def test_get_limits(self):
cls = _AutoscalingService(21, 43)
cls.limits = {}
@@ -133,13 +87,24 @@ def test_get_limits_again(self):
assert res == mock_limits
def test_find_usage(self):
- mock_conn = Mock(spec_set=AutoScaleConnection)
+ mock_conn = Mock()
def se_wrapper(func, *args, **kwargs):
- if func == mock_conn.get_all_groups:
- return [1, 2, 3]
- elif func == mock_conn.get_all_launch_configurations:
- return [1, 2]
+ if func == mock_conn.describe_auto_scaling_groups:
+ return {
+ 'AutoScalingGroups': [
+ {'AutoScalingGroupName': 'foo'},
+ {'AutoScalingGroupName': 'bar'},
+ {'AutoScalingGroupName': 'baz'},
+ ],
+ }
+ elif func == mock_conn.describe_launch_configurations:
+ return {
+ 'LaunchConfigurations': [
+ {'LaunchConfigurationName': 'foo'},
+ {'LaunchConfigurationName': 'bar'},
+ ],
+ }
return None
with patch('%s.connect' % self.pb) as mock_connect:
@@ -152,8 +117,18 @@ def se_wrapper(func, *args, **kwargs):
assert mock_connect.mock_calls == [call()]
assert mock_conn.mock_calls == []
assert mock_wrapper.mock_calls == [
- call(mock_conn.get_all_groups),
- call(mock_conn.get_all_launch_configurations)
+ call(
+ mock_conn.describe_auto_scaling_groups,
+ alc_marker_path=['NextToken'],
+ alc_data_path=['AutoScalingGroups'],
+ alc_marker_param='NextToken'
+ ),
+ call(
+ mock_conn.describe_launch_configurations,
+ alc_marker_path=['NextToken'],
+ alc_data_path=['LaunchConfigurations'],
+ alc_marker_param='NextToken'
+ )
]
assert cls._have_usage is True
asgs = sorted(cls.limits['Auto Scaling groups'].get_current_usage())
@@ -172,10 +147,13 @@ def test_required_iam_permissions(self):
]
def test_update_limits_from_api(self):
- mock_conn = Mock(spec_set=AutoScaleConnection)
- aslimits = AccountLimits(connection=mock_conn)
- aslimits.max_autoscaling_groups = 11
- aslimits.max_launch_configurations = 22
+ mock_conn = Mock()
+ aslimits = {
+ 'MaxNumberOfAutoScalingGroups': 11,
+ 'MaxNumberOfLaunchConfigurations': 22,
+ 'NumberOfAutoScalingGroups': 5,
+ 'NumberOfLaunchConfigurations': 6
+ }
with patch('%s.connect' % self.pb) as mock_connect:
with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
@@ -184,6 +162,8 @@ def test_update_limits_from_api(self):
mock_wrapper.return_value = aslimits
cls._update_limits_from_api()
assert mock_connect.mock_calls == [call()]
- assert mock_wrapper.mock_calls == [call(mock_conn.get_account_limits)]
+ assert mock_wrapper.mock_calls == [
+ call(mock_conn.describe_account_limits)
+ ]
assert cls.limits['Auto Scaling groups'].api_limit == 11
assert cls.limits['Launch configurations'].api_limit == 22
diff --git a/awslimitchecker/tests/services/test_base.py b/awslimitchecker/tests/services/test_base.py
index 99e958cf..d8576616 100644
--- a/awslimitchecker/tests/services/test_base.py
+++ b/awslimitchecker/tests/services/test_base.py
@@ -58,6 +58,7 @@ class AwsServiceTester(_AwsService):
"""class to test non-abstract methods on base class"""
service_name = 'AwsServiceTester'
+ api_name = 'awsservicetester'
def connect(self):
pass
@@ -286,6 +287,7 @@ def verify_subclass(self, clsname, cls):
assert mock_get_limits.mock_calls == [call()]
# ensure service name is changed
assert inst.service_name != 'baseclass'
+ assert inst.api_name != 'baseclass'
# ensure an IAM permissions list, even if empty
assert isinstance(inst.required_iam_permissions(), list)
# ensure warning and critical thresholds
diff --git a/awslimitchecker/tests/services/test_ebs.py b/awslimitchecker/tests/services/test_ebs.py
index 35eb30b0..486b8887 100644
--- a/awslimitchecker/tests/services/test_ebs.py
+++ b/awslimitchecker/tests/services/test_ebs.py
@@ -38,12 +38,9 @@
"""
import sys
-from boto.ec2.connection import EC2Connection
-from boto.ec2.volume import Volume
-from boto.ec2.snapshot import Snapshot
-from boto.ec2 import connect_to_region
from awslimitchecker.services.ebs import _EbsService
from awslimitchecker.limit import AwsLimit
+from awslimitchecker.tests.services import result_fixtures
# https://code.google.com/p/mock/issues/detail?id=249
# py>=3.4 should use unittest.mock not the mock package on pypi
@@ -69,49 +66,6 @@ def test_init(self):
assert cls.warning_threshold == 21
assert cls.critical_threshold == 43
- def test_connect(self):
- """test connect()"""
- mock_conn = Mock()
- mock_conn_via = Mock()
- cls = _EbsService(21, 43)
- with patch('%s.boto.connect_ec2' % self.pbm) as mock_ec2:
- with patch('%s.connect_via' % self.pb) as mock_connect_via:
- mock_ec2.return_value = mock_conn
- mock_connect_via.return_value = mock_conn_via
- cls.connect()
- assert mock_ec2.mock_calls == [call()]
- assert mock_conn.mock_calls == []
- assert mock_connect_via.mock_calls == []
- assert cls.conn == mock_conn
-
- def test_connect_region(self):
- """test connect()"""
- mock_conn = Mock()
- mock_conn_via = Mock()
- cls = _EbsService(21, 43, region='foo')
- with patch('%s.boto.connect_ec2' % self.pbm) as mock_ec2:
- with patch('%s.connect_via' % self.pb) as mock_connect_via:
- mock_ec2.return_value = mock_conn
- mock_connect_via.return_value = mock_conn_via
- cls.connect()
- assert mock_ec2.mock_calls == []
- assert mock_conn.mock_calls == []
- assert mock_connect_via.mock_calls == [call(connect_to_region)]
- assert cls.conn == mock_conn_via
-
- def test_connect_again(self):
- """make sure we re-use the connection"""
- mock_conn = Mock()
- cls = _EbsService(21, 43)
- cls.conn = mock_conn
- with patch('%s.boto.connect_ec2' % self.pbm) as mock_ec2:
- with patch('%s.connect_via' % self.pb) as mock_connect_via:
- mock_ec2.return_value = mock_conn
- cls.connect()
- assert mock_ec2.mock_calls == []
- assert mock_conn.mock_calls == []
- assert mock_connect_via.mock_calls == []
-
def test_get_limits_again(self):
"""test that existing limits dict is returned on subsequent calls"""
cls = _EbsService(21, 43)
@@ -168,67 +122,14 @@ def test_find_usage(self):
assert mocks[m].mock_calls == [call(cls)]
def test_find_usage_ebs(self):
- # 500G magnetic
- mock_vol1 = Mock(spec_set=Volume)
- type(mock_vol1).id = 'vol-1'
- type(mock_vol1).type = 'standard' # magnetic
- type(mock_vol1).size = 500
- type(mock_vol1).iops = None
+ response = result_fixtures.EBS.test_find_usage_ebs
- # 8G magnetic
- mock_vol2 = Mock(spec_set=Volume)
- type(mock_vol2).id = 'vol-2'
- type(mock_vol2).type = 'standard' # magnetic
- type(mock_vol2).size = 8
- type(mock_vol2).iops = None
-
- # 15G general purpose SSD, 45 IOPS
- mock_vol3 = Mock(spec_set=Volume)
- type(mock_vol3).id = 'vol-3'
- type(mock_vol3).type = 'gp2'
- type(mock_vol3).size = 15
- type(mock_vol3).iops = 45
-
- # 30G general purpose SSD, 90 IOPS
- mock_vol4 = Mock(spec_set=Volume)
- type(mock_vol4).id = 'vol-4'
- type(mock_vol4).type = 'gp2'
- type(mock_vol4).size = 30
- type(mock_vol4).iops = 90
-
- # 400G PIOPS, 700 IOPS
- mock_vol5 = Mock(spec_set=Volume)
- type(mock_vol5).id = 'vol-5'
- type(mock_vol5).type = 'io1'
- type(mock_vol5).size = 400
- type(mock_vol5).iops = 700
-
- # 100G PIOPS, 300 IOPS
- mock_vol6 = Mock(spec_set=Volume)
- type(mock_vol6).id = 'vol-6'
- type(mock_vol6).type = 'io1'
- type(mock_vol6).size = 100
- type(mock_vol6).iops = 300
-
- mock_vol7 = Mock(spec_set=Volume)
- type(mock_vol7).id = 'vol-7'
- type(mock_vol7).type = 'othertype'
-
- mock_conn = Mock(spec_set=EC2Connection)
- return_value = [
- mock_vol1,
- mock_vol2,
- mock_vol3,
- mock_vol4,
- mock_vol5,
- mock_vol6,
- mock_vol7
- ]
+ mock_conn = Mock()
cls = _EbsService(21, 43)
cls.conn = mock_conn
with patch('awslimitchecker.services.ebs.logger') as mock_logger:
with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = return_value
+ mock_wrapper.return_value = response
cls._find_usage_ebs()
assert mock_logger.mock_calls == [
call.debug("Getting usage for EBS volumes"),
@@ -256,30 +157,24 @@ def test_find_usage_ebs(self):
''].get_current_usage()[0].get_value() == 7
assert mock_conn.mock_calls == []
assert mock_wrapper.mock_calls == [
- call(mock_conn.get_all_volumes)
+ call(
+ mock_conn.describe_volumes,
+ alc_marker_path=['NextToken'],
+ alc_data_path=['Volumes'],
+ alc_marker_param='NextToken'
+ )
]
def test_find_usage_snapshots(self):
- mock_snap1 = Mock(spec_set=Snapshot)
- type(mock_snap1).id = 'snap-1'
-
- mock_snap2 = Mock(spec_set=Snapshot)
- type(mock_snap2).id = 'snap-2'
+ response = result_fixtures.EBS.test_find_usage_snapshots
- mock_snap3 = Mock(spec_set=Snapshot)
- type(mock_snap3).id = 'snap-3'
+ mock_conn = Mock()
- mock_conn = Mock(spec_set=EC2Connection)
- return_value = [
- mock_snap1,
- mock_snap2,
- mock_snap3,
- ]
cls = _EbsService(21, 43)
cls.conn = mock_conn
with patch('awslimitchecker.services.ebs.logger') as mock_logger:
with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = return_value
+ mock_wrapper.return_value = response
cls._find_usage_snapshots()
assert mock_logger.mock_calls == [
call.debug("Getting usage for EBS snapshots"),
@@ -289,7 +184,13 @@ def test_find_usage_snapshots(self):
''].get_current_usage()[0].get_value() == 3
assert mock_conn.mock_calls == []
assert mock_wrapper.mock_calls == [
- call(mock_conn.get_all_snapshots, owner='self')
+ call(
+ mock_conn.describe_snapshots,
+ OwnerIds=['self'],
+ alc_marker_path=['NextToken'],
+ alc_data_path=['Snapshots'],
+ alc_marker_param='NextToken'
+ )
]
def test_required_iam_permissions(self):
diff --git a/awslimitchecker/tests/services/test_ec2.py b/awslimitchecker/tests/services/test_ec2.py
index 19bb0769..1810f9bc 100644
--- a/awslimitchecker/tests/services/test_ec2.py
+++ b/awslimitchecker/tests/services/test_ec2.py
@@ -38,15 +38,7 @@
"""
import sys
-from boto.ec2.connection import EC2Connection
-from boto.ec2.instance import Instance, Reservation
-from boto.ec2.reservedinstance import ReservedInstance
-from boto.ec2.securitygroup import SecurityGroup
-from boto.ec2.address import Address
-from boto.ec2.networkinterface import NetworkInterface
-from boto.ec2 import connect_to_region
-from boto.ec2.attributes import AccountAttribute
-from boto.resultset import ResultSet
+from awslimitchecker.tests.services import result_fixtures
from awslimitchecker.services.ec2 import _Ec2Service
from awslimitchecker.limit import AwsLimit
@@ -60,6 +52,8 @@
else:
from unittest.mock import patch, call, Mock, DEFAULT
+fixtures = result_fixtures.EC2()
+
class Test_Ec2Service(object):
@@ -71,50 +65,10 @@ def test_init(self):
cls = _Ec2Service(21, 43)
assert cls.service_name == 'EC2'
assert cls.conn is None
+ assert cls.resource_conn is None
assert cls.warning_threshold == 21
assert cls.critical_threshold == 43
- def test_connect(self):
- """test connect()"""
- mock_conn = Mock()
- mock_conn_via = Mock()
- cls = _Ec2Service(21, 43)
- with patch('%s.boto.connect_ec2' % self.pbm) as mock_ec2:
- with patch('%s.connect_via' % self.pb) as mock_connect_via:
- mock_ec2.return_value = mock_conn
- mock_connect_via.return_value = mock_conn_via
- cls.connect()
- assert mock_ec2.mock_calls == [call()]
- assert mock_conn.mock_calls == []
- assert mock_connect_via.mock_calls == []
- assert cls.conn == mock_conn
-
- def test_connect_region(self):
- """test connect()"""
- mock_conn = Mock()
- mock_conn_via = Mock()
- cls = _Ec2Service(21, 43, region='bar')
- with patch('%s.boto.connect_ec2' % self.pbm) as mock_ec2:
- with patch('%s.connect_via' % self.pb) as mock_connect_via:
- mock_ec2.return_value = mock_conn
- mock_connect_via.return_value = mock_conn_via
- cls.connect()
- assert mock_ec2.mock_calls == []
- assert mock_conn.mock_calls == []
- assert mock_connect_via.mock_calls == [call(connect_to_region)]
- assert cls.conn == mock_conn_via
-
- def test_connect_again(self):
- """make sure we re-use the connection"""
- mock_conn = Mock()
- cls = _Ec2Service(21, 43)
- cls.conn = mock_conn
- with patch('awslimitchecker.services.ec2.boto.connect_ec2') as mock_ec2:
- mock_ec2.return_value = mock_conn
- cls.connect()
- assert mock_ec2.mock_calls == []
- assert mock_conn.mock_calls == []
-
def test_instance_types(self):
cls = _Ec2Service(21, 43)
types = cls._instance_types()
@@ -218,82 +172,23 @@ def test_instance_usage(self):
}
cls = _Ec2Service(21, 43)
- mock_inst1A = Mock(spec_set=Instance)
- type(mock_inst1A).id = '1A'
- type(mock_inst1A).instance_type = 't2.micro'
- type(mock_inst1A).spot_instance_request_id = None
- type(mock_inst1A).placement = 'az1a'
- type(mock_inst1A).state = 'running'
-
- mock_inst1B = Mock(spec_set=Instance)
- type(mock_inst1B).id = '1B'
- type(mock_inst1B).instance_type = 'r3.2xlarge'
- type(mock_inst1B).spot_instance_request_id = None
- type(mock_inst1B).placement = 'az1a'
- type(mock_inst1B).state = 'pending'
-
- mock_res1 = Mock(spec_set=Reservation)
- type(mock_res1).instances = [mock_inst1A, mock_inst1B]
-
- mock_inst2A = Mock(spec_set=Instance)
- type(mock_inst2A).id = '2A'
- type(mock_inst2A).instance_type = 'c4.4xlarge'
- type(mock_inst2A).spot_instance_request_id = None
- type(mock_inst2A).placement = 'az1a'
- type(mock_inst2A).state = 'shutting-down'
-
- mock_inst2B = Mock(spec_set=Instance)
- type(mock_inst2B).id = '2B'
- type(mock_inst2B).instance_type = 't2.micro'
- type(mock_inst2B).spot_instance_request_id = '1234'
- type(mock_inst2B).placement = 'az1a'
- type(mock_inst2B).state = 'stopping'
-
- mock_inst2C = Mock(spec_set=Instance)
- type(mock_inst2C).id = '2C'
- type(mock_inst2C).instance_type = 'm4.8xlarge'
- type(mock_inst2C).spot_instance_request_id = None
- type(mock_inst2C).placement = 'az1a'
- type(mock_inst2C).state = 'running'
-
- mock_instStopped = Mock(spec_set=Instance)
- type(mock_instStopped).id = '2C'
- type(mock_instStopped).instance_type = 'm4.8xlarge'
- type(mock_instStopped).spot_instance_request_id = None
- type(mock_instStopped).placement = 'az1a'
- type(mock_instStopped).state = 'stopped'
-
- mock_instTerm = Mock(spec_set=Instance)
- type(mock_instTerm).id = '2C'
- type(mock_instTerm).instance_type = 'm4.8xlarge'
- type(mock_instTerm).spot_instance_request_id = None
- type(mock_instTerm).placement = 'az1a'
- type(mock_instTerm).state = 'terminated'
-
- mock_res2 = Mock(spec_set=Reservation)
- type(mock_res2).instances = [
- mock_inst2A, mock_inst2B, mock_inst2C, mock_instStopped,
- mock_instTerm
- ]
+ mock_conn = Mock()
- mock_conn = Mock(spec_set=EC2Connection)
- return_value = [
- mock_res1,
- mock_res2
- ]
- cls.conn = mock_conn
+ retval = fixtures.test_instance_usage
+ mock_conn.instances.all.return_value = retval
+
+ cls.resource_conn = mock_conn
cls.limits = limits
+
with patch('awslimitchecker.services.ec2._Ec2Service._instance_types',
autospec=True) as mock_itypes:
- with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = return_value
- mock_itypes.return_value = [
- 't2.micro',
- 'r3.2xlarge',
- 'c4.4xlarge',
- 'm4.8xlarge',
- ]
- res = cls._instance_usage()
+ mock_itypes.return_value = [
+ 't2.micro',
+ 'r3.2xlarge',
+ 'c4.4xlarge',
+ 'm4.8xlarge',
+ ]
+ res = cls._instance_usage()
assert res == {
'az1a': {
't2.micro': 1,
@@ -302,51 +197,21 @@ def test_instance_usage(self):
'm4.8xlarge': 1,
}
}
- assert mock_conn.mock_calls == []
- assert mock_wrapper.mock_calls == [
- call(mock_conn.get_all_reservations)
+ assert mock_conn.mock_calls == [
+ call.instances.all()
]
def test_get_reserved_instance_count(self):
- mock_res1 = Mock(spec_set=ReservedInstance)
- type(mock_res1).state = 'active'
- type(mock_res1).id = 'res1'
- type(mock_res1).availability_zone = 'az1'
- type(mock_res1).instance_type = 'it1'
- type(mock_res1).instance_count = 1
-
- mock_res2 = Mock(spec_set=ReservedInstance)
- type(mock_res2).state = 'inactive'
- type(mock_res2).id = 'res2'
- type(mock_res2).availability_zone = 'az1'
- type(mock_res2).instance_type = 'it2'
- type(mock_res2).instance_count = 1
-
- mock_res3 = Mock(spec_set=ReservedInstance)
- type(mock_res3).state = 'active'
- type(mock_res3).id = 'res3'
- type(mock_res3).availability_zone = 'az1'
- type(mock_res3).instance_type = 'it1'
- type(mock_res3).instance_count = 9
-
- mock_res4 = Mock(spec_set=ReservedInstance)
- type(mock_res4).state = 'active'
- type(mock_res4).id = 'res4'
- type(mock_res4).availability_zone = 'az2'
- type(mock_res4).instance_type = 'it2'
- type(mock_res4).instance_count = 98
+ response = fixtures.test_get_reserved_instance_count
cls = _Ec2Service(21, 43)
- mock_conn = Mock(spec_set=EC2Connection)
- return_value = [
- mock_res1,
- mock_res2,
- mock_res3,
- mock_res4
- ]
- cls.conn = mock_conn
+ mock_client_conn = Mock()
+ cls.conn = mock_client_conn
+ mock_conn = Mock()
+ cls.resource_conn = mock_conn
+
with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = return_value
+ mock_wrapper.return_value = response
res = cls._get_reserved_instance_count()
assert res == {
'az1': {
@@ -357,8 +222,10 @@ def test_get_reserved_instance_count(self):
},
}
assert mock_conn.mock_calls == []
+ assert mock_client_conn.mock_calls == []
assert mock_wrapper.mock_calls == [
- call(mock_conn.get_all_reserved_instances)
+ call(mock_client_conn.describe_reserved_instances,
+ alc_no_paginate=True)
]
def test_find_usage_instances(self):
@@ -401,13 +268,12 @@ def test_find_usage_instances(self):
}
cls = _Ec2Service(21, 43)
- mock_conn = Mock(spec_set=EC2Connection)
- cls.conn = mock_conn
+ mock_conn = Mock()
+ cls.resource_conn = mock_conn
cls.limits = limits
- with patch('awslimitchecker.services.ec2._Ec2Service.'
- '_instance_usage', autospec=True) as mock_inst_usage:
- with patch('awslimitchecker.services.ec2._Ec2Service.'
- '_get_reserved_instance_count',
+ with patch('%s._instance_usage' % self.pb,
+ autospec=True) as mock_inst_usage:
+ with patch('%s._get_reserved_instance_count' % self.pb,
autospec=True) as mock_res_inst_count:
mock_inst_usage.return_value = iusage
mock_res_inst_count.return_value = ri_count
@@ -432,34 +298,28 @@ def test_find_usage_instances(self):
assert mock_res_inst_count.mock_calls == [call(cls)]
assert mock_conn.mock_calls == []
- def test_find_usage_instances_key_error(self):
- mock_inst1A = Mock(spec_set=Instance)
- type(mock_inst1A).id = '1A'
- type(mock_inst1A).instance_type = 'foobar'
- type(mock_inst1A).spot_instance_request_id = None
- mock_res1 = Mock(spec_set=Reservation)
- type(mock_res1).instances = [mock_inst1A]
-
- mock_conn = Mock(spec_set=EC2Connection)
- return_value = [mock_res1]
+ def test_instance_usage_key_error(self):
+ mock_conn = Mock()
+ data = fixtures.test_instance_usage_key_error
+ mock_conn.instances.all.return_value = data
cls = _Ec2Service(21, 43)
- cls.conn = mock_conn
+ cls.resource_conn = mock_conn
cls.limits = {'Running On-Demand t2.micro instances': Mock()}
+
with patch(
'%s._instance_types' % self.pb,
autospec=True) as mock_itypes:
with patch('awslimitchecker.services.ec2.logger') as mock_logger:
- with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = return_value
- mock_itypes.return_value = ['t2.micro']
- cls._instance_usage()
+ mock_itypes.return_value = ['t2.micro']
+ cls._instance_usage()
assert mock_logger.mock_calls == [
call.debug('Getting usage for on-demand instances'),
call.error("ERROR - unknown instance type '%s'; not counting",
'foobar'),
]
- assert mock_conn.mock_calls == []
- assert mock_wrapper.mock_calls == [call(mock_conn.get_all_reservations)]
+ assert mock_conn.mock_calls == [
+ call.instances.all()
+ ]
def test_required_iam_permissions(self):
cls = _Ec2Service(21, 43)
@@ -481,36 +341,16 @@ def test_required_iam_permissions(self):
]
def test_find_usage_networking_sgs(self):
- mock_sg1 = Mock(spec_set=SecurityGroup)
- type(mock_sg1).id = 'sg-1'
- type(mock_sg1).vpc_id = 'vpc-aaa'
- type(mock_sg1).rules = []
- mock_sg2 = Mock(spec_set=SecurityGroup)
- type(mock_sg2).id = 'sg-2'
- type(mock_sg2).vpc_id = None
- type(mock_sg2).rules = [1, 2, 3, 4, 5, 6]
- mock_sg3 = Mock(spec_set=SecurityGroup)
- type(mock_sg3).id = 'sg-3'
- type(mock_sg3).vpc_id = 'vpc-bbb'
- type(mock_sg3).rules = [1, 2, 3, 4, 5, 6, 7, 8, 9]
- mock_sg4 = Mock(spec_set=SecurityGroup)
- type(mock_sg4).id = 'sg-4'
- type(mock_sg4).vpc_id = 'vpc-aaa'
- type(mock_sg4).rules = [1, 2, 3]
-
- mock_conn = Mock(spec_set=EC2Connection)
- return_value = [
- mock_sg1,
- mock_sg2,
- mock_sg3,
- mock_sg4,
- ]
+ mocks = fixtures.test_find_usage_networking_sgs
+
+ mock_conn = Mock()
+ mock_conn.security_groups.all.return_value = mocks
+
cls = _Ec2Service(21, 43)
- cls.conn = mock_conn
+ cls.resource_conn = mock_conn
+
with patch('awslimitchecker.services.ec2.logger') as mock_logger:
- with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = return_value
- cls._find_usage_networking_sgs()
+ cls._find_usage_networking_sgs()
assert mock_logger.mock_calls == [
call.debug("Getting usage for EC2 VPC resources"),
]
@@ -539,31 +379,21 @@ def test_find_usage_networking_sgs(self):
assert sorted_usage[2].limit == limit
assert sorted_usage[2].resource_id == 'sg-3'
assert sorted_usage[2].get_value() == 9
- assert mock_conn.mock_calls == []
- assert mock_wrapper.mock_calls == [
- call(mock_conn.get_all_security_groups)
+ assert mock_conn.mock_calls == [
+ call.security_groups.all()
]
def test_find_usage_networking_eips(self):
- mock_addr1 = Mock(spec_set=Address)
- type(mock_addr1).domain = 'vpc'
- mock_addr2 = Mock(spec_set=Address)
- type(mock_addr2).domain = 'vpc'
- mock_addr3 = Mock(spec_set=Address)
- type(mock_addr3).domain = 'standard'
-
- mock_conn = Mock(spec_set=EC2Connection)
- return_value = [
- mock_addr1,
- mock_addr2,
- mock_addr3,
- ]
+ mocks = fixtures.test_find_usage_networking_eips
+
+ mock_conn = Mock()
+ mock_conn.classic_addresses.all.return_value = mocks['Classic']
+ mock_conn.vpc_addresses.all.return_value = mocks['Vpc']
cls = _Ec2Service(21, 43)
- cls.conn = mock_conn
+ cls.resource_conn = mock_conn
+
with patch('awslimitchecker.services.ec2.logger') as mock_logger:
- with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = return_value
- cls._find_usage_networking_eips()
+ cls._find_usage_networking_eips()
assert mock_logger.mock_calls == [
call.debug("Getting usage for EC2 EIPs"),
]
@@ -582,34 +412,21 @@ def test_find_usage_networking_eips(self):
assert usage[0].get_value() == 1
assert usage[0].resource_id is None
assert usage[0].aws_type == 'AWS::EC2::EIP'
- assert mock_conn.mock_calls == []
- assert mock_wrapper.mock_calls == [
- call(mock_conn.get_all_addresses)
+
+ assert mock_conn.mock_calls == [
+ call.vpc_addresses.all(),
+ call.classic_addresses.all()
]
def test_find_usage_networking_eni_sg(self):
- mock_if1 = Mock(spec_set=NetworkInterface)
- type(mock_if1).id = 'if-1'
- type(mock_if1).groups = []
- mock_if2 = Mock(spec_set=NetworkInterface)
- type(mock_if2).id = 'if-2'
- type(mock_if2).groups = [1, 2, 3]
- mock_if3 = Mock(spec_set=NetworkInterface)
- type(mock_if3).id = 'if-3'
- type(mock_if3).groups = [1, 2, 3, 4, 5, 6, 7, 8]
-
- mock_conn = Mock(spec_set=EC2Connection)
- return_value = [
- mock_if1,
- mock_if2,
- mock_if3,
- ]
+ mocks = fixtures.test_find_usage_networking_eni_sg
+
+ mock_conn = Mock()
+ mock_conn.network_interfaces.all.return_value = mocks
cls = _Ec2Service(21, 43)
- cls.conn = mock_conn
+ cls.resource_conn = mock_conn
with patch('awslimitchecker.services.ec2.logger') as mock_logger:
- with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = return_value
- cls._find_usage_networking_eni_sg()
+ cls._find_usage_networking_eni_sg()
assert mock_logger.mock_calls == [
call.debug("Getting usage for EC2 Network Interfaces"),
]
@@ -625,9 +442,8 @@ def test_find_usage_networking_eni_sg(self):
assert sorted_usage[2].limit == limit
assert sorted_usage[2].resource_id == 'if-3'
assert sorted_usage[2].get_value() == 8
- assert mock_conn.mock_calls == []
- assert mock_wrapper.mock_calls == [
- call(mock_conn.get_all_network_interfaces)
+ assert mock_conn.mock_calls == [
+ call.network_interfaces.all()
]
def test_get_limits_networking(self):
@@ -643,44 +459,20 @@ def test_get_limits_networking(self):
assert sorted(limits.keys()) == sorted(expected)
def test_update_limits_from_api(self):
- mock_conn = Mock(spec_set=EC2Connection)
-
- rs = ResultSet()
- a1 = AccountAttribute(connection=mock_conn)
- a1.attribute_name = 'supported-platforms'
- a1.attribute_values = ['EC2', 'VPC']
- rs.append(a1)
- a2 = AccountAttribute(connection=mock_conn)
- a2.attribute_name = 'vpc-max-security-groups-per-interface'
- a2.attribute_values = ['5']
- rs.append(a2)
- a3 = AccountAttribute(connection=mock_conn)
- a3.attribute_name = 'max-elastic-ips'
- a3.attribute_values = ['40']
- rs.append(a3)
- a4 = AccountAttribute(connection=mock_conn)
- a4.attribute_name = 'max-instances'
- a4.attribute_values = ['400']
- rs.append(a4)
- a5 = AccountAttribute(connection=mock_conn)
- a5.attribute_name = 'vpc-max-elastic-ips'
- a5.attribute_values = ['200']
- rs.append(a5)
- a6 = AccountAttribute(connection=mock_conn)
- a6.attribute_name = 'default-vpc'
- a6.attribute_values = ['none']
- rs.append(a6)
+ data = fixtures.test_update_limits_from_api
+ mock_conn = Mock()
+ mock_client_conn = Mock()
+ mock_client_conn.describe_account_attributes.return_value = data
cls = _Ec2Service(21, 43)
- cls.conn = mock_conn
+ cls.resource_conn = mock_conn
+ cls.conn = mock_client_conn
with patch('awslimitchecker.services.ec2.logger') as mock_logger:
- with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = rs
- cls._update_limits_from_api()
- assert mock_wrapper.mock_calls == [
- call(mock_conn.describe_account_attributes)
- ]
+ cls._update_limits_from_api()
assert mock_conn.mock_calls == []
+ assert mock_client_conn.mock_calls == [
+ call.describe_account_attributes()
+ ]
assert mock_logger.mock_calls == [
call.info("Querying EC2 DescribeAccountAttributes for limits"),
call.debug('Done setting limits from API')
diff --git a/awslimitchecker/tests/services/test_elasticache.py b/awslimitchecker/tests/services/test_elasticache.py
index c3d02fd4..6d4c234b 100644
--- a/awslimitchecker/tests/services/test_elasticache.py
+++ b/awslimitchecker/tests/services/test_elasticache.py
@@ -38,10 +38,10 @@
"""
import sys
-from boto.elasticache.layer1 import ElastiCacheConnection
-from boto.elasticache import connect_to_region
-from boto.exception import BotoServerError
+from awslimitchecker.tests.services import result_fixtures
+from botocore.exceptions import ClientError
from awslimitchecker.services.elasticache import _ElastiCacheService
+import pytest
# https://code.google.com/p/mock/issues/detail?id=249
# py>=3.4 should use unittest.mock not the mock package on pypi
@@ -67,52 +67,6 @@ def test_init(self):
assert cls.warning_threshold == 21
assert cls.critical_threshold == 43
- def test_connect(self):
- """test connect()"""
- mock_conn = Mock(spec_set=ElastiCacheConnection)
- mock_conn_via = Mock(spec_set=ElastiCacheConnection)
- cls = _ElastiCacheService(21, 43)
- with patch('%s.ElastiCacheConnection' % self.pbm) as mock_elasticache:
- with patch('%s.connect_via' % self.pb) as mock_connect_via:
- mock_elasticache.return_value = mock_conn
- mock_connect_via.return_value = mock_conn_via
- cls.connect()
- assert mock_elasticache.mock_calls == [call()]
- assert mock_conn.mock_calls == []
- assert mock_connect_via.mock_calls == []
- assert cls.conn == mock_conn
-
- def test_connect_region(self):
- """test connect()"""
- mock_conn = Mock(spec_set=ElastiCacheConnection)
- mock_conn_via = Mock(spec_set=ElastiCacheConnection)
- cls = _ElastiCacheService(21, 43, region='foo')
- with patch('%s.ElastiCacheConnection' % self.pbm) as mock_elasticache:
- with patch('%s.connect_via' % self.pb) as mock_connect_via:
- mock_elasticache.return_value = mock_conn
- mock_connect_via.return_value = mock_conn_via
- cls.connect()
- assert mock_elasticache.mock_calls == []
- assert mock_conn.mock_calls == []
- assert mock_connect_via.mock_calls == [
- call(connect_to_region)
- ]
- assert cls.conn == mock_conn_via
-
- def test_connect_again(self):
- """make sure we re-use the connection"""
- mock_conn = Mock()
- cls = _ElastiCacheService(21, 43)
- cls.conn = mock_conn
- with patch('awslimitchecker.services.elasticache.ElastiCacheConnection'
- '') as mock_elasticache:
- with patch('%s.connect_via' % self.pb) as mock_connect_via:
- mock_elasticache.return_value = mock_conn
- cls.connect()
- assert mock_elasticache.mock_calls == []
- assert mock_conn.mock_calls == []
- assert mock_connect_via.mock_calls == []
-
def test_get_limits(self):
cls = _ElastiCacheService(21, 43)
cls.limits = {}
@@ -140,7 +94,7 @@ def test_get_limits_again(self):
def test_find_usage(self):
"""overall find usage method"""
- mock_conn = Mock(spec_set=ElastiCacheConnection)
+ mock_conn = Mock()
with patch('%s.connect' % self.pb) as mock_connect:
with patch.multiple(
@@ -167,166 +121,16 @@ def test_find_usage(self):
def test_find_usage_nodes(self):
"""test find usage for nodes"""
- clusters = [
- {
- 'Engine': 'memcached',
- 'CacheParameterGroup': {
- 'CacheNodeIdsToReboot': [],
- 'CacheParameterGroupName': 'default.memcached1.4',
- 'ParameterApplyStatus': 'in-sync'
- },
- 'CacheClusterId': 'memcached1',
- 'CacheSecurityGroups': [],
- 'ConfigurationEndpoint': {
- 'Port': 11211,
- 'Address': 'memcached1.vfavzi.cfg.use1.'
- 'cache.amazonaws.com'
- },
- 'CacheClusterCreateTime': 1431109646.755,
- 'ReplicationGroupId': None,
- 'AutoMinorVersionUpgrade': True,
- 'CacheClusterStatus': 'available',
- 'NumCacheNodes': 1,
- 'PreferredAvailabilityZone': 'us-east-1d',
- 'SecurityGroups': [
- {
- 'Status': 'active',
- 'SecurityGroupId': 'sg-11111111'
- }
- ],
- 'CacheSubnetGroupName': 'csg-memcached1',
- 'EngineVersion': '1.4.14',
- 'PendingModifiedValues': {
- 'NumCacheNodes': None,
- 'EngineVersion': None,
- 'CacheNodeIdsToRemove': None
- },
- 'CacheNodeType': 'cache.t2.small',
- 'NotificationConfiguration': None,
- 'PreferredMaintenanceWindow': 'mon:05:30-mon:06:30',
- 'CacheNodes': [
- {
- 'CacheNodeId': '0001',
- 'Endpoint': {
- 'Port': 11211,
- 'Address': 'memcached1.vfavzi.0001.'
- 'use1.cache.amazonaws.com'
- },
- 'CacheNodeStatus': 'available',
- 'ParameterGroupStatus': 'in-sync',
- 'CacheNodeCreateTime': 1431109646.755,
- 'SourceCacheNodeId': None
- }
- ]
- },
- {
- 'Engine': 'redis',
- 'CacheParameterGroup': {
- 'CacheNodeIdsToReboot': [],
- 'CacheParameterGroupName': 'default.redis2.8',
- 'ParameterApplyStatus': 'in-sync'
- },
- 'CacheClusterId': 'redis1',
- 'CacheSecurityGroups': [
- {
- 'Status': 'active',
- 'CacheSecurityGroupName': 'csg-redis1'
- }
- ],
- 'ConfigurationEndpoint': None,
- 'CacheClusterCreateTime': 1412253787.914,
- 'ReplicationGroupId': None,
- 'AutoMinorVersionUpgrade': True,
- 'CacheClusterStatus': 'available',
- 'NumCacheNodes': 2,
- 'PreferredAvailabilityZone': 'us-east-1a',
- 'SecurityGroups': None,
- 'CacheSubnetGroupName': None,
- 'EngineVersion': '2.8.6',
- 'PendingModifiedValues': {
- 'NumCacheNodes': None,
- 'EngineVersion': None,
- 'CacheNodeIdsToRemove': None
- },
- 'CacheNodeType': 'cache.m3.medium',
- 'NotificationConfiguration': None,
- 'PreferredMaintenanceWindow': 'mon:05:30-mon:06:30',
- 'CacheNodes': [
- {
- 'CacheNodeId': '0001',
- 'Endpoint': {
- 'Port': 6379,
- 'Address': 'redis1.vfavzi.0001.use1.cache.'
- 'amazonaws.com'
- },
- 'CacheNodeStatus': 'available',
- 'ParameterGroupStatus': 'in-sync',
- 'CacheNodeCreateTime': 1412253787.914,
- 'SourceCacheNodeId': None
- },
- {
- 'CacheNodeId': '0002',
- 'Endpoint': {
- 'Port': 6379,
- 'Address': 'redis1.vfavzi.0002.use1.cache.'
- 'amazonaws.com'
- },
- 'CacheNodeStatus': 'available',
- 'ParameterGroupStatus': 'in-sync',
- 'CacheNodeCreateTime': 1412253787.914,
- 'SourceCacheNodeId': None
- }
- ]
- },
- {
- 'Engine': 'redis',
- 'CacheParameterGroup': {
- 'CacheNodeIdsToReboot': [],
- 'CacheParameterGroupName': 'default.redis2.8',
- 'ParameterApplyStatus': 'in-sync'
- },
- 'CacheClusterId': 'redis2',
- 'CacheSecurityGroups': [
- {
- 'Status': 'active',
- 'CacheSecurityGroupName': 'csg-redis2'
- }
- ],
- 'ConfigurationEndpoint': None,
- 'CacheClusterCreateTime': 1412253787.123,
- 'ReplicationGroupId': None,
- 'AutoMinorVersionUpgrade': True,
- 'CacheClusterStatus': 'available',
- 'NumCacheNodes': 4,
- 'PreferredAvailabilityZone': 'us-east-1a',
- 'SecurityGroups': None,
- 'CacheSubnetGroupName': None,
- 'EngineVersion': '2.8.6',
- 'PendingModifiedValues': {
- 'NumCacheNodes': None,
- 'EngineVersion': None,
- 'CacheNodeIdsToRemove': None
- },
- 'CacheNodeType': 'cache.m3.medium',
- 'NotificationConfiguration': None,
- 'PreferredMaintenanceWindow': 'mon:05:30-mon:06:30',
- 'CacheNodes': None,
- },
- ]
- resp = {
- 'DescribeCacheClustersResponse': {
- 'DescribeCacheClustersResult': {
- 'CacheClusters': clusters
- }
- }
- }
+ # this also tests pagination
+ responses = result_fixtures.ElastiCache.test_find_usage_nodes
- mock_conn = Mock(spec_set=ElastiCacheConnection)
+ mock_conn = Mock()
+ mock_paginator = Mock()
+ mock_paginator.paginate.return_value = responses
+ mock_conn.get_paginator.return_value = mock_paginator
cls = _ElastiCacheService(21, 43)
cls.conn = mock_conn
- with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = resp
- cls._find_usage_nodes()
+ cls._find_usage_nodes()
usage = cls.limits['Nodes'].get_current_usage()
assert len(usage) == 1
@@ -345,126 +149,34 @@ def test_find_usage_nodes(self):
assert usage[2].get_value() == 4
assert usage[2].resource_id == 'redis2'
- assert mock_conn.mock_calls == []
- assert mock_wrapper.mock_calls == [
- call(
- mock_conn.describe_cache_clusters,
- show_cache_node_info=True,
- alc_marker_path=[
- 'DescribeCacheClustersResponse',
- 'DescribeCacheClustersResult',
- 'Marker'
- ],
- alc_data_path=[
- 'DescribeCacheClustersResponse',
- 'DescribeCacheClustersResult',
- 'CacheClusters'
- ],
- alc_marker_param='marker'
- )
+ assert mock_conn.mock_calls == [
+ call.get_paginator('describe_cache_clusters'),
+ call.get_paginator().paginate(ShowCacheNodeInfo=True)
+ ]
+ assert mock_paginator.mock_calls == [
+ call.paginate(ShowCacheNodeInfo=True)
]
def test_find_usage_subnet_groups(self):
"""test find usage for subnet groups"""
- data = {
- 'DescribeCacheSubnetGroupsResponse': {
- 'DescribeCacheSubnetGroupsResult': {
- 'CacheSubnetGroups': [
- {
- 'Subnets': [
- {
- 'SubnetIdentifier': 'subnet-62e8783b',
- 'SubnetAvailabilityZone': {
- 'Name': 'us-east-1d'}
- },
- {
- 'SubnetIdentifier': 'subnet-0b037e7c',
- 'SubnetAvailabilityZone': {
- 'Name': 'us-east-1a'
- }
- }
- ],
- 'CacheSubnetGroupName': 'break-memca-135tjabqoyywd',
- 'VpcId': 'vpc-73ec9716',
- 'CacheSubnetGroupDescription': 'memcached'
- },
- {
- 'Subnets': [
- {
- 'SubnetIdentifier': 'subnet-38e87861',
- 'SubnetAvailabilityZone': {
- 'Name': 'us-east-1d'
- }
- },
- {
- 'SubnetIdentifier': 'subnet-4f027f38',
- 'SubnetAvailabilityZone': {
- 'Name': 'us-east-1a'
- }
- }
- ],
- 'CacheSubnetGroupName': 'break-memca-6yi6axon9ol9',
- 'VpcId': 'vpc-1ee8937b',
- 'CacheSubnetGroupDescription': 'memcached'
- },
- {
- 'Subnets': [
- {
- 'SubnetIdentifier': 'subnet-49071f61',
- 'SubnetAvailabilityZone': {
- 'Name': 'us-east-1e'
- }
- },
- {
- 'SubnetIdentifier': 'subnet-6fe23c18',
- 'SubnetAvailabilityZone': {
- 'Name': 'us-east-1a'
- }
- },
- {
- 'SubnetIdentifier': 'subnet-a9b54df0',
- 'SubnetAvailabilityZone': {
- 'Name': 'us-east-1d'
- }
- }
- ],
- 'CacheSubnetGroupName': 'lsp-d-redis-14d9407dl05er',
- 'VpcId': 'vpc-c300b9a6',
- 'CacheSubnetGroupDescription': 'redis'
- },
- ],
- 'Marker': None
- },
- 'ResponseMetadata': {
- 'RequestId': '79654b0b-26ac-11e5-aaab-63850f3e3bca'
- }
- }
- }
+ # this also tests pagination
+ responses = result_fixtures.ElastiCache.test_find_usage_subnet_groups
- mock_conn = Mock(spec_set=ElastiCacheConnection)
+ mock_conn = Mock()
+ mock_paginator = Mock()
+ mock_paginator.paginate.return_value = responses
+ mock_conn.get_paginator.return_value = mock_paginator
cls = _ElastiCacheService(21, 43)
cls.conn = mock_conn
- with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = data
- cls._find_usage_subnet_groups()
+ cls._find_usage_subnet_groups()
- assert mock_conn.mock_calls == []
- assert mock_wrapper.mock_calls == [
- call(
- mock_conn.describe_cache_subnet_groups,
- alc_marker_path=[
- 'DescribeCacheSubnetGroupsResponse',
- 'DescribeCacheSubnetGroupsResult',
- 'Marker'
- ],
- alc_data_path=[
- 'DescribeCacheSubnetGroupsResponse',
- 'DescribeCacheSubnetGroupsResult',
- 'CacheSubnetGroups'
- ],
- alc_marker_param='marker'
- )
+ assert mock_conn.mock_calls == [
+ call.get_paginator('describe_cache_subnet_groups'),
+ call.get_paginator().paginate()
+ ]
+ assert mock_paginator.mock_calls == [
+ call.paginate()
]
usage = cls.limits['Subnet Groups'].get_current_usage()
@@ -473,57 +185,23 @@ def test_find_usage_subnet_groups(self):
def test_find_usage_parameter_groups(self):
"""test find usage for parameter groups"""
- data = {
- 'DescribeCacheParameterGroupsResponse': {
- 'DescribeCacheParameterGroupsResult': {
- 'Marker': None,
- 'CacheParameterGroups': [
- {
- 'CacheParameterGroupName': 'default.memcached1.4',
- 'CacheParameterGroupFamily': 'memcached1.4',
- 'Description': 'Default for memcached1.4'
- },
- {
- 'CacheParameterGroupName': 'default.redis2.6',
- 'CacheParameterGroupFamily': 'redis2.6',
- 'Description': 'Default for redis2.6'
- },
- {
- 'CacheParameterGroupName': 'default.redis2.8',
- 'CacheParameterGroupFamily': 'redis2.8',
- 'Description': 'Default for redis2.8'
- }
- ]
- },
- 'ResponseMetadata': {
- 'RequestId': 'ab5d593f-26ac-11e5-a6dd-17ec0aded872'
- }
- }
- }
+ # this also tests pagination
+ responses = result_fixtures.ElastiCache.test_find_usage_parameter_groups
- mock_conn = Mock(spec_set=ElastiCacheConnection)
+ mock_conn = Mock()
+ mock_paginator = Mock()
+ mock_paginator.paginate.return_value = responses
+ mock_conn.get_paginator.return_value = mock_paginator
cls = _ElastiCacheService(21, 43)
cls.conn = mock_conn
- with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = data
- cls._find_usage_parameter_groups()
+ cls._find_usage_parameter_groups()
- assert mock_conn.mock_calls == []
- assert mock_wrapper.mock_calls == [
- call(
- mock_conn.describe_cache_parameter_groups,
- alc_marker_path=[
- 'DescribeCacheParameterGroupsResponse',
- 'DescribeCacheParameterGroupsResult',
- 'Marker'
- ],
- alc_data_path=[
- 'DescribeCacheParameterGroupsResponse',
- 'DescribeCacheParameterGroupsResult',
- 'CacheParameterGroups'
- ],
- alc_marker_param='marker'
- )
+ assert mock_conn.mock_calls == [
+ call.get_paginator('describe_cache_parameter_groups'),
+ call.get_paginator().paginate()
+ ]
+ assert mock_paginator.mock_calls == [
+ call.paginate()
]
usage = cls.limits['Parameter Groups'].get_current_usage()
@@ -532,103 +210,112 @@ def test_find_usage_parameter_groups(self):
def test_find_usage_security_groups(self):
"""test find usage for security groups"""
- data = {
- 'DescribeCacheSecurityGroupsResponse': {
- 'DescribeCacheSecurityGroupsResult': {
- 'Marker': None,
- 'CacheSecurityGroups': [
- {
- 'OwnerId': '123456789012',
- 'CacheSecurityGroupName': 'default',
- 'Description': 'default',
- 'EC2SecurityGroups': []
- },
- {
- 'OwnerId': '123456789012',
- 'CacheSecurityGroupName': 'csg1',
- 'Description': 'foo bar',
- 'EC2SecurityGroups': [
- {
- 'EC2SecurityGroupName': 'ec2-sg1',
- 'Status': 'authorized',
- 'EC2SecurityGroupOwnerId': '123456789012'
- }
- ]
- }
- ]
- },
- 'ResponseMetadata': {
- 'RequestId': 'be15fa9c-26ac-11e5-a849-894e77ed58a8'
- }
- }
- }
+ # this also tests pagination
+ responses = result_fixtures.ElastiCache.test_find_usage_security_groups
- mock_conn = Mock(spec_set=ElastiCacheConnection)
+ mock_conn = Mock()
+ mock_paginator = Mock()
+ mock_paginator.paginate.return_value = responses
+ mock_conn.get_paginator.return_value = mock_paginator
cls = _ElastiCacheService(21, 43)
cls.conn = mock_conn
- with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = data
- cls._find_usage_security_groups()
+ cls._find_usage_security_groups()
- assert mock_conn.mock_calls == []
- assert mock_wrapper.mock_calls == [
- call(
- mock_conn.describe_cache_security_groups,
- alc_marker_path=[
- 'DescribeCacheSecurityGroupsResponse',
- 'DescribeCacheSecurityGroupsResult',
- 'Marker'
- ],
- alc_data_path=[
- 'DescribeCacheSecurityGroupsResponse',
- 'DescribeCacheSecurityGroupsResult',
- 'CacheSecurityGroups'
- ],
- alc_marker_param='marker'
- )
+ assert mock_conn.mock_calls == [
+ call.get_paginator('describe_cache_security_groups'),
+ call.get_paginator().paginate()
+ ]
+ assert mock_paginator.mock_calls == [
+ call.paginate()
]
usage = cls.limits['Security Groups'].get_current_usage()
assert len(usage) == 1
assert usage[0].get_value() == 2
- def test_find_usage_security_groups_exception(self):
+ def test_find_usage_security_groups_no_ec2_classic(self):
"""test find usage for security groups"""
def se_exc(*args, **kwargs):
- raise BotoServerError(None, None, None)
+ resp = {
+ 'ResponseMetadata': {
+ 'HTTPStatusCode': 400,
+ 'RequestId': '7d74c6f0-c789-11e5-82fe-a96cdaa6d564'
+ },
+ 'Error': {
+ 'Message': 'Use of cache security groups is not permitted'
+ ' in this API version for your account.',
+ 'Code': 'InvalidParameterValue',
+ 'Type': 'Sender'
+ }
+ }
+ raise ClientError(resp, 'operation')
+
+ mock_conn = Mock()
+ mock_paginator = Mock()
+ mock_paginator.paginate.side_effect = se_exc
+ mock_conn.get_paginator.return_value = mock_paginator
- mock_conn = Mock(spec_set=ElastiCacheConnection)
- mock_conn.describe_cache_security_groups.side_effect = se_exc
cls = _ElastiCacheService(21, 43)
cls.conn = mock_conn
- with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.side_effect = se_exc
+ with patch('%s.logger' % self.pbm) as mock_logger:
cls._find_usage_security_groups()
- assert mock_conn.mock_calls == []
- assert mock_wrapper.mock_calls == [
- call(
- mock_conn.describe_cache_security_groups,
- alc_marker_path=[
- 'DescribeCacheSecurityGroupsResponse',
- 'DescribeCacheSecurityGroupsResult',
- 'Marker'
- ],
- alc_data_path=[
- 'DescribeCacheSecurityGroupsResponse',
- 'DescribeCacheSecurityGroupsResult',
- 'CacheSecurityGroups'
- ],
- alc_marker_param='marker'
- )
+ assert mock_conn.mock_calls == [
+ call.get_paginator('describe_cache_security_groups'),
+ call.get_paginator().paginate()
+ ]
+ assert mock_paginator.mock_calls == [
+ call.paginate()
+ ]
+ assert mock_logger.mock_calls == [
+ call.debug("caught ClientError checking ElastiCache security "
+ "groups (account without EC2-Classic?)")
]
usage = cls.limits['Security Groups'].get_current_usage()
assert len(usage) == 1
assert usage[0].get_value() == 0
+ def test_find_usage_security_groups_exception(self):
+ """test find usage for security groups"""
+ err_resp = {
+ 'ResponseMetadata': {
+ 'HTTPStatusCode': 400,
+ 'RequestId': '7d74c6f0-c789-11e5-82fe-a96cdaa6d564'
+ },
+ 'Error': {
+ 'Message': 'other message',
+ 'Code': 'OtherCode',
+ 'Type': 'Sender'
+ }
+ }
+ exc = ClientError(err_resp, 'operation')
+
+ def se_exc(*args, **kwargs):
+ raise exc
+
+ mock_conn = Mock()
+ mock_paginator = Mock()
+ mock_paginator.paginate.side_effect = se_exc
+ mock_conn.get_paginator.return_value = mock_paginator
+
+ cls = _ElastiCacheService(21, 43)
+ cls.conn = mock_conn
+
+ with pytest.raises(Exception) as raised:
+ cls._find_usage_security_groups()
+
+ assert mock_conn.mock_calls == [
+ call.get_paginator('describe_cache_security_groups'),
+ call.get_paginator().paginate()
+ ]
+ assert mock_paginator.mock_calls == [
+ call.paginate()
+ ]
+ assert raised.value == exc
+
def test_required_iam_permissions(self):
cls = _ElastiCacheService(21, 43)
assert cls.required_iam_permissions() == [
diff --git a/awslimitchecker/tests/services/test_elb.py b/awslimitchecker/tests/services/test_elb.py
index df67403b..676d6599 100644
--- a/awslimitchecker/tests/services/test_elb.py
+++ b/awslimitchecker/tests/services/test_elb.py
@@ -38,9 +38,7 @@
"""
import sys
-from boto.ec2.elb import ELBConnection
-from boto.ec2.elb.loadbalancer import LoadBalancer
-from boto.ec2.elb import connect_to_region
+from awslimitchecker.tests.services import result_fixtures
from awslimitchecker.services.elb import _ElbService
# https://code.google.com/p/mock/issues/detail?id=249
@@ -67,51 +65,6 @@ def test_init(self):
assert cls.warning_threshold == 21
assert cls.critical_threshold == 43
- def test_connect(self):
- """test connect()"""
- mock_conn = Mock()
- mock_conn_via = Mock()
- cls = _ElbService(21, 43)
- with patch('%s.boto.connect_elb' % self.pbm) as mock_elb:
- with patch('%s.connect_via' % self.pb) as mock_connect_via:
- mock_elb.return_value = mock_conn
- mock_connect_via.return_value = mock_conn_via
- cls.connect()
- assert mock_elb.mock_calls == [call()]
- assert mock_conn.mock_calls == []
- assert mock_connect_via.mock_calls == []
- assert cls.conn == mock_conn
-
- def test_connect_region(self):
- """test connect()"""
- mock_conn = Mock()
- mock_conn_via = Mock()
- cls = _ElbService(21, 43, region='myregion')
- with patch('%s.boto.connect_elb' % self.pbm) as mock_elb:
- with patch('%s.connect_via' % self.pb) as mock_connect_via:
- mock_elb.return_value = mock_conn
- mock_connect_via.return_value = mock_conn_via
- cls.connect()
- assert mock_elb.mock_calls == []
- assert mock_conn.mock_calls == []
- assert mock_connect_via.mock_calls == [
- call(connect_to_region)
- ]
- assert cls.conn == mock_conn_via
-
- def test_connect_again(self):
- """make sure we re-use the connection"""
- mock_conn = Mock()
- cls = _ElbService(21, 43)
- cls.conn = mock_conn
- with patch('awslimitchecker.services.elb.boto.connect_elb') as mock_elb:
- with patch('%s.connect_via' % self.pb) as mock_connect_via:
- mock_elb.return_value = mock_conn
- cls.connect()
- assert mock_elb.mock_calls == []
- assert mock_conn.mock_calls == []
- assert mock_connect_via.mock_calls == []
-
def test_get_limits(self):
cls = _ElbService(21, 43)
cls.limits = {}
@@ -134,29 +87,9 @@ def test_get_limits_again(self):
assert res == mock_limits
def test_find_usage(self):
- mock_elb1 = Mock(spec_set=LoadBalancer)
- type(mock_elb1).name = 'elb-1'
- type(mock_elb1).listeners = [1]
-
- mock_elb2 = Mock(spec_set=LoadBalancer)
- type(mock_elb2).name = 'elb-2'
- type(mock_elb2).listeners = [1, 2]
-
- mock_elb3 = Mock(spec_set=LoadBalancer)
- type(mock_elb3).name = 'elb-3'
- type(mock_elb3).listeners = [1, 2, 3]
-
- mock_elb4 = Mock(spec_set=LoadBalancer)
- type(mock_elb4).name = 'elb-4'
- type(mock_elb4).listeners = [1, 2, 3, 4, 5, 6]
-
- mock_conn = Mock(spec_set=ELBConnection)
- return_value = [
- mock_elb1,
- mock_elb2,
- mock_elb3,
- mock_elb4
- ]
+ mock_conn = Mock()
+
+ return_value = result_fixtures.ELB.test_find_usage
with patch('%s.connect' % self.pb) as mock_connect:
with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
@@ -170,7 +103,12 @@ def test_find_usage(self):
assert cls._have_usage is True
assert mock_conn.mock_calls == []
assert mock_wrapper.mock_calls == [
- call(mock_conn.get_all_load_balancers)
+ call(
+ mock_conn.describe_load_balancers,
+ alc_marker_path=['NextMarker'],
+ alc_data_path=['LoadBalancerDescriptions'],
+ alc_marker_param='Marker'
+ )
]
assert len(cls.limits['Active load balancers'].get_current_usage()) == 1
assert cls.limits['Active load balancers'
diff --git a/awslimitchecker/tests/services/test_newservice.py.example b/awslimitchecker/tests/services/test_newservice.py.example
index 13006cee..f3eb6f01 100644
--- a/awslimitchecker/tests/services/test_newservice.py.example
+++ b/awslimitchecker/tests/services/test_newservice.py.example
@@ -38,8 +38,7 @@ Jason Antman
"""
import sys
-# TODO confirm this is the correct import
-from boto.XXnewserviceXX.connection import XXNewServiceXXConnection
+from awslimitchecker.tests.services import result_fixtures
from awslimitchecker.services.XXnewserviceXX import _XXNewServiceXXService
# https://code.google.com/p/mock/issues/detail?id=249
@@ -53,9 +52,10 @@ else:
from unittest.mock import patch, call, Mock
-class Test_XXNewServiceXXService(object):
+pbm = 'awslimitchecker.services.XXnewserviceXX' # module patch base
+pb = '%s._XXNewServiceXXService' % pbm  # class patch base
- pb = 'awslimitchecker.services.XXnewserviceXX._XXNewServiceXXService' # patch base path
+class Test_XXNewServiceXXService(object):
def test_init(self):
"""test __init__()"""
@@ -65,27 +65,6 @@ class Test_XXNewServiceXXService(object):
assert cls.warning_threshold == 21
assert cls.critical_threshold == 43
- def test_connect(self):
- """test connect()"""
- mock_conn = Mock()
- cls = _XXNewServiceXXService(21, 43)
- with patch('awslimitchecker.services.XXnewserviceXX.boto.connect_XXnewserviceXX') as mock_XXnewserviceXX:
- mock_XXnewserviceXX.return_value = mock_conn
- cls.connect()
- assert mock_XXnewserviceXX.mock_calls == [call()]
- assert mock_conn.mock_calls == []
-
- def test_connect_again(self):
- """make sure we re-use the connection"""
- mock_conn = Mock()
- cls = _XXNewServiceXXService(21, 43)
- cls.conn = mock_conn
- with patch('awslimitchecker.services.XXnewserviceXX.boto.connect_XXnewserviceXX') as mock_XXnewserviceXX:
- mock_XXnewserviceXX.return_value = mock_conn
- cls.connect()
- assert mock_XXnewserviceXX.mock_calls == []
- assert mock_conn.mock_calls == []
-
def test_get_limits(self):
cls = _XXNewServiceXXService(21, 43)
cls.limits = {}
@@ -108,9 +87,11 @@ class Test_XXNewServiceXXService(object):
assert res == mock_limits
def test_find_usage(self):
+        # put boto3 responses in result_fixtures.py, then do something like:
+ # response = result_fixtures.EBS.test_find_usage_ebs
mock_conn = Mock(spec_set=XXNewServiceXXConnection)
- with patch('%s.connect' % self.pb) as mock_connect:
- with patch('%s.boto_service_wrapper' % self.pbm) as mock_wrapper:
+ with patch('%s.connect' % pb) as mock_connect:
+ with patch('%s.boto_service_wrapper' % pbm) as mock_wrapper:
cls = _XXNewServiceXXService(21, 43)
cls.conn = mock_conn
mock_wrapper.return_value = # some logical return value
diff --git a/awslimitchecker/tests/services/test_rds.py b/awslimitchecker/tests/services/test_rds.py
index e98bdd06..186d4d88 100644
--- a/awslimitchecker/tests/services/test_rds.py
+++ b/awslimitchecker/tests/services/test_rds.py
@@ -38,8 +38,7 @@
"""
import sys
-from boto.rds2.layer1 import RDSConnection
-from boto.rds2 import connect_to_region
+from awslimitchecker.tests.services import result_fixtures
from awslimitchecker.services.rds import _RDSService
# https://code.google.com/p/mock/issues/detail?id=249
@@ -66,52 +65,6 @@ def test_init(self):
assert cls.warning_threshold == 21
assert cls.critical_threshold == 43
- def test_connect(self):
- """test connect()"""
- mock_conn = Mock()
- mock_conn_via = Mock()
- cls = _RDSService(21, 43)
- with patch('%s.boto.connect_rds2' % self.pbm) as mock_rds:
- with patch('%s.connect_via' % self.pb) as mock_connect_via:
- mock_rds.return_value = mock_conn
- mock_connect_via.return_value = mock_conn_via
- cls.connect()
- assert mock_rds.mock_calls == [call()]
- assert mock_conn.mock_calls == []
- assert mock_connect_via.mock_calls == []
- assert cls.conn == mock_conn
-
- def test_connect_region(self):
- """test connect()"""
- mock_conn = Mock()
- mock_conn_via = Mock()
- cls = _RDSService(21, 43, region='foo')
- with patch('%s.boto.connect_rds2' % self.pbm) as mock_rds:
- with patch('%s.connect_via' % self.pb) as mock_connect_via:
- mock_rds.return_value = mock_conn
- mock_connect_via.return_value = mock_conn_via
- cls.connect()
- assert mock_rds.mock_calls == []
- assert mock_conn.mock_calls == []
- assert mock_connect_via.mock_calls == [
- call(connect_to_region)
- ]
- assert cls.conn == mock_conn_via
-
- def test_connect_again(self):
- """make sure we re-use the connection"""
- mock_conn = Mock()
- cls = _RDSService(21, 43)
- cls.conn = mock_conn
- with patch('awslimitchecker.services.rds.boto.connect_rds2'
- '') as mock_rds:
- with patch('%s.connect_via' % self.pb) as mock_connect_via:
- mock_rds.return_value = mock_conn
- cls.connect()
- assert mock_rds.mock_calls == []
- assert mock_conn.mock_calls == []
- assert mock_connect_via.mock_calls == []
-
def test_get_limits(self):
cls = _RDSService(21, 43)
cls.limits = {}
@@ -147,7 +100,7 @@ def test_get_limits_again(self):
assert res == mock_limits
def test_find_usage(self):
- mock_conn = Mock(spec_set=RDSConnection)
+ mock_conn = Mock()
with patch('%s.connect' % self.pb) as mock_connect:
with patch.multiple(
@@ -187,225 +140,24 @@ def test_required_iam_permissions(self):
]
def test_find_usage_instances(self):
- mock_conn = Mock(spec_set=RDSConnection)
+ instances = result_fixtures.RDS.test_find_usage_instances
+
+ mock_conn = Mock()
+ mock_paginator = Mock()
+ mock_paginator.paginate.return_value = instances
+ mock_conn.get_paginator.return_value = mock_paginator
+
cls = _RDSService(21, 43)
cls.conn = mock_conn
- instances = [
- {
- 'PubliclyAccessible': False,
- 'MasterUsername': 'myuser',
- 'LicenseModel': 'general-public-license',
- 'VpcSecurityGroups': [
- {
- 'Status': 'active',
- 'VpcSecurityGroupId': 'sg-aaaaaaaa'
- }
- ],
- 'InstanceCreateTime': 1429910904.366,
- 'OptionGroupMemberships': [
- {
- 'Status': 'in-sync',
- 'OptionGroupName': 'default:mysql-5-6'
- }
- ],
- 'PendingModifiedValues': {
- 'MultiAZ': None,
- 'MasterUserPassword': None,
- 'Port': None,
- 'Iops': None,
- 'AllocatedStorage': None,
- 'EngineVersion': None,
- 'BackupRetentionPeriod': None,
- 'DBInstanceClass': None,
- 'DBInstanceIdentifier': None
- },
- 'Engine': 'mysql',
- 'MultiAZ': True,
- 'LatestRestorableTime': 1435966800.0,
- 'DBSecurityGroups': [
- {
- 'Status': 'active',
- 'DBSecurityGroupName': 'mydb-dbsecuritygroup-aaaa'
- }
- ],
- 'DBParameterGroups': [
- {
- 'DBParameterGroupName': 'default.mysql5.6',
- 'ParameterApplyStatus': 'in-sync'
- }
- ],
- 'ReadReplicaSourceDBInstanceIdentifier': None,
- 'AutoMinorVersionUpgrade': True,
- 'PreferredBackupWindow': '07:00-08:00',
- 'DBSubnetGroup': {
- 'VpcId': 'vpc-abcdef01',
- 'Subnets': [
- {
- 'SubnetStatus': 'Active',
- 'SubnetIdentifier': 'subnet-aaaaaaaa',
- 'SubnetAvailabilityZone': {
- 'Name': 'us-east-1d',
- 'ProvisionedIopsCapable': False
- }
- },
- {
- 'SubnetStatus': 'Active',
- 'SubnetIdentifier': 'subnet-22222222',
- 'SubnetAvailabilityZone': {
- 'Name': 'us-east-1a',
- 'ProvisionedIopsCapable': False
- }
- }
- ],
- 'DBSubnetGroupName': 'mydb-dbsubnetgroup-abcd',
- 'SubnetGroupStatus': 'Complete',
- 'DBSubnetGroupDescription': 'Subnet group for RDS instance'
- },
- 'SecondaryAvailabilityZone': 'us-east-1a',
- 'ReadReplicaDBInstanceIdentifiers': [],
- 'AllocatedStorage': 200,
- 'BackupRetentionPeriod': 7,
- 'DBName': 'wordpress',
- 'PreferredMaintenanceWindow': 'tue:08:00-tue:09:00',
- 'Endpoint': {
- 'Port': 3306,
- 'Address': 'foo.bar.us-east-1.rds.amazonaws.com'
- },
- 'DBInstanceStatus': 'available',
- 'StatusInfos': None,
- 'EngineVersion': '5.6.22',
- 'CharacterSetName': None,
- 'AvailabilityZone': 'us-east-1d',
- 'Iops': None,
- 'DBInstanceClass': 'db.t2.small',
- 'DBInstanceIdentifier': 'foo'
- },
- {
- 'PubliclyAccessible': False,
- 'MasterUsername': 'myuser2',
- 'LicenseModel': 'postgresql-license',
- 'VpcSecurityGroups': [
- {
- 'Status': 'active',
- 'VpcSecurityGroupId': 'sg-12345678'
- }
- ],
- 'InstanceCreateTime': 1432238504.239,
- 'OptionGroupMemberships': [
- {
- 'Status': 'in-sync',
- 'OptionGroupName': 'default:postgres-9-3'
- }
- ],
- 'PendingModifiedValues': {
- 'MultiAZ': None,
- 'MasterUserPassword': None,
- 'Port': None,
- 'Iops': None,
- 'AllocatedStorage': None,
- 'EngineVersion': None,
- 'BackupRetentionPeriod': None,
- 'DBInstanceClass': None,
- 'DBInstanceIdentifier': None
- },
- 'Engine': 'postgres',
- 'MultiAZ': False,
- 'LatestRestorableTime': 1435966550.0,
- 'DBSecurityGroups': [
- {
- 'Status': 'active',
- 'DBSecurityGroupName': 'sg1234-dbsecuritygroup-abcd'
- }
- ],
- 'DBParameterGroups': [
- {
- 'DBParameterGroupName': 'default.postgres9.3',
- 'ParameterApplyStatus': 'in-sync'
- }
- ],
- 'ReadReplicaSourceDBInstanceIdentifier': None,
- 'AutoMinorVersionUpgrade': True,
- 'PreferredBackupWindow': '03:09-03:39',
- 'DBSubnetGroup': {
- 'VpcId': 'vpc-87654321',
- 'Subnets': [
- {
- 'SubnetStatus': 'Active',
- 'SubnetIdentifier': 'subnet-a1234567',
- 'SubnetAvailabilityZone': {
- 'Name': 'us-east-1e',
- 'ProvisionedIopsCapable': False
- }
- },
- {
- 'SubnetStatus': 'Active',
- 'SubnetIdentifier': 'subnet-b1234567',
- 'SubnetAvailabilityZone': {
- 'Name': 'us-east-1a',
- 'ProvisionedIopsCapable': False
- }
- },
- {
- 'SubnetStatus': 'Active',
- 'SubnetIdentifier': 'subnet-c1234567',
- 'SubnetAvailabilityZone': {
- 'Name': 'us-east-1d',
- 'ProvisionedIopsCapable': False
- }
- }
- ],
- 'DBSubnetGroupName': 'mydb-dbsubnetgroup-abcdef',
- 'SubnetGroupStatus': 'Complete',
- 'DBSubnetGroupDescription': 'Subnet group for RDS instance'
- },
- 'SecondaryAvailabilityZone': None,
- 'ReadReplicaDBInstanceIdentifiers': ['db-123', 'db-456'],
- 'AllocatedStorage': 50,
- 'BackupRetentionPeriod': 1,
- 'DBName': 'mydbname',
- 'PreferredMaintenanceWindow': 'mon:05:11-mon:05:41',
- 'Endpoint': {
- 'Port': 5432,
- 'Address': 'baz.blam.us-east-1.rds.amazonaws.com'
- },
- 'DBInstanceStatus': 'available',
- 'StatusInfos': None,
- 'EngineVersion': '9.3.6',
- 'CharacterSetName': None,
- 'AvailabilityZone': 'us-east-1a',
- 'Iops': None,
- 'DBInstanceClass': 'db.t2.small',
- 'DBInstanceIdentifier': 'baz'
- }
+
+ cls._find_usage_instances()
+
+ assert mock_conn.mock_calls == [
+ call.get_paginator('describe_db_instances'),
+ call.get_paginator().paginate()
]
- return_value = {
- 'DescribeDBInstancesResponse': {
- 'DescribeDBInstancesResult': {
- 'DBInstances': instances
- }
- }
- }
-
- with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = return_value
- cls._find_usage_instances()
-
- assert mock_conn.mock_calls == []
- assert mock_wrapper.mock_calls == [
- call(
- mock_conn.describe_db_instances,
- alc_marker_path=[
- 'DescribeDBInstancesResponse',
- 'DescribeDBInstancesResult',
- 'Marker'
- ],
- alc_data_path=[
- 'DescribeDBInstancesResponse',
- 'DescribeDBInstancesResult',
- 'DBInstances'
- ],
- alc_marker_param='marker'
- )
+ assert mock_paginator.mock_calls == [
+ call.paginate()
]
usage = sorted(cls.limits['DB instances'].get_current_usage())
@@ -428,108 +180,24 @@ def test_find_usage_instances(self):
assert usage[1].resource_id == 'baz'
def test_find_usage_snapshots(self):
- data = {
- "DescribeDBSnapshotsResponse": {
- "DescribeDBSnapshotsResult": {
- "DBSnapshots": [
- {
- "AllocatedStorage": 100,
- "AvailabilityZone": "us-east-1a",
- "DBInstanceIdentifier": "foo-db",
- "DBSnapshotIdentifier": "foo-db-final-snapshot",
- "Engine": "postgres",
- "EngineVersion": "9.3.3",
- "InstanceCreateTime": 1408035263.101,
- "Iops": 1000,
- "LicenseModel": "postgresql-license",
- "MasterUsername": "dbfoouser",
- "OptionGroupName": "default:postgres-9-3",
- "PercentProgress": 100,
- "Port": 5432,
- "SnapshotCreateTime": 1408454469.536,
- "SnapshotType": "manual",
- "SourceRegion": None,
- "Status": "available",
- "VpcId": None
- },
- {
- "AllocatedStorage": 50,
- "AvailabilityZone": "us-east-1d",
- "DBInstanceIdentifier": "bd1t3lf90p3lqdx",
- "DBSnapshotIdentifier":
- "rds:bd1t3lf90p3lqdx-2015-06-29-07-02",
- "Engine": "mysql",
- "EngineVersion": "5.6.22",
- "InstanceCreateTime": 1429910904.366,
- "Iops": None,
- "LicenseModel": "general-public-license",
- "MasterUsername": "dbuser3",
- "OptionGroupName": "default:mysql-5-6",
- "PercentProgress": 100,
- "Port": 3306,
- "SnapshotCreateTime": 1435561349.441,
- "SnapshotType": "automated",
- "SourceRegion": None,
- "Status": "available",
- "VpcId": "vpc-1ee8937b"
- },
- {
- "AllocatedStorage": 25,
- "AvailabilityZone": "us-east-1d",
- "DBInstanceIdentifier": "md1e8qwtegkjdgy",
- "DBSnapshotIdentifier":
- "rds:md1e8qwtegkjdgy-2015-06-29-07-06",
- "Engine": "postgres",
- "EngineVersion": "9.3.6",
- "InstanceCreateTime": 1433883813.314,
- "Iops": None,
- "LicenseModel": "postgresql-license",
- "MasterUsername": "dbuser4",
- "OptionGroupName": "default:postgres-9-3",
- "PercentProgress": 100,
- "Port": 5432,
- "SnapshotCreateTime": 1435561593.669,
- "SnapshotType": "automated",
- "SourceRegion": None,
- "Status": "available",
- "VpcId": "vpc-1ee8937b"
- },
- ],
- "Marker":
- "YXJuOmF3czpyZHM6dXMtZWFzdC0xOjkzNDQ0NjIwOTU0MTpzbm"
- "Fwc2hvdDpyZHM6bWQxZThxd3RlZ2tqZGd5LTIwMTUtMDctMDEt"
- "MDctMDc="
- },
- "ResponseMetadata": {
- "RequestId": "5fe976b3-2499-11e5-ad5a-1fed04d9fd3d"
- }
- }
- }
-
- mock_conn = Mock(spec_set=RDSConnection)
+ response = result_fixtures.RDS.test_find_usage_snapshots
+
+ mock_conn = Mock()
+ mock_paginator = Mock()
+ mock_paginator.paginate.return_value = response
+ mock_conn.get_paginator.return_value = mock_paginator
+
cls = _RDSService(21, 43)
cls.conn = mock_conn
- with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = data
- cls._find_usage_snapshots()
-
- assert mock_conn.mock_calls == []
- assert mock_wrapper.mock_calls == [
- call(
- mock_conn.describe_db_snapshots,
- alc_marker_path=[
- "DescribeDBSnapshotsResponse",
- "DescribeDBSnapshotsResult",
- 'Marker'
- ],
- alc_data_path=[
- "DescribeDBSnapshotsResponse",
- "DescribeDBSnapshotsResult",
- "DBSnapshots"
- ],
- alc_marker_param='marker'
- )
+ cls._find_usage_snapshots()
+
+ assert mock_conn.mock_calls == [
+ call.get_paginator('describe_db_snapshots'),
+ call.get_paginator().paginate()
+ ]
+ assert mock_paginator.mock_calls == [
+ call.paginate()
]
usage = sorted(cls.limits['DB snapshots per user'].get_current_usage())
@@ -538,55 +206,24 @@ def test_find_usage_snapshots(self):
assert usage[0].aws_type == 'AWS::RDS::DBSnapshot'
def test_find_usage_param_groups(self):
- data = {
- "DescribeDBParameterGroupsResponse": {
- "DescribeDBParameterGroupsResult": {
- "DBParameterGroups": [
- {
- "DBParameterGroupFamily": "mysql5.6",
- "DBParameterGroupName": "default.mysql5.6",
- "Description":
- "Default parameter group for mysql5.6"
- },
- {
- "DBParameterGroupFamily": "postgres9.3",
- "DBParameterGroupName": "default.postgres9.3",
- "Description":
- "Default parameter group for postgres9.3"
- }
- ],
- "Marker": None
- },
- "ResponseMetadata": {
- "RequestId": "xxxxxxxxxxxxxxx"
- }
- }
- }
-
- mock_conn = Mock(spec_set=RDSConnection)
+ data = result_fixtures.RDS.test_find_usage_param_groups
+
+ mock_conn = Mock()
+ mock_paginator = Mock()
+ mock_paginator.paginate.return_value = data
+ mock_conn.get_paginator.return_value = mock_paginator
+
cls = _RDSService(21, 43)
cls.conn = mock_conn
- with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = data
- cls._find_usage_param_groups()
-
- assert mock_conn.mock_calls == []
- assert mock_wrapper.mock_calls == [
- call(
- mock_conn.describe_db_parameter_groups,
- alc_marker_path=[
- "DescribeDBParameterGroupsResponse",
- "DescribeDBParameterGroupsResult",
- 'Marker'
- ],
- alc_data_path=[
- "DescribeDBParameterGroupsResponse",
- "DescribeDBParameterGroupsResult",
- "DBParameterGroups"
- ],
- alc_marker_param='marker'
- )
+ cls._find_usage_param_groups()
+
+ assert mock_conn.mock_calls == [
+ call.get_paginator('describe_db_parameter_groups'),
+ call.get_paginator().paginate()
+ ]
+ assert mock_paginator.mock_calls == [
+ call.paginate()
]
usage = sorted(cls.limits['DB parameter groups'].get_current_usage())
@@ -595,120 +232,24 @@ def test_find_usage_param_groups(self):
assert usage[0].aws_type == 'AWS::RDS::DBParameterGroup'
def test_find_usage_subnet_groups(self):
- data = {
- "DescribeDBSubnetGroupsResponse": {
- "DescribeDBSubnetGroupsResult": {
- "DBSubnetGroups": [
- {
- "DBSubnetGroupDescription":
- "Subnet group for CloudFormation RDS instance",
- "DBSubnetGroupName":
- "SubnetGroup1",
- "SubnetGroupStatus": "Complete",
- "Subnets": [
- {
- "SubnetAvailabilityZone": {
- "Name": "us-east-1d",
- "ProvisionedIopsCapable": False
- },
- "SubnetIdentifier": "subnet-38e87861",
- "SubnetStatus": "Active"
- },
- {
- "SubnetAvailabilityZone": {
- "Name": "us-east-1a",
- "ProvisionedIopsCapable": False
- },
- "SubnetIdentifier": "subnet-4f027f38",
- "SubnetStatus": "Active"
- }
- ],
- "VpcId": "vpc-1ee8937b"
- },
- {
- "DBSubnetGroupDescription":
- "Created from the RDS Management Console",
- "DBSubnetGroupName": "default",
- "SubnetGroupStatus": "Complete",
- "Subnets": [
- {
- "SubnetAvailabilityZone": {
- "Name": "us-east-1e",
- "ProvisionedIopsCapable": False
- },
- "SubnetIdentifier": "subnet-49071f61",
- "SubnetStatus": "Active"
- },
- {
- "SubnetAvailabilityZone": {
- "Name": "us-east-1a",
- "ProvisionedIopsCapable": False
- },
- "SubnetIdentifier": "subnet-6fe23c18",
- "SubnetStatus": "Active"
- },
- {
- "SubnetAvailabilityZone": {
- "Name": "us-east-1d",
- "ProvisionedIopsCapable": False
- },
- "SubnetIdentifier": "subnet-a9b54df0",
- "SubnetStatus": "Active"
- }
- ],
- "VpcId": "vpc-c300b9a6"
- },
- {
- "DBSubnetGroupDescription":
- "Subnet group for CloudFormation RDS instance",
- "DBSubnetGroupName":
- "SubnetGroup2",
- "SubnetGroupStatus": "Complete",
- "Subnets": [
- {
- "SubnetAvailabilityZone": {
- "Name": "us-east-1a",
- "ProvisionedIopsCapable": False
- },
- "SubnetIdentifier": "subnet-0b037e7c",
- "SubnetStatus": "Active"
- }
- ],
- "VpcId": "vpc-73ec9716"
- },
- ],
- "Marker": None
- },
- "ResponseMetadata": {
- "RequestId": "7cd7ed68-2499-11e5-ad44-cdf98c606d42"
- }
- }
- }
-
- mock_conn = Mock(spec_set=RDSConnection)
+ data = result_fixtures.RDS.test_find_usage_subnet_groups
+
+ mock_conn = Mock()
+ mock_paginator = Mock()
+ mock_paginator.paginate.return_value = data
+ mock_conn.get_paginator.return_value = mock_paginator
+
cls = _RDSService(21, 43)
cls.conn = mock_conn
- with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = data
- cls._find_usage_subnet_groups()
-
- assert mock_conn.mock_calls == []
- assert mock_wrapper.mock_calls == [
- call(
- mock_conn.describe_db_subnet_groups,
- alc_marker_path=[
- "DescribeDBSubnetGroupsResponse",
- "DescribeDBSubnetGroupsResult",
- "Marker"
- ],
- alc_data_path=[
- "DescribeDBSubnetGroupsResponse",
- "DescribeDBSubnetGroupsResult",
- "DBSubnetGroups"
- ],
- alc_marker_param='marker'
- )
+ cls._find_usage_subnet_groups()
+
+ assert mock_conn.mock_calls == [
+ call.get_paginator('describe_db_subnet_groups'),
+ call.get_paginator().paginate()
+ ]
+ assert mock_paginator.mock_calls == [
+ call.paginate()
]
usage = sorted(cls.limits['Subnet Groups'].get_current_usage())
@@ -730,63 +271,24 @@ def test_find_usage_subnet_groups(self):
assert usage[2].resource_id == "default"
def test_find_usage_option_groups(self):
- data = {
- "DescribeOptionGroupsResponse": {
- "DescribeOptionGroupsResult": {
- "Marker": None,
- "OptionGroupsList": [
- {
- "AllowsVpcAndNonVpcInstanceMemberships": True,
- "EngineName": "mysql",
- "MajorEngineVersion": "5.6",
- "OptionGroupDescription":
- "Default option group for mysql 5.6",
- "OptionGroupName": "default:mysql-5-6",
- "Options": [],
- "VpcId": None
- },
- {
- "AllowsVpcAndNonVpcInstanceMemberships": True,
- "EngineName": "postgres",
- "MajorEngineVersion": "9.3",
- "OptionGroupDescription":
- "Default option group for postgres 9.3",
- "OptionGroupName": "default:postgres-9-3",
- "Options": [],
- "VpcId": None
- }
- ]
- },
- "ResponseMetadata": {
- "RequestId": "8725ddc9-2499-11e5-9ed1-d5a3270e57f9"
- }
- }
- }
-
- mock_conn = Mock(spec_set=RDSConnection)
+ data = result_fixtures.RDS.test_find_usage_option_groups
+
+ mock_conn = Mock()
+ mock_paginator = Mock()
+ mock_paginator.paginate.return_value = data
+ mock_conn.get_paginator.return_value = mock_paginator
+
cls = _RDSService(21, 43)
cls.conn = mock_conn
- with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = data
- cls._find_usage_option_groups()
-
- assert mock_conn.mock_calls == []
- assert mock_wrapper.mock_calls == [
- call(
- mock_conn.describe_option_groups,
- alc_marker_path=[
- "DescribeOptionGroupsResponse",
- "DescribeOptionGroupsResult",
- "Marker"
- ],
- alc_data_path=[
- "DescribeOptionGroupsResponse",
- "DescribeOptionGroupsResult",
- "OptionGroupsList"
- ],
- alc_marker_param='marker'
- )
+ cls._find_usage_option_groups()
+
+ assert mock_conn.mock_calls == [
+ call.get_paginator('describe_option_groups'),
+ call.get_paginator().paginate()
+ ]
+ assert mock_paginator.mock_calls == [
+ call.paginate()
]
usage = sorted(cls.limits['Option Groups'].get_current_usage())
@@ -795,159 +297,50 @@ def test_find_usage_option_groups(self):
assert usage[0].aws_type == 'AWS::RDS::DBOptionGroup'
def test_find_usage_event_subscriptions(self):
- # @TODO update this with realistic test data
- data = {
- "DescribeEventSubscriptionsResponse": {
- "DescribeEventSubscriptionsResult": {
- "EventSubscriptionsList": ['a'],
- "Marker": None
- },
- "ResponseMetadata": {
- "RequestId": "91c0b568-2499-11e5-8440-1fb643a72e45"
- }
- }
- }
-
- mock_conn = Mock(spec_set=RDSConnection)
+ data = result_fixtures.RDS.test_find_usage_event_subscriptions
+
+ mock_conn = Mock()
+ mock_paginator = Mock()
+ mock_paginator.paginate.return_value = data
+ mock_conn.get_paginator.return_value = mock_paginator
+
cls = _RDSService(21, 43)
cls.conn = mock_conn
- with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = data
- cls._find_usage_event_subscriptions()
-
- assert mock_conn.mock_calls == []
- assert mock_wrapper.mock_calls == [
- call(
- mock_conn.describe_event_subscriptions,
- alc_marker_path=[
- "DescribeEventSubscriptionsResponse",
- "DescribeEventSubscriptionsResult",
- "Marker"
- ],
- alc_data_path=[
- "DescribeEventSubscriptionsResponse",
- "DescribeEventSubscriptionsResult",
- "EventSubscriptionsList"
- ],
- alc_marker_param='marker'
- )
+ cls._find_usage_event_subscriptions()
+
+ assert mock_conn.mock_calls == [
+ call.get_paginator('describe_event_subscriptions'),
+ call.get_paginator().paginate()
+ ]
+ assert mock_paginator.mock_calls == [
+ call.paginate()
]
usage = sorted(cls.limits['Event Subscriptions'].get_current_usage())
assert len(usage) == 1
- assert usage[0].get_value() == 1
+ assert usage[0].get_value() == 2
assert usage[0].aws_type == 'AWS::RDS::EventSubscription'
def test_find_usage_security_groups(self):
- data = {
- "DescribeDBSecurityGroupsResponse": {
- "DescribeDBSecurityGroupsResult": {
- "DBSecurityGroups": [
- {
- "DBSecurityGroupDescription": "Frontend Access",
- "DBSecurityGroupName":
- "SecurityGroup1",
- "EC2SecurityGroups": [
- {
- "EC2SecurityGroupId": "sg-c6dd95a2",
- "EC2SecurityGroupName":
- "EC2SG1",
- "EC2SecurityGroupOwnerId": None,
- "Status": "authorized"
- }
- ],
- "IPRanges": [],
- "OwnerId": "123456789012",
- "VpcId": None
- },
- {
- "DBSecurityGroupDescription":
- "default:vpc-a926c2cc",
- "DBSecurityGroupName": "default:vpc-a926c2cc",
- "EC2SecurityGroups": [],
- "IPRanges": [],
- "OwnerId": "123456789012",
- "VpcId": "vpc-a926c2cc"
- },
- {
- "DBSecurityGroupDescription": "Frontend Access",
- "DBSecurityGroupName": "SecurityGroup2",
- "EC2SecurityGroups": [
- {
- "EC2SecurityGroupId": "sg-aaaaaaaa",
- "EC2SecurityGroupName": "SGName-aaaaaaaa",
- "EC2SecurityGroupOwnerId": None,
- "Status": "authorized"
- },
- {
- "EC2SecurityGroupId": "sg-bbbbbbbb",
- "EC2SecurityGroupName": "SGName-bbbbbbbb",
- "EC2SecurityGroupOwnerId": None,
- "Status": "authorized"
- },
- {
- "EC2SecurityGroupId": "sg-cccccccc",
- "EC2SecurityGroupName": "SGName-cccccccc",
- "EC2SecurityGroupOwnerId": None,
- "Status": "authorized"
- },
- ],
- "IPRanges": [],
- "OwnerId": "123456789012",
- "VpcId": "vpc-73ec9716"
- },
- {
- 'VpcId': None,
- 'DBSecurityGroupDescription':
- 'awslimitchecker test',
- 'IPRanges': [
- {
- 'Status': 'authorized',
- 'CIDRIP': '76.122.124.15/32'
- },
- {
- 'Status': 'authorized',
- 'CIDRIP': '66.6.152.59/32'
- }
- ],
- 'OwnerId': '123456789012',
- 'EC2SecurityGroups': [],
- 'DBSecurityGroupName': 'alctest'
- }
- ],
- "Marker": None
- },
- "ResponseMetadata": {
- "RequestId": "9c78d95d-2499-11e5-9456-735a7f5001de"
- }
- }
- }
-
- mock_conn = Mock(spec_set=RDSConnection)
+ data = result_fixtures.RDS.test_find_usage_security_groups
+
+ mock_conn = Mock()
+ mock_paginator = Mock()
+ mock_paginator.paginate.return_value = data
+ mock_conn.get_paginator.return_value = mock_paginator
+
cls = _RDSService(21, 43)
cls.conn = mock_conn
- with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = data
- cls._find_usage_security_groups()
-
- assert mock_conn.mock_calls == []
- assert mock_wrapper.mock_calls == [
- call(
- mock_conn.describe_db_security_groups,
- alc_marker_path=[
- "DescribeDBSecurityGroupsResponse",
- "DescribeDBSecurityGroupsResult",
- "Marker"
- ],
- alc_data_path=[
- "DescribeDBSecurityGroupsResponse",
- "DescribeDBSecurityGroupsResult",
- "DBSecurityGroups"
- ],
- alc_marker_param='marker'
- )
+ cls._find_usage_security_groups()
+
+ assert mock_conn.mock_calls == [
+ call.get_paginator('describe_db_security_groups'),
+ call.get_paginator().paginate()
+ ]
+ assert mock_paginator.mock_calls == [
+ call.paginate()
]
usage = sorted(cls.limits['DB security groups'].get_current_usage())
@@ -977,43 +370,24 @@ def test_find_usage_security_groups(self):
assert usage[3].aws_type == 'AWS::RDS::DBSecurityGroup'
def test_find_usage_reserved_instances(self):
- # @TODO update this with realistic test data
- data = {
- 'DescribeReservedDBInstancesResponse': {
- 'DescribeReservedDBInstancesResult': {
- 'Marker': None,
- 'ReservedDBInstances': [1, 2]
- },
- 'ResponseMetadata': {
- 'RequestId': '75366d86-25a9-11e5-b6fa-c9da955772c6'
- }
- }
- }
-
- mock_conn = Mock(spec_set=RDSConnection)
+ data = result_fixtures.RDS.test_find_usage_reserved_instances
+
+ mock_conn = Mock()
+ mock_paginator = Mock()
+ mock_paginator.paginate.return_value = data
+ mock_conn.get_paginator.return_value = mock_paginator
+
cls = _RDSService(21, 43)
cls.conn = mock_conn
- with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = data
- cls._find_usage_reserved_instances()
-
- assert mock_conn.mock_calls == []
- assert mock_wrapper.mock_calls == [
- call(
- mock_conn.describe_reserved_db_instances,
- alc_marker_path=[
- 'DescribeReservedDBInstancesResponse',
- 'DescribeReservedDBInstancesResult',
- "Marker"
- ],
- alc_data_path=[
- 'DescribeReservedDBInstancesResponse',
- 'DescribeReservedDBInstancesResult',
- 'ReservedDBInstances'
- ],
- alc_marker_param='marker'
- )
+ cls._find_usage_reserved_instances()
+
+ assert mock_conn.mock_calls == [
+ call.get_paginator('describe_reserved_db_instances'),
+ call.get_paginator().paginate()
+ ]
+ assert mock_paginator.mock_calls == [
+ call.paginate()
]
usage = sorted(cls.limits['Reserved Instances'].get_current_usage())
diff --git a/awslimitchecker/tests/services/test_vpc.py b/awslimitchecker/tests/services/test_vpc.py
index de5d94e5..1a8f754f 100644
--- a/awslimitchecker/tests/services/test_vpc.py
+++ b/awslimitchecker/tests/services/test_vpc.py
@@ -38,15 +38,7 @@
"""
import sys
-
-from boto.vpc import VPCConnection
-from boto.vpc.vpc import VPC
-from boto.vpc.subnet import Subnet
-from boto.vpc.networkacl import NetworkAcl
-from boto.vpc.routetable import RouteTable
-from boto.vpc.internetgateway import InternetGateway
-from boto.vpc import connect_to_region
-
+from awslimitchecker.tests.services import result_fixtures
from awslimitchecker.services.vpc import _VpcService
# https://code.google.com/p/mock/issues/detail?id=249
@@ -73,54 +65,6 @@ def test_init(self):
assert cls.warning_threshold == 21
assert cls.critical_threshold == 43
- def test_connect(self):
- """test connect()"""
- mock_conn = Mock()
- mock_conn_via = Mock()
- cls = _VpcService(21, 43)
- with patch('%s.boto.connect_vpc' % self.pbm) as mock_vpc:
- with patch('%s.connect_via' % self.pb) as mock_connect_via:
- mock_vpc.return_value = mock_conn
- mock_connect_via.return_value = mock_conn_via
- cls.connect()
- assert mock_vpc.mock_calls == [call()]
- assert mock_conn.mock_calls == []
- assert mock_connect_via.mock_calls == []
- assert cls.conn == mock_conn
-
- def test_connect_region(self):
- """test connect()"""
- mock_conn = Mock()
- mock_conn_via = Mock()
- cls = _VpcService(21, 43, region='foo')
- with patch('%s.boto.connect_vpc' % self.pbm) as mock_vpc:
- with patch('%s.connect_via' % self.pb) as mock_connect_via:
- mock_vpc.return_value = mock_conn
- mock_connect_via.return_value = mock_conn_via
- cls.connect()
- assert mock_vpc.mock_calls == []
- assert mock_conn.mock_calls == []
- assert mock_connect_via.mock_calls == [
- call(connect_to_region)
- ]
- assert cls.conn == mock_conn_via
-
- def test_connect_again(self):
- """test connect()"""
- mock_conn = Mock()
- mock_conn_via = Mock()
- cls = _VpcService(21, 43)
- cls.conn = mock_conn
- with patch('%s.boto.connect_vpc' % self.pbm) as mock_vpc:
- with patch('%s.connect_via' % self.pb) as mock_connect_via:
- mock_vpc.return_value = mock_conn
- mock_connect_via.return_value = mock_conn_via
- cls.connect()
- assert mock_vpc.mock_calls == []
- assert mock_conn.mock_calls == []
- assert mock_connect_via.mock_calls == []
- assert cls.conn == mock_conn
-
def test_get_limits(self):
cls = _VpcService(21, 43)
cls.limits = {}
@@ -148,7 +92,7 @@ def test_get_limits_again(self):
assert res == mock_limits
def test_find_usage(self):
- mock_conn = Mock(spec_set=VPCConnection)
+ mock_conn = Mock()
with patch('%s.connect' % self.pb) as mock_connect:
with patch.multiple(
@@ -176,44 +120,33 @@ def test_find_usage(self):
assert mocks[x].mock_calls == [call()]
def test_find_usage_vpcs(self):
- mock1 = Mock(spec_set=VPC)
- type(mock1).id = 'vpc-1'
- mock2 = Mock(spec_set=VPC)
- type(mock2).id = 'vpc-2'
+ response = result_fixtures.VPC.test_find_usage_vpcs
- vpcs = [mock1, mock2]
-
- mock_conn = Mock(spec_set=VPCConnection)
+ mock_conn = Mock()
cls = _VpcService(21, 43)
cls.conn = mock_conn
with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = vpcs
+ mock_wrapper.return_value = response
cls._find_usage_vpcs()
assert len(cls.limits['VPCs'].get_current_usage()) == 1
assert cls.limits['VPCs'].get_current_usage()[0].get_value() == 2
assert mock_conn.mock_calls == []
assert mock_wrapper.mock_calls == [
- call(mock_conn.get_all_vpcs)
+ call(mock_conn.describe_vpcs, alc_no_paginate=True)
]
def test_find_usage_subnets(self):
- mock1 = Mock(spec_set=Subnet)
- type(mock1).vpc_id = 'vpc-1'
- mock2 = Mock(spec_set=Subnet)
- type(mock2).vpc_id = 'vpc-1'
- mock3 = Mock(spec_set=Subnet)
- type(mock3).vpc_id = 'vpc-2'
-
- subnets = [mock1, mock2, mock3]
- mock_conn = Mock(spec_set=VPCConnection)
+ response = result_fixtures.VPC.test_find_usage_subnets
+
+ mock_conn = Mock()
cls = _VpcService(21, 43)
cls.conn = mock_conn
with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = subnets
+ mock_wrapper.return_value = response
cls._find_usage_subnets()
usage = sorted(cls.limits['Subnets per VPC'].get_current_usage())
@@ -224,31 +157,18 @@ def test_find_usage_subnets(self):
assert usage[1].resource_id == 'vpc-1'
assert mock_conn.mock_calls == []
assert mock_wrapper.mock_calls == [
- call(mock_conn.get_all_subnets)
+ call(mock_conn.describe_subnets, alc_no_paginate=True)
]
def test_find_usage_acls(self):
- mock1 = Mock(spec_set=NetworkAcl)
- type(mock1).id = 'acl-1'
- type(mock1).vpc_id = 'vpc-1'
- type(mock1).network_acl_entries = [1, 2, 3]
- mock2 = Mock(spec_set=NetworkAcl)
- type(mock2).id = 'acl-2'
- type(mock2).vpc_id = 'vpc-1'
- type(mock2).network_acl_entries = [1]
- mock3 = Mock(spec_set=NetworkAcl)
- type(mock3).id = 'acl-3'
- type(mock3).vpc_id = 'vpc-2'
- type(mock3).network_acl_entries = [1, 2, 3, 4, 5]
-
- acls = [mock1, mock2, mock3]
- mock_conn = Mock(spec_set=VPCConnection)
+ response = result_fixtures.VPC.test_find_usage_acls
+ mock_conn = Mock()
cls = _VpcService(21, 43)
cls.conn = mock_conn
with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = acls
+ mock_wrapper.return_value = response
cls._find_usage_ACLs()
usage = sorted(cls.limits['Network ACLs per VPC'].get_current_usage())
@@ -268,32 +188,19 @@ def test_find_usage_acls(self):
assert entries[2].get_value() == 5
assert mock_conn.mock_calls == []
assert mock_wrapper.mock_calls == [
- call(mock_conn.get_all_network_acls)
+ call(mock_conn.describe_network_acls, alc_no_paginate=True)
]
def test_find_usage_route_tables(self):
- mock1 = Mock(spec_set=RouteTable)
- type(mock1).id = 'rt-1'
- type(mock1).vpc_id = 'vpc-1'
- type(mock1).routes = [1, 2, 3]
- mock2 = Mock(spec_set=RouteTable)
- type(mock2).id = 'rt-2'
- type(mock2).vpc_id = 'vpc-1'
- type(mock2).routes = [1]
- mock3 = Mock(spec_set=RouteTable)
- type(mock3).id = 'rt-3'
- type(mock3).vpc_id = 'vpc-2'
- type(mock3).routes = [1, 2, 3, 4, 5]
-
- tables = [mock1, mock2, mock3]
-
- mock_conn = Mock(spec_set=VPCConnection)
+ response = result_fixtures.VPC.test_find_usage_route_tables
+
+ mock_conn = Mock()
cls = _VpcService(21, 43)
cls.conn = mock_conn
with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = tables
+ mock_wrapper.return_value = response
cls._find_usage_route_tables()
usage = sorted(cls.limits['Route tables per VPC'].get_current_usage())
@@ -313,24 +220,19 @@ def test_find_usage_route_tables(self):
assert entries[2].get_value() == 5
assert mock_conn.mock_calls == []
assert mock_wrapper.mock_calls == [
- call(mock_conn.get_all_route_tables)
+ call(mock_conn.describe_route_tables, alc_no_paginate=True)
]
def test_find_usage_internet_gateways(self):
- mock1 = Mock(spec_set=InternetGateway)
- type(mock1).id = 'gw-1'
- mock2 = Mock(spec_set=InternetGateway)
- type(mock2).id = 'gw-2'
-
- gateways = [mock1, mock2]
+ response = result_fixtures.VPC.test_find_usage_internet_gateways
- mock_conn = Mock(spec_set=VPCConnection)
+ mock_conn = Mock()
cls = _VpcService(21, 43)
cls.conn = mock_conn
with patch('%s.boto_query_wrapper' % self.pbm) as mock_wrapper:
- mock_wrapper.return_value = gateways
+ mock_wrapper.return_value = response
cls._find_usage_gateways()
assert len(cls.limits['Internet gateways'].get_current_usage()) == 1
@@ -338,7 +240,7 @@ def test_find_usage_internet_gateways(self):
0].get_value() == 2
assert mock_conn.mock_calls == []
assert mock_wrapper.mock_calls == [
- call(mock_conn.get_all_internet_gateways)
+ call(mock_conn.describe_internet_gateways, alc_no_paginate=True)
]
def test_required_iam_permissions(self):
diff --git a/awslimitchecker/tests/test_connectable.py b/awslimitchecker/tests/test_connectable.py
index 7ea892a6..dabe687b 100644
--- a/awslimitchecker/tests/test_connectable.py
+++ b/awslimitchecker/tests/test_connectable.py
@@ -37,7 +37,8 @@
################################################################################
"""
-from awslimitchecker.connectable import Connectable
+from awslimitchecker.connectable import Connectable, ConnectableCredentials
+from datetime import datetime
import sys
# https://code.google.com/p/mock/issues/detail?id=249
@@ -46,9 +47,13 @@
sys.version_info[0] < 3 or
sys.version_info[0] == 3 and sys.version_info[1] < 4
):
- from mock import patch, call, Mock
+ from mock import patch, call, Mock, PropertyMock
else:
- from unittest.mock import patch, call, Mock
+ from unittest.mock import patch, call, Mock, PropertyMock
+
+
+pbm = 'awslimitchecker.connectable'
+pb = '%s.Connectable' % pbm
class ConnectableTester(Connectable):
@@ -62,6 +67,7 @@ def __init__(self, account_id=None, account_role=None, region=None,
self.account_role = account_role
self.region = region
self.conn = None
+ self.resource_conn = None
self.external_id = external_id
self.mfa_serial_number = mfa_serial_number
self.mfa_token = mfa_token
@@ -69,122 +75,248 @@ def __init__(self, account_id=None, account_role=None, region=None,
class Test_Connectable(object):
- def test_connect_via_no_region(self):
+ def test_boto3_connection_kwargs(self):
cls = ConnectableTester()
- mock_driver = Mock()
- res = cls.connect_via(mock_driver)
- assert mock_driver.mock_calls == [
- call(None)
+
+ with patch('%s._get_sts_token_boto3' % pb) as mock_get_sts:
+ with patch('%s.logger' % pbm) as mock_logger:
+ Connectable.credentials = None
+ res = cls._boto3_connection_kwargs
+ assert mock_get_sts.mock_calls == []
+ assert mock_logger.mock_calls == [
+ call.debug('Connecting to region %s', None)
]
- assert res == mock_driver.return_value
-
- def test_connect_via_with_region(self):
- cls = ConnectableTester(region='foo')
- mock_driver = Mock()
- with patch('awslimitchecker.connectable.Connectable._get_sts_token'
- '') as mock_get_sts:
- res = cls.connect_via(mock_driver)
+ assert res == {
+ 'region_name': None
+ }
+
+ def test_boto3_connection_kwargs_region(self):
+ cls = ConnectableTester(region='myregion')
+
+ with patch('%s._get_sts_token_boto3' % pb) as mock_get_sts:
+ with patch('%s.logger' % pbm) as mock_logger:
+ Connectable.credentials = None
+ res = cls._boto3_connection_kwargs
assert mock_get_sts.mock_calls == []
- assert mock_driver.mock_calls == [
- call('foo')
+ assert mock_logger.mock_calls == [
+ call.debug('Connecting to region %s', 'myregion')
]
- assert res == mock_driver.return_value
+ assert res == {
+ 'region_name': 'myregion'
+ }
- def test_connect_via_sts(self):
+ def test_boto3_connection_kwargs_sts(self):
cls = ConnectableTester(account_id='123', account_role='myrole',
region='myregion')
- mock_driver = Mock()
mock_creds = Mock()
type(mock_creds).access_key = 'sts_ak'
type(mock_creds).secret_key = 'sts_sk'
type(mock_creds).session_token = 'sts_token'
- with patch('awslimitchecker.connectable.Connectable._get_sts_token'
- '') as mock_get_sts:
- mock_get_sts.return_value = mock_creds
- Connectable.credentials = None
- res = cls.connect_via(mock_driver)
+ with patch('%s._get_sts_token_boto3' % pb) as mock_get_sts:
+ with patch('%s.logger' % pbm) as mock_logger:
+ mock_get_sts.return_value = mock_creds
+ Connectable.credentials = None
+ res = cls._boto3_connection_kwargs
assert mock_get_sts.mock_calls == [call()]
- assert mock_driver.mock_calls == [
- call(
- 'myregion',
- aws_access_key_id='sts_ak',
- aws_secret_access_key='sts_sk',
- security_token='sts_token'
- )
+ assert mock_logger.mock_calls == [
+ call.debug("Connecting for account %s role '%s' with STS "
+ "(region: %s)", '123', 'myrole', 'myregion')
]
- assert res == mock_driver.return_value
+ assert res == {
+ 'region_name': 'myregion',
+ 'aws_access_key_id': 'sts_ak',
+ 'aws_secret_access_key': 'sts_sk',
+ 'aws_session_token': 'sts_token'
+ }
- def test_connect_via_sts_again(self):
+ def test_boto3_connection_kwargs_sts_again(self):
cls = ConnectableTester(account_id='123', account_role='myrole',
region='myregion')
- mock_driver = Mock()
mock_creds = Mock()
type(mock_creds).access_key = 'sts_ak'
type(mock_creds).secret_key = 'sts_sk'
type(mock_creds).session_token = 'sts_token'
- with patch('awslimitchecker.connectable.Connectable._get_sts_token'
- '') as mock_get_sts:
- Connectable.credentials = mock_creds
- res = cls.connect_via(mock_driver)
+ with patch('%s._get_sts_token_boto3' % pb) as mock_get_sts:
+ with patch('%s.logger' % pbm) as mock_logger:
+ mock_get_sts.return_value = mock_creds
+ Connectable.credentials = mock_creds
+ res = cls._boto3_connection_kwargs
assert mock_get_sts.mock_calls == []
- assert mock_driver.mock_calls == [
+ assert mock_logger.mock_calls == [
+ call.debug('Reusing previous STS credentials for account %s', '123')
+ ]
+ assert res == {
+ 'region_name': 'myregion',
+ 'aws_access_key_id': 'sts_ak',
+ 'aws_secret_access_key': 'sts_sk',
+ 'aws_session_token': 'sts_token'
+ }
+
+ def test_connect(self):
+ mock_conn = Mock()
+ mock_cc = Mock()
+ type(mock_cc).region_name = 'myregion'
+ type(mock_conn)._client_config = mock_cc
+
+ cls = ConnectableTester()
+ cls.api_name = 'myapi'
+ kwargs = {'foo': 'fooval', 'bar': 'barval'}
+
+ with patch('%s._boto3_connection_kwargs' % pb,
+ new_callable=PropertyMock) as mock_kwargs:
+ mock_kwargs.return_value = kwargs
+ with patch('%s.logger' % pbm) as mock_logger:
+ with patch('%s.boto3.client' % pbm) as mock_client:
+ mock_client.return_value = mock_conn
+ cls.connect()
+ assert mock_kwargs.mock_calls == [call()]
+ assert mock_logger.mock_calls == [
+ call.info("Connected to %s in region %s",
+ 'myapi',
+ 'myregion')
+ ]
+ assert mock_client.mock_calls == [
call(
- 'myregion',
- aws_access_key_id='sts_ak',
- aws_secret_access_key='sts_sk',
- security_token='sts_token'
+ 'myapi',
+ foo='fooval',
+ bar='barval'
)
]
- assert res == mock_driver.return_value
+ assert cls.conn == mock_client.return_value
- def test_get_sts_token(self):
- cls = ConnectableTester(account_id='789',
- account_role='myr', region='foobar')
- with patch('awslimitchecker.connectable.boto.sts.connect_to_region'
- '') as mock_connect:
- res = cls._get_sts_token()
- arn = 'arn:aws:iam::789:role/myr'
- assert mock_connect.mock_calls == [
- call('foobar'),
- call().assume_role(arn, 'awslimitchecker', external_id=None,
- mfa_serial_number=None, mfa_token=None),
- ]
- assume_role_ret = mock_connect.return_value.assume_role.return_value
- assert res == assume_role_ret.credentials
+ def test_connect_again(self):
+ mock_conn = Mock()
+ mock_cc = Mock()
+ type(mock_cc).region_name = 'myregion'
+ type(mock_conn)._client_config = mock_cc
- def test_get_sts_token_external_id(self):
- cls = ConnectableTester(account_id='789',
- account_role='myr', region='foobar',
- external_id='myextid')
- with patch('awslimitchecker.connectable.boto.sts.connect_to_region'
- '') as mock_connect:
- res = cls._get_sts_token()
- arn = 'arn:aws:iam::789:role/myr'
- assert mock_connect.mock_calls == [
- call('foobar'),
- call().assume_role(arn, 'awslimitchecker', external_id='myextid',
- mfa_serial_number=None, mfa_token=None),
+ cls = ConnectableTester()
+ cls.conn = mock_conn
+ cls.api_name = 'myapi'
+ kwargs = {'foo': 'fooval', 'bar': 'barval'}
+
+ with patch('%s._boto3_connection_kwargs' % pb,
+ new_callable=PropertyMock) as mock_kwargs:
+ mock_kwargs.return_value = kwargs
+ with patch('%s.logger' % pbm) as mock_logger:
+ with patch('%s.boto3.client' % pbm) as mock_client:
+ mock_client.return_value = mock_conn
+ cls.connect()
+ assert mock_kwargs.mock_calls == []
+ assert mock_logger.mock_calls == []
+ assert mock_client.mock_calls == []
+ assert cls.conn == mock_conn
+
+ def test_connect_resource(self):
+ mock_conn = Mock()
+ mock_meta = Mock()
+ mock_client = Mock()
+ mock_cc = Mock()
+ type(mock_cc).region_name = 'myregion'
+ type(mock_client)._client_config = mock_cc
+ type(mock_meta).client = mock_client
+ type(mock_conn).meta = mock_meta
+
+ cls = ConnectableTester()
+ cls.api_name = 'myapi'
+ kwargs = {'foo': 'fooval', 'bar': 'barval'}
+
+ with patch('%s._boto3_connection_kwargs' % pb,
+ new_callable=PropertyMock) as mock_kwargs:
+ mock_kwargs.return_value = kwargs
+ with patch('%s.logger' % pbm) as mock_logger:
+ with patch('%s.boto3.resource' % pbm) as mock_resource:
+ mock_resource.return_value = mock_conn
+ cls.connect_resource()
+ assert mock_kwargs.mock_calls == [call()]
+ assert mock_logger.mock_calls == [
+ call.info("Connected to %s (resource) in region %s",
+ 'myapi',
+ 'myregion')
+ ]
+ assert mock_resource.mock_calls == [
+ call(
+ 'myapi',
+ foo='fooval',
+ bar='barval'
+ )
]
- assume_role_ret = mock_connect.return_value.assume_role.return_value
- assert res == assume_role_ret.credentials
+ assert cls.resource_conn == mock_resource.return_value
+
+ def test_connect_resource_again(self):
+ mock_conn = Mock()
+ mock_meta = Mock()
+ mock_client = Mock()
+ mock_cc = Mock()
+ type(mock_cc).region_name = 'myregion'
+ type(mock_client)._client_config = mock_cc
+ type(mock_meta).client = mock_client
+ type(mock_conn).meta = mock_meta
+
+ cls = ConnectableTester()
+ cls.api_name = 'myapi'
+ cls.resource_conn = mock_conn
+ kwargs = {'foo': 'fooval', 'bar': 'barval'}
+
+ with patch('%s._boto3_connection_kwargs' % pb,
+ new_callable=PropertyMock) as mock_kwargs:
+ mock_kwargs.return_value = kwargs
+ with patch('%s.logger' % pbm) as mock_logger:
+ with patch('%s.boto3.resource' % pbm) as mock_resource:
+ mock_resource.return_value = mock_conn
+ cls.connect_resource()
+ assert mock_kwargs.mock_calls == []
+ assert mock_logger.mock_calls == []
+ assert mock_resource.mock_calls == []
+ assert cls.resource_conn == mock_conn
- def test_get_sts_token_mfa(self):
+ def test_get_sts_token_boto3(self):
+ ret_dict = Mock()
cls = ConnectableTester(account_id='789',
- account_role='myr', region='foobar',
- external_id='myextid',
- mfa_serial_number='arn:aws:iam::456:mfa/me',
- mfa_token='123456')
- with patch('awslimitchecker.connectable.boto.sts.connect_to_region'
- '') as mock_connect:
- res = cls._get_sts_token()
+ account_role='myr', region='foobar')
+ with patch('%s.boto3.client' % pbm) as mock_connect:
+ with patch('%s.ConnectableCredentials' % pbm,
+ create=True) as mock_creds:
+ mock_connect.return_value.assume_role.return_value = ret_dict
+ res = cls._get_sts_token_boto3()
arn = 'arn:aws:iam::789:role/myr'
assert mock_connect.mock_calls == [
- call('foobar'),
- call().assume_role(arn, 'awslimitchecker', external_id='myextid',
- mfa_serial_number='arn:aws:iam::456:mfa/me',
- mfa_token='123456'),
+ call('sts', region_name='foobar'),
+ call().assume_role(
+ RoleArn=arn,
+ RoleSessionName='awslimitchecker',
+ ExternalId=None,
+ SerialNumber=None,
+ TokenCode=None),
+ ]
+ assert mock_creds.mock_calls == [
+ call(ret_dict)
]
- assume_role_ret = mock_connect.return_value.assume_role.return_value
- assert res == assume_role_ret.credentials
+ assert res == mock_creds.return_value
+
+
+class TestConnectableCredentials(object):
+
+ def test_connectable_credentials(self):
+ result = {
+ 'Credentials': {
+ 'AccessKeyId': 'akid',
+ 'SecretAccessKey': 'secret',
+ 'SessionToken': 'token',
+ 'Expiration': datetime(2015, 1, 1)
+ },
+ 'AssumedRoleUser': {
+ 'AssumedRoleId': 'roleid',
+ 'Arn': 'arn'
+ },
+ 'PackedPolicySize': 123
+ }
+ c = ConnectableCredentials(result)
+ assert c.access_key == 'akid'
+ assert c.secret_key == 'secret'
+ assert c.session_token == 'token'
+ assert c.expiration == datetime(2015, 1, 1)
+ assert c.assumed_role_id == 'roleid'
+ assert c.assumed_role_arn == 'arn'
diff --git a/awslimitchecker/tests/test_runner.py b/awslimitchecker/tests/test_runner.py
index faf31b48..3119062f 100644
--- a/awslimitchecker/tests/test_runner.py
+++ b/awslimitchecker/tests/test_runner.py
@@ -107,7 +107,7 @@ def test_parse_args(self):
def test_parse_args_parser(self):
argv = ['-V']
- desc = 'Report on AWS service limits and usage via boto, optionally ' \
+ desc = 'Report on AWS service limits and usage via boto3, optionally ' \
'warn about any services with usage nearing or exceeding ' \
'their limits. For further help, see ' \
''
diff --git a/awslimitchecker/tests/test_trustedadvisor.py b/awslimitchecker/tests/test_trustedadvisor.py
index ec55be36..5d54876b 100644
--- a/awslimitchecker/tests/test_trustedadvisor.py
+++ b/awslimitchecker/tests/test_trustedadvisor.py
@@ -38,10 +38,7 @@
"""
import sys
-from boto.support.layer1 import SupportConnection
-from boto.support import connect_to_region
-from boto.regioninfo import RegionInfo
-from boto.exception import JSONResponseError, BotoServerError
+from botocore.exceptions import ClientError
from awslimitchecker.trustedadvisor import TrustedAdvisor
from awslimitchecker.services.base import _AwsService
import pytest
@@ -64,8 +61,10 @@
class Test_TrustedAdvisor(object):
def setup(self):
- self.mock_conn = Mock(spec_set=SupportConnection)
- type(self.mock_conn).region = RegionInfo(name='us-east-1')
+ self.mock_conn = Mock()
+ self.mock_client_config = Mock()
+ type(self.mock_client_config).region_name = 'us-east-1'
+ type(self.mock_conn)._client_config = self.mock_client_config
self.cls = TrustedAdvisor()
self.cls.conn = self.mock_conn
@@ -110,45 +109,6 @@ def test_init_sts_external_id(self):
assert cls.mfa_serial_number is None
assert cls.mfa_token is None
- def test_connect(self):
- cls = TrustedAdvisor()
- mock_conn = Mock(spec_set=SupportConnection, name='mock_conn')
- with patch('awslimitchecker.trustedadvisor.boto.connect_support'
- '', autospec=True) as mock_connect:
- mock_connect.return_value = mock_conn
- cls.connect()
- assert cls.conn == mock_conn
- assert mock_connect.mock_calls == [call()]
-
- def test_connect_region(self):
- cls = TrustedAdvisor(account_id='foo', account_role='bar', region='re')
- mock_conn = Mock(spec_set=SupportConnection, name='mock_conn')
- mock_conn_via = Mock(spec_set=SupportConnection, name='mock_conn')
- with patch('awslimitchecker.trustedadvisor.TrustedAdvisor.connect_via'
- '') as mock_connect_via:
- mock_connect_via.return_value = mock_conn_via
- with patch('awslimitchecker.trustedadvisor.boto.connect_support'
- '', autospec=True) as mock_connect:
- mock_connect.return_value = mock_conn
- cls.connect()
- assert cls.conn == mock_conn_via
- assert mock_connect.mock_calls == []
- assert mock_connect_via.mock_calls == [
- call(connect_to_region)
- ]
-
- def test_connect_again(self):
- cls = TrustedAdvisor()
- mock_original_conn = Mock(spec_set=SupportConnection)
- cls.conn = mock_original_conn
- mock_conn = Mock(spec_set=SupportConnection)
- with patch('awslimitchecker.trustedadvisor.boto.connect_support'
- '') as mock_connect:
- mock_connect.return_value = mock_conn
- cls.connect()
- assert cls.conn == mock_original_conn
- assert mock_connect.mock_calls == []
-
def test_update_limits(self):
mock_results = Mock()
with patch('%s.connect' % pb, autospec=True) as mock_connect:
@@ -203,7 +163,7 @@ def test_get_limit_check_id(self):
assert self.mock_conn.mock_calls == []
assert mock_wrapper.mock_calls == [call(
self.mock_conn.describe_trusted_advisor_checks,
- 'en', alc_no_paginate=True
+ language='en', alc_no_paginate=True
)]
def test_get_limit_check_id_none(self):
@@ -228,20 +188,24 @@ def test_get_limit_check_id_none(self):
assert self.mock_conn.mock_calls == []
assert mock_wrapper.mock_calls == [call(
self.mock_conn.describe_trusted_advisor_checks,
- 'en', alc_no_paginate=True
+ language='en', alc_no_paginate=True
)]
def test_get_limit_check_id_subscription_required(self):
def se_api(foo, language, alc_no_paginate=False):
- status = 400
- reason = 'Bad Request'
- body = {
- 'message': 'AWS Premium Support Subscription is required to '
- 'use this service.',
- '__type': 'SubscriptionRequiredException'
+ response = {
+ 'ResponseMetadata': {
+ 'HTTPStatusCode': 400,
+ 'RequestId': '3cc9b2a8-c6e5-11e5-bc1d-b13dcea36176'
+ },
+ 'Error': {
+ 'Message': 'AWS Premium Support Subscription is required '
+ 'to use this service.',
+ 'Code': 'SubscriptionRequiredException'
+ }
}
- raise JSONResponseError(status, reason, body)
+ raise ClientError(response, 'operation')
assert self.cls.have_ta is True
with patch('awslimitchecker.trustedadvisor'
@@ -254,38 +218,43 @@ def se_api(foo, language, alc_no_paginate=False):
assert self.mock_conn.mock_calls == []
assert mock_wrapper.mock_calls == [call(
self.mock_conn.describe_trusted_advisor_checks,
- 'en', alc_no_paginate=True
+ language='en', alc_no_paginate=True
)]
assert mock_logger.mock_calls == [
call.debug("Querying Trusted Advisor checks"),
call.warning("Cannot check TrustedAdvisor: %s",
- "AWS Premium Support "
- "Subscription is required to use this service.")
+ 'AWS Premium Support Subscription is required to '
+ 'use this service.')
]
def test_get_limit_check_id_other_exception(self):
def se_api(foo, language, alc_no_paginate=False):
- status = 400
- reason = 'foobar'
- body = {
- 'message': 'other message',
- '__type': 'OtherException'
+ response = {
+ 'ResponseMetadata': {
+ 'HTTPStatusCode': 400,
+ 'RequestId': '3cc9b2a8-c6e5-11e5-bc1d-b13dcea36176'
+ },
+ 'Error': {
+ 'Message': 'foo',
+ 'Code': 'SomeOtherException'
+ }
}
- raise JSONResponseError(status, reason, body)
+ raise ClientError(response, 'operation')
- with pytest.raises(BotoServerError) as excinfo:
+ with pytest.raises(ClientError) as excinfo:
with patch('%s.boto_query_wrapper' % pbm) as mock_wrapper:
mock_wrapper.side_effect = se_api
self.cls._get_limit_check_id()
assert self.mock_conn.mock_calls == []
assert mock_wrapper.mock_calls == [call(
self.mock_conn.describe_trusted_advisor_checks,
- 'en', alc_no_paginate=True
+ language='en', alc_no_paginate=True
)]
- assert excinfo.value.status == 400
- assert excinfo.value.reason == 'foobar'
- assert excinfo.value.body['__type'] == 'OtherException'
+ assert excinfo.value.response['ResponseMetadata'][
+ 'HTTPStatusCode'] == 400
+ assert excinfo.value.response['Error']['Message'] == 'foo'
+ assert excinfo.value.response['Error']['Code'] == 'SomeOtherException'
def test_poll_id_none(self):
tmp = self.mock_conn.describe_trusted_advisor_check_result
@@ -365,7 +334,7 @@ def test_poll(self):
res = self.cls._poll()
assert tmp.mock_calls == []
assert mock_wrapper.mock_calls == [
- call(tmp, 'foo', alc_no_paginate=True)
+ call(tmp, checkId='foo', language='en', alc_no_paginate=True)
]
assert mock_id.mock_calls == [call(self.cls)]
assert res == {
@@ -444,7 +413,7 @@ def test_poll_region(self):
res = self.cls._poll()
assert tmp.mock_calls == []
assert mock_wrapper.mock_calls == [
- call(tmp, 'foo', alc_no_paginate=True)
+ call(tmp, checkId='foo', language='en', alc_no_paginate=True)
]
assert mock_id.mock_calls == [call(self.cls)]
assert res == {
@@ -523,3 +492,38 @@ def se_set(lname, val):
call._set_ta_limit('blam', 10),
call._set_ta_limit('VPC Elastic IP addresses (EIPs)', 11)
]
+
+ def test_update_services_no_ec2(self):
+
+ mock_autoscale = Mock(spec_set=_AwsService)
+ mock_vpc = Mock(spec_set=_AwsService)
+ services = {
+ 'AutoScaling': mock_autoscale,
+ 'VPC': mock_vpc,
+ }
+ ta_results = {
+ 'AutoScaling': {
+ 'foo': 20,
+ 'bar': 40,
+ },
+ 'EC2': {
+ 'baz': 5,
+ },
+ 'VPC': {
+ 'VPC Elastic IP addresses (EIPs)': 11,
+ }
+ }
+ with patch('awslimitchecker.trustedadvisor'
+ '.logger', autospec=True) as mock_logger:
+ self.cls._update_services(ta_results, services)
+ assert mock_logger.mock_calls == [
+ call.debug("Updating TA limits on all services"),
+ call.info("TrustedAdvisor returned check results for unknown "
+ "service '%s'", 'EC2'),
+ call.info("Done updating TA limits on all services"),
+ ]
+ assert mock_autoscale.mock_calls == [
+ call._set_ta_limit('bar', 40),
+ call._set_ta_limit('foo', 20),
+ ]
+ assert mock_vpc.mock_calls == []
diff --git a/awslimitchecker/tests/test_utils.py b/awslimitchecker/tests/test_utils.py
index 18eeecea..204aacc7 100644
--- a/awslimitchecker/tests/test_utils.py
+++ b/awslimitchecker/tests/test_utils.py
@@ -41,14 +41,9 @@
import pytest
import sys
-from boto.exception import BotoServerError
-from boto.resultset import ResultSet
-from boto.ec2.autoscale.limits import AccountLimits
-
from awslimitchecker.utils import (
- StoreKeyValuePair, dict2cols, invoke_with_throttling_retries,
- boto_query_wrapper, paginate_query, _paginate_resultset, _paginate_dict,
- _get_dict_value_by_path, _set_dict_value_by_path
+ StoreKeyValuePair, dict2cols, boto_query_wrapper, paginate_query,
+ _paginate_dict, _get_dict_value_by_path, _set_dict_value_by_path
)
# https://code.google.com/p/mock/issues/detail?id=249
@@ -153,98 +148,6 @@ def test_empty(self):
assert res == ''
-class TestInvokeWithThrottlingRetries(object):
-
- def setup(self):
- self.retry_count = 0
- self.num_errors = 0
-
- def retry_func(self, *args, **kwargs):
- self.retry_count += 1
- if self.num_errors != 0 and self.retry_count <= self.num_errors:
- body = "\n \n Sender" \
- "\n Throttling
\n Rate exceeded" \
- "\n \n 2ab5db0d-5bca-11e4-9" \
- "592-272cff50ba2d\n"
- raise BotoServerError(400, 'Bad Request', body)
- return True
-
- def other_error(self, *args, **kwargs):
- body = "\n \n Sender" \
- "\n UnauthorizedOperation
\n " \
- "foobar\n " \
- "\n 2ab5db0d-5bca-11e4-9" \
- "592-272cff50ba2d\n"
- raise BotoServerError(400, 'Bad Request', body)
-
- def test_invoke_ok(self):
- cls = Mock()
- cls.func.side_effect = self.retry_func
- with patch('awslimitchecker.utils.time.sleep') as mock_sleep:
- res = invoke_with_throttling_retries(cls.func)
- assert res is True
- assert cls.func.mock_calls == [call()]
- assert mock_sleep.mock_calls == []
-
- def test_invoke_ok_args(self):
- cls = Mock()
- cls.func.side_effect = self.retry_func
- with patch('awslimitchecker.utils.time.sleep') as mock_sleep:
- res = invoke_with_throttling_retries(
- cls.func, 'zzz', 'aaa', foo='bar'
- )
- assert res is True
- assert cls.func.mock_calls == [call('zzz', 'aaa', foo='bar')]
- assert mock_sleep.mock_calls == []
-
- def test_invoke_ok_alc_args(self):
- cls = Mock()
- cls.func.side_effect = self.retry_func
- with patch('awslimitchecker.utils.time.sleep') as mock_sleep:
- res = invoke_with_throttling_retries(
- cls.func, 'zzz', 'aaa', foo='bar', alc_foo='bar')
- assert res is True
- assert cls.func.mock_calls == [call('zzz', 'aaa', foo='bar')]
- assert mock_sleep.mock_calls == []
-
- def test_invoke_other_error(self):
- cls = Mock()
- cls.func.side_effect = self.other_error
- with patch('awslimitchecker.utils.time.sleep') as mock_sleep:
- with pytest.raises(BotoServerError) as ex:
- invoke_with_throttling_retries(cls.func)
- assert cls.func.mock_calls == [call()]
- assert mock_sleep.mock_calls == []
- assert ex.value.code == 'UnauthorizedOperation'
-
- def test_invoke_one_fail(self):
- self.num_errors = 1
- cls = Mock()
- cls.func.side_effect = self.retry_func
- with patch('awslimitchecker.utils.time.sleep') as mock_sleep:
- res = invoke_with_throttling_retries(cls.func)
- assert res is True
- assert cls.func.mock_calls == [call(), call()]
- assert mock_sleep.mock_calls == [call(2)]
-
- def test_invoke_max_fail(self):
- self.num_errors = 6
- cls = Mock()
- cls.func.side_effect = self.retry_func
- with patch('awslimitchecker.utils.time.sleep') as mock_sleep:
- with pytest.raises(BotoServerError) as ex:
- invoke_with_throttling_retries(cls.func)
- assert ex.value.code == 'Throttling'
- assert cls.func.mock_calls == [
- call(), call(), call(), call(), call(), call()
- ]
- assert mock_sleep.mock_calls == [
- call(2), call(4), call(8), call(16), call(32)
- ]
-
-
class TestBotoQueryWrapper(object):
def test_invoke_noargs(self):
@@ -313,83 +216,34 @@ def test_invoke_paginate(self):
class TestPaginateQuery(object):
- def test_resultset_next_token(self):
- result = ResultSet()
- result.next_token = 'foo'
- func = Mock()
- final_result = Mock()
-
- with patch.multiple(
- pbm,
- invoke_with_throttling_retries=DEFAULT,
- _paginate_resultset=DEFAULT,
- _paginate_dict=DEFAULT,
- ) as mocks:
- mocks['invoke_with_throttling_retries'].return_value = result
- mocks['_paginate_resultset'].return_value = final_result
- res = paginate_query(func, 'foo', bar='barval')
- assert res == final_result
- assert mocks['invoke_with_throttling_retries'].mock_calls == [
- call(func, 'foo', bar='barval')
- ]
- assert mocks['_paginate_resultset'].mock_calls == [
- call(result, func, 'foo', bar='barval')
- ]
- assert mocks['_paginate_dict'].mock_calls == []
-
def test_alc_no_paginate(self):
- result = ResultSet()
- result.next_token = 'foo'
+ result = {'foo': 'bar'}
func = Mock()
+ func.return_value = result
with patch.multiple(
pbm,
- invoke_with_throttling_retries=DEFAULT,
- _paginate_resultset=DEFAULT,
_paginate_dict=DEFAULT,
) as mocks:
- mocks['invoke_with_throttling_retries'].return_value = result
res = paginate_query(func, 'foo', bar='barval',
alc_no_paginate=True)
assert res == result
- assert mocks['invoke_with_throttling_retries'].mock_calls == [
- call(func, 'foo', bar='barval', alc_no_paginate=True)
+ assert func.mock_calls == [
+ call('foo', bar='barval')
]
- assert mocks['_paginate_resultset'].mock_calls == []
- assert mocks['_paginate_dict'].mock_calls == []
-
- def test_resultset_no_next(self):
- result = ResultSet()
- func = Mock()
-
- with patch.multiple(
- pbm,
- invoke_with_throttling_retries=DEFAULT,
- _paginate_resultset=DEFAULT,
- _paginate_dict=DEFAULT,
- ) as mocks:
- mocks['invoke_with_throttling_retries'].return_value = result
- res = paginate_query(func, 'foo', bar='barval')
- assert res == result
- assert mocks['invoke_with_throttling_retries'].mock_calls == [
- call(func, 'foo', bar='barval')
- ]
- assert mocks['_paginate_resultset'].mock_calls == []
assert mocks['_paginate_dict'].mock_calls == []
def test_dict(self):
result = {'foo': 'bar'}
func = Mock()
+ func.return_value = result
final_result = Mock()
with patch.multiple(
pbm,
- invoke_with_throttling_retries=DEFAULT,
- _paginate_resultset=DEFAULT,
_paginate_dict=DEFAULT,
logger=DEFAULT,
) as mocks:
- mocks['invoke_with_throttling_retries'].return_value = result
mocks['_paginate_dict'].return_value = final_result
res = paginate_query(
func,
@@ -400,11 +254,9 @@ def test_dict(self):
alc_marker_param='p'
)
assert res == final_result
- assert mocks['invoke_with_throttling_retries'].mock_calls == [
- call(func, 'foo', bar='barval', alc_marker_path=[],
- alc_data_path=[], alc_marker_param='p')
+ assert func.mock_calls == [
+ call('foo', bar='barval')
]
- assert mocks['_paginate_resultset'].mock_calls == []
assert mocks['_paginate_dict'].mock_calls == [
call(result, func, 'foo', bar='barval', alc_marker_path=[],
alc_data_path=[], alc_marker_param='p')
@@ -414,16 +266,14 @@ def test_dict(self):
def test_dict_missing_params(self):
result = {'foo': 'bar'}
func = Mock()
+ func.return_value = result
final_result = Mock()
with patch.multiple(
pbm,
- invoke_with_throttling_retries=DEFAULT,
- _paginate_resultset=DEFAULT,
_paginate_dict=DEFAULT,
logger=DEFAULT,
) as mocks:
- mocks['invoke_with_throttling_retries'].return_value = result
mocks['_paginate_dict'].return_value = final_result
res = paginate_query(
func,
@@ -431,10 +281,9 @@ def test_dict_missing_params(self):
bar='barval'
)
assert res == result
- assert mocks['invoke_with_throttling_retries'].mock_calls == [
- call(func, 'foo', bar='barval')
+ assert func.mock_calls == [
+ call('foo', bar='barval')
]
- assert mocks['_paginate_resultset'].mock_calls == []
assert mocks['_paginate_dict'].mock_calls == []
assert len(mocks['logger'].mock_calls) == 1
args = mocks['logger'].warning.mock_calls[0][1]
@@ -443,44 +292,20 @@ def test_dict_missing_params(self):
"Query returned a dict, but does not have _paginate_dict params "
"set; cannot paginate (`_ repository on GitHub
-2. Create a `virtualenv` to run the code in:
+2. Create a ``virtualenv`` to run the code in:
.. code-block:: bash
@@ -49,9 +49,11 @@ Guidelines
* pep8 compliant with some exceptions (see pytest.ini)
* 100% test coverage with pytest (with valid tests)
-* each :py:class:`~awslimitchecker.services.base._AwsService` subclass
- should only connect to boto once, and should save the connection as ``self.conn``.
- They *must not* connect in the class constructor.
+* Connections to the AWS services should only be made by the class's
+ :py:meth:`~awslimitchecker.connectable.Connectable.connect` and
+ :py:meth:`~awslimitchecker.connectable.Connectable.connect_resource` methods,
+ inherited from the :py:class:`~awslimitchecker.connectable.Connectable`
+ mixin.
* All modules should have (and use) module-level loggers.
* See the section on the AGPL license below.
* **Commit messages** should be meaningful, and reference the Issue number
@@ -69,21 +71,17 @@ Guidelines
Adding New Limits and Checks to Existing Services
-------------------------------------------------
-First, note that all calls to AWS APIs should be handled through
-:py:func:`~awslimitchecker.utils.boto_query_wrapper`, which handles
-retries (with exponential backoff) when API request rate limits are hit,
-and also handles paginated responses if they're not handled by boto.
-
-Second, note that queries which may be paginated **and return a dict** instead
-of a ResultSet object must have the proper parameters for
-:py:func:`~awslimitchecker.utils._paginate_dict` passed in to
-:py:func:`~awslimitchecker.utils.boto_query_wrapper`.
+First, note that any calls to boto3 client ("low-level") APIs that do
+have a Paginator should be handled through
+:py:func:`~awslimitchecker.utils.boto_query_wrapper`, and most have the proper
+parameters passed for :py:func:`~awslimitchecker.utils._paginate_dict` to
+paginate them.
1. Add a new :py:class:`~.AwsLimit` instance to the return value of the
Service class's :py:meth:`~._AwsService.get_limits` method.
2. In the Service class's :py:meth:`~._AwsService.find_usage` method (or a method
called by that, in the case of large or complex services), get the usage information
- via `boto` and pass it to the appropriate AwsLimit object via its
+ via ``self.conn`` and/or ``self.resource_conn`` and pass it to the appropriate AwsLimit object via its
:py:meth:`~.AwsLimit._add_current_usage` method. For anything more than trivial
services (those with only 2-3 limits), ``find_usage()`` should be broken into
multiple methods, generally one per AWS API call.
@@ -101,10 +99,11 @@ Adding New Services
All Services are sublcasses of :py:class:`~awslimitchecker.services.base._AwsService`
using the :py:mod:`abc` module.
-First, note that all calls to AWS APIs should be handled through
-:py:func:`~awslimitchecker.utils.boto_query_wrapper`, which handles
-retries (with exponential backoff) when API request rate limits are hit,
-and also handles paginated responses if they're not handled by boto.
+First, note that any calls to boto3 client ("low-level") APIs that do
+have a Paginator should be handled through
+:py:func:`~awslimitchecker.utils.boto_query_wrapper`, and most have the proper
+parameters passed for :py:func:`~awslimitchecker.utils._paginate_dict` to
+paginate them.
1. The new service name should be in CamelCase, preferably one word (if not one word, it should be underscore-separated).
In ``awslimitchecker/services``, use the ``addservice`` script; this will create a templated service class in the
@@ -117,11 +116,9 @@ and also handles paginated responses if they're not handled by boto.
2. Find all "TODO" comments in the newly-created files; these have instructions on things to change for new services.
Add yourself to the Authors section in the header if desired.
3. Add an import line for the new service in ``awslimitchecker/services/__init__.py``.
-4. Ensure that the :py:meth:`~awslimitchecker.services.base._AwsService.connect` method is properly defined; if ``self.conn`` is not None, then it
- should return None. If ``self.region`` is None, it should set ``self.conn`` to the return value of the appropriate
- ``boto.connect_*()`` method for the service, specifically the connected connection class for the service. Otherwise,
- it should call ``self.connect_via()`` (:py:meth:`~.Connectable.connect_via`) passing in the service's ``connect_to_region()``
- function as the argument. This is done to centralize region and STS connection logic in :py:class:`~._AwsService`.
+4. Be sure to set the class's ``api_name`` attribute to the correct name of the
+ AWS service API (i.e. the parameter passed to `boto3.client `_). This string can
+ typically be found at the top of the Service page in the `boto3 docs `_.
5. Write at least high-level tests; TDD is greatly preferred.
6. Implement all abstract methods from :py:class:`~awslimitchecker.services.base._AwsService` and any other methods you need;
small, easily-testable methods are preferred. Ensure all methods have full documentation. For simple services, you need only
@@ -132,17 +129,16 @@ and also handles paginated responses if they're not handled by boto.
8. Test your code; 100% test coverage is expected, and mocks should be using ``autospec`` or ``spec_set``.
9. Ensure the :py:meth:`~awslimitchecker.services.base._AwsService.required_iam_permissions` method of your new class
returns a list of all IAM permissions required for it to work.
-10. Write integration tests. (currently not implemented; see `issue #21 `_)
-11. Run all tox jobs, or at least one python version, docs and coverage.
-12. Commit the updated documentation to the repository.
-13. As there is no programmatic way to validate IAM policies, once you are done writing your service, grab the
+10. Run all tox jobs, or at least one python version, docs and coverage.
+11. Commit the updated documentation to the repository.
+12. As there is no programmatic way to validate IAM policies, once you are done writing your service, grab the
output of ``awslimitchecker --iam-policy``, login to your AWS account, and navigate to the IAM page.
Click through to create a new policy, paste the output of the ``--iam-policy`` command, and click the
"Validate Policy" button. Correct any errors that occur; for more information, see the AWS IAM docs on
- `Using Policy Validator `_.
+ `Using Policy Validator `_.
It would also be a good idea to run any policy changes through the
- `Policy Simulator `_.
-14. Submit your pull request.
+ `Policy Simulator `_.
+13. Submit your pull request.
.. _development.adding_ta:
@@ -160,7 +156,7 @@ For further information, see :ref:`Internals / Trusted Advisor `_, driven by `tox `_.
+Testing is done via `pytest `_, driven by `tox `_.
* testing is as simple as:
@@ -182,7 +178,16 @@ is ready for it when boto is.
Integration Testing
-------------------
-currently not implemented; see `issue #21 `_
+Integration tests are automatically run in TravisCI for all **non-pull request**
+branches. You can run them manually from your local machine using:
+
+.. code-block:: console
+
+ tox -r -e integration,integration3
+
+These tests simply run ``awslimitchecker``'s CLI script for both usage and limits, for all services and each service individually. Note that this covers a very small amount of the code, as the account that I use for integration tests has virtually no resources in it.
+
+If integration tests fail, check the required IAM permissions. The IAM user that I use for Travis integration tests has a manually-maintained IAM policy.
.. _development.docs:
@@ -215,7 +220,7 @@ work needed. See the guidelines below for information.
* If you're using awslimitchecker in your own software in a way that allows users to interact with it over the network (i.e. in your
deployment or monitoring systems), but not modifying it, you also don't need to do anything special; awslimitchecker will log a
WARNING-level message indicating where the source code of the currently-running version can be obtained. So long as you've installed
- awslimitchecker via Python's packaging system (i.e. with `pip`), its current version and source will be automatically detected. This
+ awslimitchecker via Python's packaging system (i.e. with ``pip``), its current version and source will be automatically detected. This
suffices for the AGPL source code offer provision, so long as it's displayed to users and the currently-running source is unmodified.
* If you wish to modify the source code of awslimitchecker, you need to do is ensure that :py:meth:`~awslimitchecker.version._get_version_info`
always returns correct and accutate information (a publicly-accessible URL to the exact version of the running source code, and a version number).
diff --git a/docs/source/getting_started.rst b/docs/source/getting_started.rst
index b8d9a494..ac6c3422 100644
--- a/docs/source/getting_started.rst
+++ b/docs/source/getting_started.rst
@@ -49,9 +49,9 @@ Threshold
Requirements
------------
-* Python 2.6 or 2.7 (`boto `_ currently has incomplete python3 support)
+* Python 2.6 through 3.5.
* Python `VirtualEnv `_ and ``pip`` (recommended installation method; your OS/distribution should have packages for these)
-* `boto `_
+* `boto3 `_ >= 1.2.3
.. _getting_started.installing:
@@ -78,7 +78,7 @@ Credentials
Aside from STS, awslimitchecker does nothing with AWS credentials, it leaves that to boto itself.
You must either have your credentials configured in one of boto's supported config
files, or set as environment variables. See
-`boto config `_
+`boto3 config `_
and
`this project's documentation `_
for further information.
diff --git a/docs/source/index.rst b/docs/source/index.rst
index ae575a6d..5fee0051 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -74,8 +74,11 @@ Advisor where available), notifying you when you are approaching or at your limi
Status
-------
-This project is currently in very early development. At this time please consider it beta code and not fully tested in all situations;
-furthermore its API may be changing rapidly. I hope to have this stabilized soon.
+This project has just undergone a relatively major refactor to migrate from
+[boto](http://boto.readthedocs.org) to [boto3](http://boto3.readthedocs.org/),
+along with a refactor of much of the connection and usage gathering code. Until
+it's been running in production for a while, please consider this to be "beta"
+and make every effort to manually confirm the results for your environment.
What It Does
------------
@@ -94,10 +97,9 @@ What It Does
Requirements
------------
-* Python 2.6 through 3.4. Python 2.x is recommended, as `boto `_ (the AWS client library) currently has
- incomplete Python3 support. See the `boto documentation `_ for a list of AWS services that are Python3-compatible.
+* Python 2.6 through 3.5.
* Python `VirtualEnv `_ and ``pip`` (recommended installation method; your OS/distribution should have packages for these)
-* `boto `_ >= 2.32.0
+* `boto3 `_ >= 1.2.3
Installation and Usage
-----------------------
diff --git a/docs/source/internals.rst b/docs/source/internals.rst
index b5ad3c84..86f5f812 100644
--- a/docs/source/internals.rst
+++ b/docs/source/internals.rst
@@ -16,18 +16,18 @@ and should be the only portion directly used by external code.
Each AWS Service is represented by a subclass of the :py:class:`~awslimitchecker.services.base._AwsService` abstract base
class; these Service Classes are responsible for knowing which limits exist for the service they represent, what the
default values for these limits are, querying current limits from the service's API (if supported),
-and how to check the current usage via the AWS API (via :py:mod:`boto`). When the
+and how to check the current usage via the AWS API (``boto3``). When the
Service Classes are instantiated, they build a dict of all of their limits, correlating a string key (the "limit name")
with an :py:class:`~awslimitchecker.limit.AwsLimit` object. The Service Class constructors *must not* make any network
connections; connections are created lazily as needed and stored as a class attribute. This allows us to inspect the
services, limits and default limit values without ever connecting to AWS (this is also used to generate the
:ref:`Supported Limits ` documentation automatically).
-All calls to the AWS APIs should be made through :py:func:`~awslimitchecker.utils.boto_query_wrapper`. This function
-encapsulates both retrying queries with an exponential backoff when queries are throttled due to your account hitting
-the `request rate limit `_
-(via :py:func:`~awslimitchecker.utils.invoke_with_throttling_retries`) and automatically paginating query responses
-that aren't automatically handled by boto.
+All calls to boto3 client ("low-level") APIs that do
+have a Paginator should be handled through
+:py:func:`~awslimitchecker.utils.boto_query_wrapper`, and most have the proper
+parameters passed for :py:func:`~awslimitchecker.utils._paginate_dict` to
+paginate them.
When :py:class:`~awslimitchecker.checker.AwsLimitChecker` is instantiated, it imports :py:mod:`~awslimitchecker.services`
which in turn creates instances of all ``awslimitchecker.services.*`` classes and adds them to a dict mapping the
diff --git a/integration_helper.sh b/integration_helper.sh
index d8cfebef..cf07ce9e 100755
--- a/integration_helper.sh
+++ b/integration_helper.sh
@@ -1,13 +1,24 @@
-#!/bin/bash -e
+#!/bin/bash -x
if [[ "$TRAVIS_PULL_REQUEST" != "false" ]]; then
echo "Not running integration tests for pull requests."
exit 0
fi
+FAILURES=0
# NOTE - these run using the restricted IAM permissions suggested by awslimitchecker;
# if the tests start failing, it's probably because someone with access to the
# limitchecker test AWS account needs to manually update the IAM permissions
# on the test user.
-awslimitchecker -vv -r us-west-2 -l
-awslimitchecker -vv -r us-west-2 -u
+awslimitchecker -vv -r us-west-2 -l || FAILURES=1
+awslimitchecker -vv -r us-west-2 -u || FAILURES=1
+
+while read svcname; do
+ awslimitchecker -vv -r us-west-2 -l -S $svcname || FAILURES=1
+ awslimitchecker -vv -r us-west-2 -u -S $svcname || FAILURES=1
+done< <(awslimitchecker -s)
+
+if [ "$FAILURES" -eq 1 ]; then
+ echo "ERROR: some tests failed!"
+ exit 1
+fi
diff --git a/setup.py b/setup.py
index c37a66ab..87a01d80 100644
--- a/setup.py
+++ b/setup.py
@@ -45,7 +45,7 @@
long_description = file.read()
requires = [
- 'boto>=2.32.0',
+ 'boto3>=1.2.3',
'termcolor>=1.1.0',
'python-dateutil>=2.4.2',
]
@@ -64,8 +64,10 @@
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
+ 'Programming Language :: Python :: 3.5',
'Topic :: Internet',
'Topic :: System :: Monitoring',
]
@@ -84,6 +86,6 @@
description='A script and python module to check your AWS service limits and usage using boto.',
long_description=long_description,
install_requires=requires,
- keywords="AWS EC2 Amazon boto limits cloud",
+ keywords="AWS EC2 Amazon boto boto3 limits cloud",
classifiers=classifiers
)
diff --git a/tox.ini b/tox.ini
index 0cbdf923..b64f4d39 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = {py26,py27,py32,py33,py34,pypy,pypy3}-{unit,versioncheck}, docs, integration, integration3
+envlist = {py26,py27,py32,py33,py34,py35,pypy,pypy3}-{unit,versioncheck}, docs, integration, integration3
[testenv]
deps =
@@ -15,7 +15,7 @@ deps =
pytest-flakes
mock
freezegun
- boto==2.32.0
+ boto3==1.2.3
pytest-blockage
virtualenv
@@ -23,10 +23,20 @@ passenv=TRAVIS*
setenv =
TOXINIDIR={toxinidir}
TOXDISTDIR={distdir}
+ # when we call ``awslimitchecker.tests.services.result_fixtures.get_boto3_resource_model()``
+ # botocore.session.Session will attempt HTTP requests to 169.254.169.254 to
+ # retrieve Instance Metadata and IAM roles. In order to prevent this,
+ # we simply export bogus AWS keys.
+ AWS_ACCESS_KEY_ID=a
+ AWS_SECRET_ACCESS_KEY=b
+ AWS_DEFAULT_REGION=us-east-1
sitepackages = False
whitelist_externals = env
commands =
+ python --version
+ virtualenv --version
+ pip --version
env
pip freeze
unit: py.test -rxs -vv --pep8 --flakes --blockage -m "not versioncheck" --cov-report term-missing --cov-report xml --cov-report html --cov-config {toxinidir}/.coveragerc --cov=awslimitchecker {posargs} awslimitchecker
@@ -52,18 +62,28 @@ deps =
# mock >= 1.1.0 no longer works with py26
mock==1.0.1
freezegun
- boto==2.32.0
pytest-blockage
+ boto3==1.2.3
virtualenv
passenv=TRAVIS*
setenv =
TOXINIDIR={toxinidir}
TOXDISTDIR={distdir}
+ # when we call ``awslimitchecker.tests.services.result_fixtures.get_boto3_resource_model()``
+ # botocore.session.Session will attempt HTTP requests to 169.254.169.254 to
+ # retrieve Instance Metadata and IAM roles. In order to prevent this,
+ # we simply export bogus AWS keys.
+ AWS_ACCESS_KEY_ID=a
+ AWS_SECRET_ACCESS_KEY=b
+ AWS_DEFAULT_REGION=us-east-1
sitepackages = False
whitelist_externals = env
commands =
+ python --version
+ virtualenv --version
+ pip --version
env
pip freeze
unit: py.test -rxs -vv --pep8 --flakes --blockage -m "not versioncheck" {posargs} awslimitchecker
@@ -72,9 +92,64 @@ commands =
# always recreate the venv
recreate = True
+[testenv:py32-versioncheck]
+deps =
+ cov-core
+ coverage==3.7.1
+ execnet
+ pep8
+ py
+ pytest>=2.8.3
+ pytest-cache
+ pytest-cov
+ pytest-pep8
+ pytest-flakes
+ mock
+ freezegun
+ boto3==1.2.3
+ pytest-blockage
+ virtualenv==13.1.2
+
+passenv=TRAVIS*
+setenv =
+ TOXINIDIR={toxinidir}
+ TOXDISTDIR={distdir}
+ # when we call ``awslimitchecker.tests.services.result_fixtures.get_boto3_resource_model()``
+ # botocore.session.Session will attempt HTTP requests to 169.254.169.254 to
+ # retrieve Instance Metadata and IAM roles. In order to prevent this,
+ # we simply export bogus AWS keys.
+ AWS_ACCESS_KEY_ID=a
+ AWS_SECRET_ACCESS_KEY=b
+ AWS_DEFAULT_REGION=us-east-1
+
+sitepackages = False
+whitelist_externals = env
+commands =
+ python --version
+ virtualenv --version
+ pip --version
+ env
+ pip freeze
+ unit: py.test -rxs -vv --pep8 --flakes --blockage -m "not versioncheck" --cov-report term-missing --cov-report xml --cov-report html --cov-config {toxinidir}/.coveragerc --cov=awslimitchecker {posargs} awslimitchecker
+ versioncheck: py.test -rxs -vv -s --pep8 --flakes --blockage -m "versioncheck" {posargs} awslimitchecker
+
+# always recreate the venv
+recreate = True
+
[testenv:docs]
# this really just makes sure README.rst will parse on pypi
passenv = CI TRAVIS* CONTINUOUS_INTEGRATION AWS*
+setenv =
+ TOXINIDIR={toxinidir}
+ TOXDISTDIR={distdir}
+ # when we call ``awslimitchecker.tests.services.result_fixtures.get_boto3_resource_model()``
+ # botocore.session.Session will attempt HTTP requests to 169.254.169.254 to
+ # retrieve Instance Metadata and IAM roles. In order to prevent this,
+ # we simply export bogus AWS keys.
+ AWS_ACCESS_KEY_ID=a
+ AWS_SECRET_ACCESS_KEY=b
+ AWS_DEFAULT_REGION=us-east-1
+
deps =
docutils
pygments
@@ -82,6 +157,9 @@ deps =
sphinx_rtd_theme
basepython = python2.7
commands =
+ python --version
+ virtualenv --version
+ pip --version
env
pip freeze
rst2html.py --halt=2 README.rst /dev/null
@@ -97,17 +175,29 @@ commands =
[testenv:integration]
# this really just makes sure README.rst will parse on pypi
passenv = CI TRAVIS* CONTINUOUS_INTEGRATION AWS*
+setenv =
+ TOXINIDIR={toxinidir}
+ TOXDISTDIR={distdir}
basepython = python2.7
sitepackages = False
commands =
+ python --version
+ virtualenv --version
+ pip --version
pip freeze
{toxinidir}/integration_helper.sh
[testenv:integration3]
# this really just makes sure README.rst will parse on pypi
passenv = CI TRAVIS* CONTINUOUS_INTEGRATION AWS*
+setenv =
+ TOXINIDIR={toxinidir}
+ TOXDISTDIR={distdir}
basepython = python3.4
sitepackages = False
commands =
+ python --version
+ virtualenv --version
+ pip --version
pip freeze
{toxinidir}/integration_helper.sh