From feeb11686130e044196af4450ce14ba6161259d9 Mon Sep 17 00:00:00 2001 From: Chris Gilbert Date: Tue, 7 Jul 2015 17:16:06 +0100 Subject: [PATCH 01/11] INF-776 Adding IIS Node Type, Autoscaling Groups, Etc --- tyr/policies/ec2.py | 234 ++++++++++++++++++++++++++++++++++++ tyr/servers/iis/__init__.py | 1 + tyr/servers/iis/node.py | 76 ++++++++++++ tyr/servers/server.py | 86 +++++++++++-- 4 files changed, 384 insertions(+), 13 deletions(-) create mode 100644 tyr/servers/iis/__init__.py create mode 100644 tyr/servers/iis/node.py diff --git a/tyr/policies/ec2.py b/tyr/policies/ec2.py index 56adde1..c3c000d 100644 --- a/tyr/policies/ec2.py +++ b/tyr/policies/ec2.py @@ -84,3 +84,237 @@ } ] }""" + +allow_create_tags = """{ + "Statement": [ + { "Sid": "Stmt1357615676069", + "Action": [ + "ec2:CreateTags" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ] +}""" + + +allow_web_initialization_prod = """{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ + "s3:GetObject" + ], + "Sid": "Stmt1370289990000", + "Resource": [ + "arn:aws:s3:::hudl-config/common/*", + "arn:aws:s3:::hudl-config/prod-mv-web/*" + ], + "Effect": "Allow" + }, + { + "Action": [ + "s3:ListBucket" + ], + "Sid": "Stmt1370290042000", + "Condition": { + "StringLike": { + "s3:prefix": "prod-mv-web/*" + } + }, + "Resource": [ + "arn:aws:s3:::hudl-config" + ], + "Effect": "Allow" + }, + { + "Action": [ + "cloudwatch:DescribeAlarms", + "cloudwatch:DescribeAlarmsForMetric", + "cloudwatch:ListMetrics", + "cloudwatch:PutMetricAlarm", + "cloudwatch:PutMetricData", + "cloudwatch:SetAlarmState" + ], + "Sid": "Stmt1370290134000", + "Resource": [ + "*" + ], + "Effect": "Allow" + } + ] +}""" + + +allow_web_initialization_stage = """{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ + "s3:GetObject" + ], + "Sid": "Stmt1370289990000", + "Resource": [ + "arn:aws:s3:::hudl-config/common/*", + "arn:aws:s3:::hudl-config/stage-mv-web/*" + ], + "Effect": "Allow" + }, + { + "Action": [ + "s3:ListBucket" + ], + "Sid": "Stmt1370290042000", + "Condition": { + "StringLike": { + "s3:prefix": "stage-mv-web/*" + } + }, + "Resource": [ + "arn:aws:s3:::hudl-config" + ], + "Effect": "Allow" + }, + { + "Action": [ + "cloudwatch:DescribeAlarms", + "cloudwatch:DescribeAlarmsForMetric", + "cloudwatch:ListMetrics", + "cloudwatch:PutMetricAlarm", + "cloudwatch:PutMetricData", + "cloudwatch:SetAlarmState" + ], + "Sid": "Stmt1370290134000", + "Resource": [ + "*" + ], + "Effect": "Allow" + } + ] +}""" + + +allow_outpost_sns_prod = """{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Stmt1390578067000", + "Effect": "Allow", + "Action": [ + "sns:ListSubscriptions", + "sns:ListSubscriptionsByTopic", + "sns:Publish", + "sns:Subscribe" + ], + "Resource": [ + "arn:aws:sns:us-east-1:761584570493:prod-outpost", + "arn:aws:sns:us-east-1:761584570493:alyx3" + ] + }, + { + "Sid": "Stmt1390578067001", + "Effect": "Allow", + "Action": [ + "sns:ConfirmSubscription", + "sns:Unsubscribe" + ], + "Resource": [ + "arn:aws:sns:us-east-1:761584570493:*" + ] + } + ] +}""" + +allow_outpost_sns_stage = """{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Stmt1390578067000", + "Effect": "Allow", + "Action": [ + "sns:ListSubscriptions", + "sns:ListSubscriptionsByTopic", + "sns:Publish", + "sns:Subscribe" + ], + "Resource": [ + "arn:aws:sns:us-east-1:761584570493:stage-outpost", + "arn:aws:sns:us-east-1:761584570493:alyx3" + ] + }, + { + "Sid": "Stmt1390578067001", + "Effect": "Allow", + "Action": [ + "sns:ConfirmSubscription", + "sns:Unsubscribe" + 
], + "Resource": [ + "arn:aws:sns:us-east-1:761584570493:*" + ] + } + ] +}""" + + +allow_set_cloudwatch_alarms = """{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Stmt1399496077000", + "Effect": "Allow", + "Action": [ + "cloudwatch:DescribeAlarms", + "cloudwatch:PutMetricAlarm" + ], + "Resource": [ + "*" + ] + } + ] +}""" + +allow_remove_cloudwatch_alarms = """{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Stmt1399498965000", + "Effect": "Allow", + "Action": [ + "cloudwatch:DeleteAlarms" + ], + "Resource": [ + "*" + ] + } + ] +}""" + +allow_deploy_web_updates = """{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Stmt1408567829000", + "Effect": "Allow", + "Action": [ + "s3:ListBucket" + ], + "Resource": [ + "arn:aws:s3:::hudl-web-updates" + ] + }, + { + "Sid": "Stmt1408567479000", + "Effect": "Allow", + "Action": [ + "s3:GetObject" + ], + "Resource": [ + "arn:aws:s3:::hudl-web-updates*" + ] + } + ] +}""" + diff --git a/tyr/servers/iis/__init__.py b/tyr/servers/iis/__init__.py new file mode 100644 index 0000000..1f712e0 --- /dev/null +++ b/tyr/servers/iis/__init__.py @@ -0,0 +1 @@ +from server import IISNode diff --git a/tyr/servers/iis/node.py b/tyr/servers/iis/node.py new file mode 100644 index 0000000..e197390 --- /dev/null +++ b/tyr/servers/iis/node.py @@ -0,0 +1,76 @@ +from tyr.servers.server import Server + + +class IISNode(Server): + + SERVER_TYPE = 'web' + + AMI_NAME = '' + + IAM_ROLE_POLICIES = [ + 'allow-describe-instances', + 'allow-describe-tags', + 'allow-describe-elbs', + 'allow-set-cloudwatch-alarms', + 'allow-remove-cloudwatch-alarms', + 'allow_deploy_web_updates', + + ] + + def __init__(self, group=None, server_type=None, instance_type=None, + environment=None, ami=None, region=None, role=None, + keypair=None, availability_zone=None, security_groups=None, + block_devices=None, chef_path=None, subnet_id=None, + dns_zones=None, desired_capacity=None, max_capacity=None, + min_capacity=None): + + if server_type is None: + server_type = self.SERVER_TYPE + + super(IISNode, self).__init__(group=group, server_type=server_type, + instance_type=instance_type, + environment=environment, + ami=ami, + region=region, + role=role, + keypair=keypair, + availability_zone=availability_zone, + security_groups=security_groups, + block_devices=block_devices, + chef_path=chef_path, + subnet_id=subnet_id, + dns_zones=dns_zones, + desired_capacity=desired_capacity, + max_capacity=max_capacity, + min_capacity=min_capacity) + + def configure(self): + super(IISNode, self).configure() + + env_prefix = self.environment[0] + + self.security_groups = [ + "management", + "chef-client", + "{0}-queueproc-jobs".format(env_prefix), + "{0}-queues-jobs".format(env_prefix), + "{0}-web".format(env_prefix), + "{0}-nginx".format(env_prefix), + ] + + self.ingress_groups_to_add = [ + "{0}-web".format(env_prefix), + "{0}-nginx".format(env_prefix) + ] + + self.IAM_ROLE_POLICIES.append('allow_web_initialization_{0}' + .format(self.environment)) + self.IAM_ROLE_POLICIES.append('allow_outpost_sns_prod_{0}' + .format(self.environment)) + self.IAM_ROLE_POLICIES.append('{0}-{1}-web' + .format(self.environment)) + + self.resolve_iam_role() + self.launch_configuration() + self.autoscale_group() + self.ingress_rules() diff --git a/tyr/servers/server.py b/tyr/servers/server.py index eec316f..979ef10 100644 --- a/tyr/servers/server.py +++ b/tyr/servers/server.py @@ -9,26 +9,30 @@ from boto.ec2.networkinterface import NetworkInterfaceSpecification import json from boto.ec2.networkinterface 
import NetworkInterfaceCollection +from boto.ec2.autoscale import AutoScaleConnection +from boto.ec2.autoscale import LaunchConfiguration import urllib from boto.vpc import VPCConnection from paramiko.client import AutoAddPolicy, SSHClient from tyr.policies import policies + class Server(object): - NAME_TEMPLATE='{envcl}-{location}-{index}' - NAME_SEARCH_PREFIX='{envcl}-{location}-' - NAME_AUTO_INDEX=True + NAME_TEMPLATE = '{envcl}-{location}-{index}' + NAME_SEARCH_PREFIX = '{envcl}-{location}-' + NAME_AUTO_INDEX = True IAM_ROLE_POLICIES = [] - CHEF_RUNLIST=['role[RoleBase]'] + CHEF_RUNLIST = ['role[RoleBase]'] def __init__(self, group=None, server_type=None, instance_type=None, environment=None, ami=None, region=None, role=None, keypair=None, availability_zone=None, security_groups=None, block_devices=None, chef_path=None, subnet_id=None, - dns_zones=None): + dns_zones=None, desired_capacity=None, max_capacity=None, + min_capacity=None): self.instance_type = instance_type self.group = group @@ -45,6 +49,9 @@ def __init__(self, group=None, server_type=None, instance_type=None, self.dns_zones = dns_zones self.subnet_id = subnet_id self.vpc_id = None + self.desired_capacity = desired_capacity + self.max_capacity = max_capacity + self.min_capacity = min_capacity def establish_logger(self): @@ -95,6 +102,12 @@ def configure(self): self.log.warn('No environment provided') self.environment = 'test' + if self.desired_capacity is None: + self.desired_capacity = 3 + + if self.maximum_capacity is None: + self.maximum_capacity = 3 + self.environment = self.environment.lower() self.log.info('Using Environment "{environment}"'.format( @@ -118,6 +131,7 @@ def configure(self): self.establish_ec2_connection() self.establish_iam_connection() self.establish_route53_connection() + self.establish_autoscale_connection() if self.ami is None: self.log.warn('No AMI provided') @@ -464,28 +478,28 @@ def get_subnet_vpc_id(self, subnet_id): def resolve_security_groups(self): filters = {} self.log.info("Resolving security groups") - + # If the server is being spun up in a vpc, search only that vpc exists = lambda s: s in [group.name for group in self.ec2.get_all_security_groups() if self.vpc_id == group.vpc_id] for index, group in enumerate(self.security_groups): - + if not exists(group): - self.log.info('Security Group {group} does not exist'.format( - group=group)) + self.log.info('Security Group {group} does not exist' + .format(group=group)) if self.subnet_id is None: self.ec2.create_security_group(group, group) else: vpc_conn = VPCConnection() vpc_conn.create_security_group( group, group, vpc_id=self.vpc_id) - self.log.info('Created security group {group}'.format( - group=group)) + self.log.info('Created security group {group}' + .format(group=group)) else: - self.log.info('Security Group {group} already exists'.format( - group=group)) + self.log.info('Security Group {group} already exists' + .format(group=group)) def resolve_iam_role(self): @@ -649,6 +663,14 @@ def establish_route53_connection(self): self.log.error(str(e)) raise e + def establish_autoscale_connection(self): + try: + self.autoscale = boto.ec2.autoscale.connect_to_region(self.region) + self.log.info('Established connection to autoscale') + except Exception, e: + self.log.error(str(e)) + raise e + def get_security_group_ids(self, security_groups, vpc_id=None): security_group_ids = [] for group in security_groups: @@ -786,6 +808,44 @@ def route(self, wait=False): self.log.error(str(e)) raise e + def launch_configuration(self): + try: + 
self.autoscale.get_all_launch_configurations(names=[self.envcl]) + except: + log.info("Creating new launch_configuration: {0}".format(self.envcl)) + lc = LaunchConfiguration(name=self.envcl, image_id=self.ami, + key_name=self.keypair, security_groups=self.security_groups) + conn.create_launch_configuration(lc) + self.launnch_configuration = lc + + + def autoscaling_group(self): + try: + exiting_asg = self.autoscale.get_all_groups(names=[self.envcl]) + except: + if not existing_asg: + log.info("Creating new autoscaling group: {0}".format(self.group)) + + ag = AutoScalingGroup(name=self.group, + load_balancers=self.load_balancers, + availability_zones=self.availability_zones, + desired_capacity=self.desired_capacity, + health_check_period=self.health_check_grace_period, + launch_config=self.launnch_configuration, + min_size=self.min_size, + max_size=self.max_size, + default_cooldown=cooldown, + connection=self.autoscale) + conn.create_auto_scaling_group(ag) + + def ingress_rules(self): + try: + grps = conn.get_all_security_groups(groupnames=[self.envcl]) + grps[0].authorize(src_grp=self.ingress_groups_to_add) + except: + self.log.error("Unable to add ingress rules!") + raise e + @property def connection(self): From 124f35e69285724a06bea9577d439b5021ad6fc5 Mon Sep 17 00:00:00 2001 From: Chris Gilbert Date: Fri, 10 Jul 2015 10:00:45 +0100 Subject: [PATCH 02/11] Some fixes following testing --- tyr/policies/__init__.py | 9 +++++++- tyr/policies/ec2.py | 8 +++---- tyr/servers/iis/__init__.py | 2 +- tyr/servers/iis/node.py | 18 ++++++++------- tyr/servers/server.py | 45 ++++++++++++++++++++++++++----------- 5 files changed, 55 insertions(+), 27 deletions(-) diff --git a/tyr/policies/__init__.py b/tyr/policies/__init__.py index 39ea88c..157fda2 100644 --- a/tyr/policies/__init__.py +++ b/tyr/policies/__init__.py @@ -14,5 +14,12 @@ 'allow-get-nginx-config': s3.allow_get_nginx_config, 'allow-describe-elbs': elb.allow_describe_elbs, 'allow-modify-nginx-elbs-stage': elb.allow_modify_nginx_elbs_stage, - 'allow-modify-nginx-elbs-prod': elb.allow_modify_nginx_elbs_prod + 'allow-modify-nginx-elbs-prod': elb.allow_modify_nginx_elbs_prod, + 'allow-outpost-sns-stage': ec2.allow_outpost_sns_stage, + 'allow-outpost-sns-prod': ec2.allow_outpost_sns_prod, + 'allow-web-initialization-prod': ec2.allow_web_initialization_prod, + 'allow-web-initialization-stage': ec2.allow_web_initialization_stage, + 'allow-set-cloudwatch-alarms': ec2.allow_set_cloudwatch_alarms, + 'allow-remove-cloudwatch-alarms': ec2.allow_remove_cloudwatch_alarms, + 'allow-deploy-web-updates': ec2.allow_deploy_web_updates } diff --git a/tyr/policies/ec2.py b/tyr/policies/ec2.py index c3c000d..ec50a8b 100644 --- a/tyr/policies/ec2.py +++ b/tyr/policies/ec2.py @@ -110,7 +110,7 @@ "Sid": "Stmt1370289990000", "Resource": [ "arn:aws:s3:::hudl-config/common/*", - "arn:aws:s3:::hudl-config/prod-mv-web/*" + "arn:aws:s3:::hudl-config/p-mv-web/*" ], "Effect": "Allow" }, @@ -121,7 +121,7 @@ "Sid": "Stmt1370290042000", "Condition": { "StringLike": { - "s3:prefix": "prod-mv-web/*" + "s3:prefix": "p-mv-web/*" } }, "Resource": [ @@ -158,7 +158,7 @@ "Sid": "Stmt1370289990000", "Resource": [ "arn:aws:s3:::hudl-config/common/*", - "arn:aws:s3:::hudl-config/stage-mv-web/*" + "arn:aws:s3:::hudl-config/s-mv-web/*" ], "Effect": "Allow" }, @@ -169,7 +169,7 @@ "Sid": "Stmt1370290042000", "Condition": { "StringLike": { - "s3:prefix": "stage-mv-web/*" + "s3:prefix": "s-mv-web/*" } }, "Resource": [ diff --git a/tyr/servers/iis/__init__.py b/tyr/servers/iis/__init__.py 
index 1f712e0..f450330 100644 --- a/tyr/servers/iis/__init__.py +++ b/tyr/servers/iis/__init__.py @@ -1 +1 @@ -from server import IISNode +from node import IISNode diff --git a/tyr/servers/iis/node.py b/tyr/servers/iis/node.py index e197390..346a5f0 100644 --- a/tyr/servers/iis/node.py +++ b/tyr/servers/iis/node.py @@ -13,7 +13,7 @@ class IISNode(Server): 'allow-describe-elbs', 'allow-set-cloudwatch-alarms', 'allow-remove-cloudwatch-alarms', - 'allow_deploy_web_updates', + 'allow-deploy-web-updates', ] @@ -45,8 +45,6 @@ def __init__(self, group=None, server_type=None, instance_type=None, min_capacity=min_capacity) def configure(self): - super(IISNode, self).configure() - env_prefix = self.environment[0] self.security_groups = [ @@ -63,14 +61,18 @@ def configure(self): "{0}-nginx".format(env_prefix) ] - self.IAM_ROLE_POLICIES.append('allow_web_initialization_{0}' - .format(self.environment)) - self.IAM_ROLE_POLICIES.append('allow_outpost_sns_prod_{0}' + self.ports_to_authorize = [9000, 9001, 8095, 8096] + + self.IAM_ROLE_POLICIES.append('allow-web-initialization-{0}' .format(self.environment)) - self.IAM_ROLE_POLICIES.append('{0}-{1}-web' + self.IAM_ROLE_POLICIES.append('allow-outpost-sns-{0}' .format(self.environment)) + #self.IAM_ROLE_POLICIES.append('{0}-{1}-web' + # .format(self.environment, self.group)) + + super(IISNode, self).configure() self.resolve_iam_role() self.launch_configuration() - self.autoscale_group() + self.autoscaling_group() self.ingress_rules() diff --git a/tyr/servers/server.py b/tyr/servers/server.py index 979ef10..3e2811f 100644 --- a/tyr/servers/server.py +++ b/tyr/servers/server.py @@ -32,7 +32,8 @@ def __init__(self, group=None, server_type=None, instance_type=None, keypair=None, availability_zone=None, security_groups=None, block_devices=None, chef_path=None, subnet_id=None, dns_zones=None, desired_capacity=None, max_capacity=None, - min_capacity=None): + min_capacity=None, ingress_groups_to_add=None, + ports_to_authorize=None, classic_link=False): self.instance_type = instance_type self.group = group @@ -52,6 +53,9 @@ def __init__(self, group=None, server_type=None, instance_type=None, self.desired_capacity = desired_capacity self.max_capacity = max_capacity self.min_capacity = min_capacity + self.ingress_groups_to_add = ingress_groups_to_add + self.ports_to_authorize = ports_to_authorize + self.classic_link = classic_link def establish_logger(self): @@ -105,8 +109,8 @@ def configure(self): if self.desired_capacity is None: self.desired_capacity = 3 - if self.maximum_capacity is None: - self.maximum_capacity = 3 + if self.max_capacity is None: + self.max_capacity = 3 self.environment = self.environment.lower() @@ -813,18 +817,22 @@ def launch_configuration(self): self.autoscale.get_all_launch_configurations(names=[self.envcl]) except: log.info("Creating new launch_configuration: {0}".format(self.envcl)) - lc = LaunchConfiguration(name=self.envcl, image_id=self.ami, - key_name=self.keypair, security_groups=self.security_groups) + #userDataPlainText = "{'bucket':'hudl-config','key':'" + self.environment[0] + "-mv-web/init.config.json','mongos': '$Mongos', 'mongoServers': '$MongoServers'}" + lc = LaunchConfiguration(name=self.envcl, + image_id=self.ami, + key_name=self.keypair, + security_groups=self.security_groups) conn.create_launch_configuration(lc) self.launnch_configuration = lc - def autoscaling_group(self): try: exiting_asg = self.autoscale.get_all_groups(names=[self.envcl]) + self.log.info('Autoscaling group already exists.') except: + raise if not 
existing_asg: - log.info("Creating new autoscaling group: {0}".format(self.group)) + self.log.info("Creating new autoscaling group: {0}".format(self.group)) ag = AutoScalingGroup(name=self.group, load_balancers=self.load_balancers, @@ -839,12 +847,23 @@ def autoscaling_group(self): conn.create_auto_scaling_group(ag) def ingress_rules(self): - try: - grps = conn.get_all_security_groups(groupnames=[self.envcl]) - grps[0].authorize(src_grp=self.ingress_groups_to_add) - except: - self.log.error("Unable to add ingress rules!") - raise e + grp_id = self.get_security_group_ids([self.envcl]) + grps = self.ec2.get_all_security_groups(group_ids=grp_id) + for ing in self.ingress_groups_to_add: + self.log.info('Adding ingress rules for group: {0}' + .format(ing)) + grp_id = self.get_security_group_ids([ing]) + grp_obj = self.ec2.get_all_security_groups(group_ids=grp_id[0])[0] + for port in self.ports_to_authorize: + self.log.info("Adding port {0} to {1}.".format(port, ing)) + try: + grps[0].authorize(ip_protocol='tcp', + from_port=port, + to_port=port, + src_group=grp_obj) + except boto.exception.EC2ResponseError: + self.log.warning("Unable to add ingress rule. May already exist.") + @property def connection(self): From 17fa35ee1f97952e61bd5495aa4c6656e8140bfc Mon Sep 17 00:00:00 2001 From: Chris Gilbert Date: Fri, 10 Jul 2015 16:56:27 +0100 Subject: [PATCH 03/11] Refactored following testing --- tyr/autoscaler/__init__.py | 1 + tyr/autoscaler/autoscaler.py | 93 ++++++++++++++++++ tyr/clusters/__init__.py | 1 + tyr/clusters/iis.py | 119 ++++++++++++++++++++++ tyr/servers/iis/node.py | 29 +++--- tyr/servers/server.py | 184 +++++++++++++---------------------- 6 files changed, 291 insertions(+), 136 deletions(-) create mode 100644 tyr/autoscaler/__init__.py create mode 100644 tyr/autoscaler/autoscaler.py create mode 100644 tyr/clusters/iis.py diff --git a/tyr/autoscaler/__init__.py b/tyr/autoscaler/__init__.py new file mode 100644 index 0000000..8d53e68 --- /dev/null +++ b/tyr/autoscaler/__init__.py @@ -0,0 +1 @@ +from autoscaler import AutoScaler \ No newline at end of file diff --git a/tyr/autoscaler/autoscaler.py b/tyr/autoscaler/autoscaler.py new file mode 100644 index 0000000..0686ea9 --- /dev/null +++ b/tyr/autoscaler/autoscaler.py @@ -0,0 +1,93 @@ +from boto.ec2.autoscale import AutoScaleConnection +from boto.ec2.autoscale import LaunchConfiguration +from boto.ec2.autoscale import AutoScalingGroup +import boto.ec2 +import logging + + +class AutoScaler(object): + ''' + Autoscaler setup class + ''' + + log = logging.getLogger('Clusters.AutoScaler') + log.setLevel(logging.DEBUG) + ch = logging.StreamHandler() + ch.setLevel(logging.DEBUG) + formatter = logging.Formatter( + '%(asctime)s [%(name)s] %(levelname)s: %(message)s', + datefmt='%H:%M:%S') + ch.setFormatter(formatter) + log.addHandler(ch) + + def __init__(self, launch_configuration, + autoscaling_group, + node_obj, + desired_capacity=1, + max_size=1, + min_size=1, + default_cooldown=300, + availability_zone=None, + health_check_grace_period=300): + + self.launch_configuration = launch_configuration + self.autoscaling_group = autoscaling_group + self.desired_capacity = desired_capacity + self.node_obj = node_obj + self.max_size = max_size + self.min_size = min_size + self.default_cooldown = default_cooldown + self.health_check_grace_period = health_check_grace_period + + def establish_autoscale_connection(self): + try: + self.conn = boto.ec2.autoscale.connect_to_region(self.node_obj.region) + self.log.info('Established connection to autoscale') 
+ except: + raise + + def create_launch_configuration(self): + self.log.info("Getting launch_configuration: {0}" + .format(self.launch_configuration)) + + lc = self.conn.get_all_launch_configurations( + names=[self.launch_configuration]) + if not lc: + self.log.info("Creating new launch_configuration: {0}" + .format(self.launch_configuration)) + lc = LaunchConfiguration(name=self.launch_configuration, + image_id=self.node_obj.ami, + key_name=self.node_obj.keypair, + security_groups=self.node_obj.get_security_group_ids( + self.node_obj.security_groups), + user_data=self.node_obj.user_data) + self.conn.create_launch_configuration(lc) + self.launch_configuration = lc + + def create_autoscaling_group(self): + existing_asg = self.conn.get_all_groups( + names=[self.autoscaling_group]) + + if not existing_asg: + self.log.info("Creating new autoscaling group: {0}" + .format(self.autoscaling_group)) + + ag = AutoScalingGroup(name=self.autoscaling_group, + availability_zones=[self.node_obj.availability_zone], + desired_capacity=self.desired_capacity, + health_check_period=self.health_check_grace_period, + launch_config=self.launch_configuration, + min_size=self.min_size, + max_size=self.max_size, + default_cooldown=self.default_cooldown, + vpc_zone_identifier=[self.node_obj.subnet_id], + connection=self.conn) + self.conn.create_auto_scaling_group(ag) + else: + self.log.info('Autoscaling group {0} already exists.' + .format(self.autoscaling_group)) + + def autorun(self): + self.establish_autoscale_connection() + self.create_launch_configuration() + self.create_autoscaling_group() diff --git a/tyr/clusters/__init__.py b/tyr/clusters/__init__.py index 2008893..4f72526 100644 --- a/tyr/clusters/__init__.py +++ b/tyr/clusters/__init__.py @@ -1 +1,2 @@ from mongo import MongoCluster +from iis import IISCluster diff --git a/tyr/clusters/iis.py b/tyr/clusters/iis.py new file mode 100644 index 0000000..cc9c895 --- /dev/null +++ b/tyr/clusters/iis.py @@ -0,0 +1,119 @@ +import logging +from tyr.servers.iis import IISNode +from tyr.autoscaler import AutoScaler + +class IISCluster(): + log = logging.getLogger('Clusters.IIS') + log.setLevel(logging.DEBUG) + ch = logging.StreamHandler() + ch.setLevel(logging.DEBUG) + formatter = logging.Formatter( + '%(asctime)s [%(name)s] %(levelname)s: %(message)s', + datefmt='%H:%M:%S') + ch.setFormatter(formatter) + log.addHandler(ch) + + def __init__(self, group=None, + server_type=None, + instance_type=None, + environment=None, + ami=None, + region=None, + subnet_id=None, + role=None, + keypair=None, + security_groups=None, + autoscaling_group=None, + desired_capacity=1, + max_size=1, + min_size=1, + launch_nodes=0, + default_cooldown=300, + availability_zone=None, + health_check_grace_period=300, + launch_configuration=None): + + self.nodes = [] + + self.group = group + self.server_type = server_type + self.instance_type = instance_type + self.environment = environment + self.ami = ami + self.region = region + self.role = role + self.subnet_id = subnet_id + self.keypair = keypair + self.security_groups = security_groups + self.autoscaling_group = autoscaling_group + self.desired_capacity = desired_capacity + self.max_size = max_size + self.min_size = min_size + self.launch_nodes = launch_nodes + self.default_cooldown = default_cooldown + self.availability_zone = availability_zone + self.health_check_grace_period = health_check_grace_period + self.launch_configuration = launch_configuration + + def provision(self): + + if not self.launch_configuration: + 
self.launch_configuration = "{0}-{1}-web".format( + self.environment[0], self.group) + if not self.autoscaling_group: + if self.subnet_id: + templ = "{0}-{1}-web-asg-vpn" + else: + templ = "{0}-{1}-web-asg" + self.autoscaling_group = templ.format( + self.environment[0], self.group) + + node = IISNode(group=self.group, + server_type=self.server_type, + instance_type=self.instance_type, + environment=self.environment, + ami=self.ami, + region=self.region, + role=self.role, + keypair=self.keypair, + availability_zone=self.availability_zone, + security_groups=self.security_groups, + subnet_id=self.subnet_id) + node.configure() + + auto = AutoScaler(launch_configuration=self.launch_configuration, + autoscaling_group=self.autoscaling_group, + desired_capacity=self.desired_capacity, + max_size=self.max_size, + min_size=self.min_size, + default_cooldown=self.default_cooldown, + availability_zone=self.availability_zone, + health_check_grace_period=self.health_check_grace_period, + node_obj=node) + auto.autorun() + + # You can launch instances manually if required, but autoscale will + # launch them automatically if desired_capacity is set > 0 + for i in range(self.launch_nodes): + + node = IISNode(group=self.group, + server_type=self.server_type, + instance_type=self.instance_type, + environment=self.environment, + ami=self.ami, + region=self.region, + role=self.role, + keypair=self.keypair, + availability_zone=self.availability_zone, + security_groups=self.security_groups, + subnet_id=self.subnet_id) + + node.autorun() + self.nodes.append(node) + + def baked(self): + + return all([node.baked() for node in self.nodes]) + + def autorun(self): + self.provision() diff --git a/tyr/servers/iis/node.py b/tyr/servers/iis/node.py index 346a5f0..a9d1595 100644 --- a/tyr/servers/iis/node.py +++ b/tyr/servers/iis/node.py @@ -6,6 +6,8 @@ class IISNode(Server): SERVER_TYPE = 'web' AMI_NAME = '' + # Do not run chef + CHEF_RUNLIST = [] IAM_ROLE_POLICIES = [ 'allow-describe-instances', @@ -20,9 +22,7 @@ class IISNode(Server): def __init__(self, group=None, server_type=None, instance_type=None, environment=None, ami=None, region=None, role=None, keypair=None, availability_zone=None, security_groups=None, - block_devices=None, chef_path=None, subnet_id=None, - dns_zones=None, desired_capacity=None, max_capacity=None, - min_capacity=None): + subnet_id=None): if server_type is None: server_type = self.SERVER_TYPE @@ -36,24 +36,17 @@ def __init__(self, group=None, server_type=None, instance_type=None, keypair=keypair, availability_zone=availability_zone, security_groups=security_groups, - block_devices=block_devices, - chef_path=chef_path, subnet_id=subnet_id, - dns_zones=dns_zones, - desired_capacity=desired_capacity, - max_capacity=max_capacity, - min_capacity=min_capacity) + dns_zones=None) - def configure(self): env_prefix = self.environment[0] self.security_groups = [ - "management", - "chef-client", - "{0}-queueproc-jobs".format(env_prefix), - "{0}-queues-jobs".format(env_prefix), + "{0}-management".format(env_prefix), + "{0}-mv-web".format(env_prefix), + "{0}-{1}-web".format(env_prefix, self.group), + "{0}-hudl-{1}".format(env_prefix, self.group), "{0}-web".format(env_prefix), - "{0}-nginx".format(env_prefix), ] self.ingress_groups_to_add = [ @@ -61,6 +54,8 @@ def configure(self): "{0}-nginx".format(env_prefix) ] +# self.user_data = "{'bucket':'hudl-config','key':'" + self.environment[0] + "-mv-web/init.config.json','mongos': '$Mongos', 'mongoServers': '$MongoServers'}" + self.ports_to_authorize = [9000, 9001, 
8095, 8096] self.IAM_ROLE_POLICIES.append('allow-web-initialization-{0}' @@ -70,9 +65,9 @@ def configure(self): #self.IAM_ROLE_POLICIES.append('{0}-{1}-web' # .format(self.environment, self.group)) + def configure(self): + super(IISNode, self).establish_logger() super(IISNode, self).configure() self.resolve_iam_role() - self.launch_configuration() - self.autoscaling_group() self.ingress_rules() diff --git a/tyr/servers/server.py b/tyr/servers/server.py index 3e2811f..d5b6cbe 100644 --- a/tyr/servers/server.py +++ b/tyr/servers/server.py @@ -9,8 +9,6 @@ from boto.ec2.networkinterface import NetworkInterfaceSpecification import json from boto.ec2.networkinterface import NetworkInterfaceCollection -from boto.ec2.autoscale import AutoScaleConnection -from boto.ec2.autoscale import LaunchConfiguration import urllib from boto.vpc import VPCConnection from paramiko.client import AutoAddPolicy, SSHClient @@ -31,8 +29,7 @@ def __init__(self, group=None, server_type=None, instance_type=None, environment=None, ami=None, region=None, role=None, keypair=None, availability_zone=None, security_groups=None, block_devices=None, chef_path=None, subnet_id=None, - dns_zones=None, desired_capacity=None, max_capacity=None, - min_capacity=None, ingress_groups_to_add=None, + dns_zones=None, ingress_groups_to_add=None, ports_to_authorize=None, classic_link=False): self.instance_type = instance_type @@ -50,9 +47,6 @@ def __init__(self, group=None, server_type=None, instance_type=None, self.dns_zones = dns_zones self.subnet_id = subnet_id self.vpc_id = None - self.desired_capacity = desired_capacity - self.max_capacity = max_capacity - self.min_capacity = min_capacity self.ingress_groups_to_add = ingress_groups_to_add self.ports_to_authorize = ports_to_authorize self.classic_link = classic_link @@ -106,12 +100,6 @@ def configure(self): self.log.warn('No environment provided') self.environment = 'test' - if self.desired_capacity is None: - self.desired_capacity = 3 - - if self.max_capacity is None: - self.max_capacity = 3 - self.environment = self.environment.lower() self.log.info('Using Environment "{environment}"'.format( @@ -135,7 +123,6 @@ def configure(self): self.establish_ec2_connection() self.establish_iam_connection() self.establish_route53_connection() - self.establish_autoscale_connection() if self.ami is None: self.log.warn('No AMI provided') @@ -667,14 +654,6 @@ def establish_route53_connection(self): self.log.error(str(e)) raise e - def establish_autoscale_connection(self): - try: - self.autoscale = boto.ec2.autoscale.connect_to_region(self.region) - self.log.info('Established connection to autoscale') - except Exception, e: - self.log.error(str(e)) - raise e - def get_security_group_ids(self, security_groups, vpc_id=None): security_group_ids = [] for group in security_groups: @@ -812,39 +791,6 @@ def route(self, wait=False): self.log.error(str(e)) raise e - def launch_configuration(self): - try: - self.autoscale.get_all_launch_configurations(names=[self.envcl]) - except: - log.info("Creating new launch_configuration: {0}".format(self.envcl)) - #userDataPlainText = "{'bucket':'hudl-config','key':'" + self.environment[0] + "-mv-web/init.config.json','mongos': '$Mongos', 'mongoServers': '$MongoServers'}" - lc = LaunchConfiguration(name=self.envcl, - image_id=self.ami, - key_name=self.keypair, - security_groups=self.security_groups) - conn.create_launch_configuration(lc) - self.launnch_configuration = lc - - def autoscaling_group(self): - try: - exiting_asg = 
self.autoscale.get_all_groups(names=[self.envcl]) - self.log.info('Autoscaling group already exists.') - except: - raise - if not existing_asg: - self.log.info("Creating new autoscaling group: {0}".format(self.group)) - - ag = AutoScalingGroup(name=self.group, - load_balancers=self.load_balancers, - availability_zones=self.availability_zones, - desired_capacity=self.desired_capacity, - health_check_period=self.health_check_grace_period, - launch_config=self.launnch_configuration, - min_size=self.min_size, - max_size=self.max_size, - default_cooldown=cooldown, - connection=self.autoscale) - conn.create_auto_scaling_group(ag) def ingress_rules(self): grp_id = self.get_security_group_ids([self.envcl]) @@ -939,95 +885,95 @@ def run(self, command): return state def bake(self): + if self.CHEF_RUNLIST: + chef_path = os.path.expanduser(self.chef_path) + self.chef_api = chef.autoconfigure(chef_path) - chef_path = os.path.expanduser(self.chef_path) - self.chef_api = chef.autoconfigure(chef_path) - - with self.chef_api: - try: - node = chef.Node(self.name) - node.delete() + with self.chef_api: + try: + node = chef.Node(self.name) + node.delete() - self.log.info('Removed previous chef node "{node}"'.format( - node = self.name)) - except chef.exceptions.ChefServerNotFoundError: - pass - except Exception as e: - self.log.error(str(e)) - raise e + self.log.info('Removed previous chef node "{node}"'.format( + node = self.name)) + except chef.exceptions.ChefServerNotFoundError: + pass + except Exception as e: + self.log.error(str(e)) + raise e - try: - client = chef.Client(self.name) - client = client.delete() + try: + client = chef.Client(self.name) + client = client.delete() - self.log.info('Removed previous chef client "{client}"'.format( - client = self.name)) - except chef.exceptions.ChefServerNotFoundError: - pass - except Exception as e: - self.log.error(str(e)) - raise e + self.log.info('Removed previous chef client "{client}"'.format( + client = self.name)) + except chef.exceptions.ChefServerNotFoundError: + pass + except Exception as e: + self.log.error(str(e)) + raise e - node = chef.Node.create(self.name) + node = chef.Node.create(self.name) - self.chef_node = node + self.chef_node = node - self.log.info('Created new Chef Node "{node}"'.format( - node = self.name)) + self.log.info('Created new Chef Node "{node}"'.format( + node = self.name)) - self.chef_node.chef_environment = self.environment + self.chef_node.chef_environment = self.environment - self.log.info('Set the Chef Environment to "{env}"'.format( - env = self.chef_node.chef_environment)) + self.log.info('Set the Chef Environment to "{env}"'.format( + env = self.chef_node.chef_environment)) - self.chef_node.run_list = self.CHEF_RUNLIST + self.chef_node.run_list = self.CHEF_RUNLIST - self.log.info('Set Chef run list to {list}'.format( - list = self.chef_node.run_list)) + self.log.info('Set Chef run list to {list}'.format( + list = self.chef_node.run_list)) - self.chef_node.save() - self.log.info('Saved the Chef Node configuration') + self.chef_node.save() + self.log.info('Saved the Chef Node configuration') def baked(self): + if self.CHEF_RUNLIST: + self.log.info('Determining status of "{node}"'.format( + node = self.hostname)) - self.log.info('Determining status of "{node}"'.format( - node = self.hostname)) + self.log.info('Waiting for Chef Client to start') - self.log.info('Waiting for Chef Client to start') + while True: + r = self.run('ls -l /var/log') - while True: - r = self.run('ls -l /var/log') - - if 'chef-client.log' in 
r['out']: - break - else: - time.sleep(10) + if 'chef-client.log' in r['out']: + break + else: + time.sleep(10) - self.log.info('Chef Client has started') + self.log.info('Chef Client has started') - self.log.info('Waiting for Chef Client to finish') + self.log.info('Waiting for Chef Client to finish') - while True: - r = self.run('pgrep chef-client') + while True: + r = self.run('pgrep chef-client') - if len(r['out']) > 0: - time.sleep(10) - else: - break + if len(r['out']) > 0: + time.sleep(10) + else: + break - self.log.info('Chef Client has finished') + self.log.info('Chef Client has finished') - self.log.info('Determining Node state') + self.log.info('Determining Node state') - r = self.run('tail /var/log/chef-client.log') + r = self.run('tail /var/log/chef-client.log') - if 'Chef Run complete in' in r['out']: - self.log.info('Chef Client was successful') - return True - else: - self.log.info('Chef Client was not successful') - self.log.debug(r['out']) - return False + if 'Chef Run complete in' in r['out']: + self.log.info('Chef Client was successful') + return True + else: + self.log.info('Chef Client was not successful') + self.log.debug(r['out']) + return False def autorun(self): From 5b918e1ef738b8721917a4011416e4b8dbb27f2c Mon Sep 17 00:00:00 2001 From: Chris Gilbert Date: Mon, 13 Jul 2015 18:19:57 +0100 Subject: [PATCH 04/11] More refactoring and bug fixes --- tyr/autoscaler/autoscaler.py | 39 ++++++++++++++++++++++++---- tyr/clusters/iis.py | 49 ++++++++++++------------------------ tyr/servers/iis/node.py | 42 +++++++++++++++++++++++++------ tyr/servers/server.py | 12 ++++++--- 4 files changed, 93 insertions(+), 49 deletions(-) diff --git a/tyr/autoscaler/autoscaler.py b/tyr/autoscaler/autoscaler.py index 0686ea9..7cde12f 100644 --- a/tyr/autoscaler/autoscaler.py +++ b/tyr/autoscaler/autoscaler.py @@ -27,17 +27,43 @@ def __init__(self, launch_configuration, max_size=1, min_size=1, default_cooldown=300, - availability_zone=None, - health_check_grace_period=300): + availability_zones=None, + subnet_ids=None, + health_check_grace_period=300, + enable_classiclink=False): self.launch_configuration = launch_configuration self.autoscaling_group = autoscaling_group self.desired_capacity = desired_capacity + + # You can set a list of availability zones explicitly, else it will + # just use the one from the node object + if availability_zones: + self.autoscale_availability_zones = availability_zones + else: + self.autoscale_availability_zones = [node_obj.availability_zone] + + # If you set these they must match the availability zones + if subnet_ids: + self.autoscale_subnets = subnet_ids + else: + self.autoscale_subnets = [node_obj.subnet_id] + + self.availability_zones = availability_zones self.node_obj = node_obj self.max_size = max_size self.min_size = min_size self.default_cooldown = default_cooldown self.health_check_grace_period = health_check_grace_period + self.enable_classiclink = enable_classiclink + + if self.enable_classiclink: + self.vpc_security_groups = self.node_obj.get_security_group_ids( + self.node_obj.classic_link_vpc_security_groups) + self.classiclink_vpc_id = node_obj.subnet_id + else: + self.vpc_security_groups = None + self.classiclink_vpc_id = None def establish_autoscale_connection(self): try: @@ -60,7 +86,10 @@ def create_launch_configuration(self): key_name=self.node_obj.keypair, security_groups=self.node_obj.get_security_group_ids( self.node_obj.security_groups), - user_data=self.node_obj.user_data) + 
classic_link_vpc_security_groups=self.vpc_security_groups, + classic_link_vpc_id=self.classiclink_vpc_id, + user_data=self.node_obj.user_data, + instance_profile_name=self.node_obj.role) self.conn.create_launch_configuration(lc) self.launch_configuration = lc @@ -73,14 +102,14 @@ def create_autoscaling_group(self): .format(self.autoscaling_group)) ag = AutoScalingGroup(name=self.autoscaling_group, - availability_zones=[self.node_obj.availability_zone], + availability_zones=self.autoscale_availability_zones, desired_capacity=self.desired_capacity, health_check_period=self.health_check_grace_period, launch_config=self.launch_configuration, min_size=self.min_size, max_size=self.max_size, default_cooldown=self.default_cooldown, - vpc_zone_identifier=[self.node_obj.subnet_id], + vpc_zone_identifier=self.autoscale_subnets, connection=self.conn) self.conn.create_auto_scaling_group(ag) else: diff --git a/tyr/clusters/iis.py b/tyr/clusters/iis.py index cc9c895..7ffe760 100644 --- a/tyr/clusters/iis.py +++ b/tyr/clusters/iis.py @@ -1,6 +1,8 @@ import logging from tyr.servers.iis import IISNode from tyr.autoscaler import AutoScaler +from itertools import cycle + class IISCluster(): log = logging.getLogger('Clusters.IIS') @@ -19,7 +21,7 @@ def __init__(self, group=None, environment=None, ami=None, region=None, - subnet_id=None, + subnet_ids=[], role=None, keypair=None, security_groups=None, @@ -27,14 +29,11 @@ def __init__(self, group=None, desired_capacity=1, max_size=1, min_size=1, - launch_nodes=0, default_cooldown=300, - availability_zone=None, + availability_zones=None, health_check_grace_period=300, launch_configuration=None): - self.nodes = [] - self.group = group self.server_type = server_type self.instance_type = instance_type @@ -42,27 +41,31 @@ def __init__(self, group=None, self.ami = ami self.region = region self.role = role - self.subnet_id = subnet_id + self.subnet_ids = subnet_ids self.keypair = keypair self.security_groups = security_groups self.autoscaling_group = autoscaling_group self.desired_capacity = desired_capacity self.max_size = max_size self.min_size = min_size - self.launch_nodes = launch_nodes self.default_cooldown = default_cooldown - self.availability_zone = availability_zone + self.availability_zones = availability_zones self.health_check_grace_period = health_check_grace_period self.launch_configuration = launch_configuration + if self.availability_zones: + self.node_zone = availability_zones[0] + else: + self.node_zone = None + def provision(self): if not self.launch_configuration: self.launch_configuration = "{0}-{1}-web".format( self.environment[0], self.group) if not self.autoscaling_group: - if self.subnet_id: - templ = "{0}-{1}-web-asg-vpn" + if self.subnet_ids: + templ = "{0}-{1}-web-asg-vpc" else: templ = "{0}-{1}-web-asg" self.autoscaling_group = templ.format( @@ -76,9 +79,9 @@ def provision(self): region=self.region, role=self.role, keypair=self.keypair, - availability_zone=self.availability_zone, + availability_zone=self.node_zone, security_groups=self.security_groups, - subnet_id=self.subnet_id) + subnet_id=self.subnet_ids[0]) node.configure() auto = AutoScaler(launch_configuration=self.launch_configuration, @@ -87,32 +90,12 @@ def provision(self): max_size=self.max_size, min_size=self.min_size, default_cooldown=self.default_cooldown, - availability_zone=self.availability_zone, + availability_zones=self.availability_zones, health_check_grace_period=self.health_check_grace_period, node_obj=node) auto.autorun() - - # You can launch instances manually if 
required, but autoscale will - # launch them automatically if desired_capacity is set > 0 - for i in range(self.launch_nodes): - - node = IISNode(group=self.group, - server_type=self.server_type, - instance_type=self.instance_type, - environment=self.environment, - ami=self.ami, - region=self.region, - role=self.role, - keypair=self.keypair, - availability_zone=self.availability_zone, - security_groups=self.security_groups, - subnet_id=self.subnet_id) - - node.autorun() - self.nodes.append(node) def baked(self): - return all([node.baked() for node in self.nodes]) def autorun(self): diff --git a/tyr/servers/iis/node.py b/tyr/servers/iis/node.py index a9d1595..f7a55b5 100644 --- a/tyr/servers/iis/node.py +++ b/tyr/servers/iis/node.py @@ -1,4 +1,5 @@ from tyr.servers.server import Server +import json class IISNode(Server): @@ -22,11 +23,15 @@ class IISNode(Server): def __init__(self, group=None, server_type=None, instance_type=None, environment=None, ami=None, region=None, role=None, keypair=None, availability_zone=None, security_groups=None, - subnet_id=None): + subnet_id=None, mongos_service="MongosHost", + mongo_servers=None): if server_type is None: server_type = self.SERVER_TYPE + self.mongos_service = mongos_service + self.mongo_servers = mongo_servers + super(IISNode, self).__init__(group=group, server_type=server_type, instance_type=instance_type, environment=environment, @@ -37,7 +42,8 @@ def __init__(self, group=None, server_type=None, instance_type=None, availability_zone=availability_zone, security_groups=security_groups, subnet_id=subnet_id, - dns_zones=None) + dns_zones=None, + add_route53_dns=False) env_prefix = self.environment[0] @@ -49,12 +55,25 @@ def __init__(self, group=None, server_type=None, instance_type=None, "{0}-web".format(env_prefix), ] + self.classic_link_vpc_security_groups = [ + "{0}-management".format(env_prefix), + "{0}-mv-web".format(env_prefix), + "{0}-{1}-web".format(env_prefix, self.group), + "{0}-hudl-{1}".format(env_prefix, self.group), + ] + self.ingress_groups_to_add = [ "{0}-web".format(env_prefix), - "{0}-nginx".format(env_prefix) + "{0}-nginx".format(env_prefix), + "{0}-queueproc-jobs".format(env_prefix) ] -# self.user_data = "{'bucket':'hudl-config','key':'" + self.environment[0] + "-mv-web/init.config.json','mongos': '$Mongos', 'mongoServers': '$MongoServers'}" + if self.mongos_service: + mongo_ops = ("MongosHost", "Disabled", "MongosService") + if self.mongos_service not in mongo_ops: + raise ValueError( + "Mongo service name must be one of: {0}".format( + mongo_ops)) self.ports_to_authorize = [9000, 9001, 8095, 8096] @@ -62,12 +81,19 @@ def __init__(self, group=None, server_type=None, instance_type=None, .format(self.environment)) self.IAM_ROLE_POLICIES.append('allow-outpost-sns-{0}' .format(self.environment)) - #self.IAM_ROLE_POLICIES.append('{0}-{1}-web' - # .format(self.environment, self.group)) def configure(self): super(IISNode, self).establish_logger() super(IISNode, self).configure() - self.resolve_iam_role() - self.ingress_rules() + @property + def user_data(self): + data = {"bucket": "hudl-config", + "key": "{0}-mv-web/init.config.json".format( + self.environment[0]), + "mongos": self.mongos_service, + "mongoServers": self.mongo_servers + } + ud = json.dumps(data) + self.log.info("Setting user data to: {0}".format(ud)) + return ud diff --git a/tyr/servers/server.py b/tyr/servers/server.py index d5b6cbe..277cd15 100644 --- a/tyr/servers/server.py +++ b/tyr/servers/server.py @@ -30,7 +30,8 @@ def __init__(self, group=None, 
server_type=None, instance_type=None, keypair=None, availability_zone=None, security_groups=None, block_devices=None, chef_path=None, subnet_id=None, dns_zones=None, ingress_groups_to_add=None, - ports_to_authorize=None, classic_link=False): + ports_to_authorize=None, classic_link=False, + add_route53_dns=True): self.instance_type = instance_type self.group = group @@ -50,6 +51,7 @@ def __init__(self, group=None, server_type=None, instance_type=None, self.ingress_groups_to_add = ingress_groups_to_add self.ports_to_authorize = ports_to_authorize self.classic_link = classic_link + self.add_route53_dns = add_route53_dns def establish_logger(self): @@ -225,6 +227,9 @@ def configure(self): self.log.info('Using Chef path "{path}"'.format( path = self.chef_path)) + if self.ingress_groups_to_add: + self.ingress_rules() + if self.dns_zones is None: self.log.warn('No DNS zones specified') self.dns_zones = [ @@ -462,7 +467,7 @@ def get_subnet_vpc_id(self, subnet_id): vpc_id = subnets[0].vpc_id return vpc_id elif len(subnets) == 0: - raise NoSubnetReturned("No subnets returned") + raise NoSubnetReturned("No subnets returned for: {}".format(subnet_id)) else: raise Exception("More than 1 subnet returned") @@ -981,5 +986,6 @@ def autorun(self): self.configure() self.launch(wait=True) self.tag() - self.route() + if self.add_route53_dns: + self.route() self.bake() From 076554b53ffa5b1db01f356f9fb907bf3c57d71d Mon Sep 17 00:00:00 2001 From: Chris Gilbert Date: Tue, 14 Jul 2015 09:24:21 +0100 Subject: [PATCH 05/11] Adding route53:ChangeResourceRecordSets --- tyr/policies/ec2.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tyr/policies/ec2.py b/tyr/policies/ec2.py index ec50a8b..d3e7165 100644 --- a/tyr/policies/ec2.py +++ b/tyr/policies/ec2.py @@ -184,7 +184,8 @@ "cloudwatch:ListMetrics", "cloudwatch:PutMetricAlarm", "cloudwatch:PutMetricData", - "cloudwatch:SetAlarmState" + "cloudwatch:SetAlarmState", + "route53:ChangeResourceRecordSets", ], "Sid": "Stmt1370290134000", "Resource": [ From 2df8f3806bcf5dfb66a218b2833cb875ad0751bb Mon Sep 17 00:00:00 2001 From: Chris Gilbert Date: Tue, 14 Jul 2015 09:48:51 +0100 Subject: [PATCH 06/11] Adding route53:ChangeResourceRecordSets --- tyr/policies/ec2.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tyr/policies/ec2.py b/tyr/policies/ec2.py index 53d4456..08b33d4 100644 --- a/tyr/policies/ec2.py +++ b/tyr/policies/ec2.py @@ -136,7 +136,8 @@ "cloudwatch:ListMetrics", "cloudwatch:PutMetricAlarm", "cloudwatch:PutMetricData", - "cloudwatch:SetAlarmState" + "cloudwatch:SetAlarmState", + "route53:ChangeResourceRecordSets" ], "Sid": "Stmt1370290134000", "Resource": [ @@ -185,7 +186,7 @@ "cloudwatch:PutMetricAlarm", "cloudwatch:PutMetricData", "cloudwatch:SetAlarmState", - "route53:ChangeResourceRecordSets", + "route53:ChangeResourceRecordSets" ], "Sid": "Stmt1370290134000", "Resource": [ From 9ec7861a7f3c28bc2dbeaa8fcaa42718a5fccce3 Mon Sep 17 00:00:00 2001 From: Chris Gilbert Date: Tue, 14 Jul 2015 09:51:25 +0100 Subject: [PATCH 07/11] Change security group name --- tyr/servers/iis/node.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tyr/servers/iis/node.py b/tyr/servers/iis/node.py index f7a55b5..b819857 100644 --- a/tyr/servers/iis/node.py +++ b/tyr/servers/iis/node.py @@ -17,7 +17,6 @@ class IISNode(Server): 'allow-set-cloudwatch-alarms', 'allow-remove-cloudwatch-alarms', 'allow-deploy-web-updates', - ] def __init__(self, group=None, server_type=None, instance_type=None, @@ -48,7 +47,7 @@ def 
__init__(self, group=None, server_type=None, instance_type=None, env_prefix = self.environment[0] self.security_groups = [ - "{0}-management".format(env_prefix), + "management", "{0}-mv-web".format(env_prefix), "{0}-{1}-web".format(env_prefix, self.group), "{0}-hudl-{1}".format(env_prefix, self.group), From 0a5736bb2df5cf8d9d19b87216987a542a3a1f41 Mon Sep 17 00:00:00 2001 From: Chris Gilbert Date: Tue, 14 Jul 2015 12:22:23 +0100 Subject: [PATCH 08/11] Fixes for autoscaling groups --- tyr/autoscaler/autoscaler.py | 25 ++-- tyr/clusters/iis.py | 9 +- tyr/servers/server.py | 226 +++++++++++++++++------------------ 3 files changed, 130 insertions(+), 130 deletions(-) diff --git a/tyr/autoscaler/autoscaler.py b/tyr/autoscaler/autoscaler.py index 7cde12f..a0b9641 100644 --- a/tyr/autoscaler/autoscaler.py +++ b/tyr/autoscaler/autoscaler.py @@ -29,8 +29,7 @@ def __init__(self, launch_configuration, default_cooldown=300, availability_zones=None, subnet_ids=None, - health_check_grace_period=300, - enable_classiclink=False): + health_check_grace_period=300): self.launch_configuration = launch_configuration self.autoscaling_group = autoscaling_group @@ -41,29 +40,25 @@ def __init__(self, launch_configuration, if availability_zones: self.autoscale_availability_zones = availability_zones else: - self.autoscale_availability_zones = [node_obj.availability_zone] + self.autoscale_availability_zones = None # If you set these they must match the availability zones if subnet_ids: self.autoscale_subnets = subnet_ids else: - self.autoscale_subnets = [node_obj.subnet_id] + self.autoscale_subnets = None #(get the subnet IDs) + + if not subnet_ids and not availability_zones: + raise ValueError("Must specify either availability_zones or subnets.") + + if subnet_ids and availability_zones: + self.log.warning("Specified both availability_zones and subnets.") - self.availability_zones = availability_zones self.node_obj = node_obj self.max_size = max_size self.min_size = min_size self.default_cooldown = default_cooldown self.health_check_grace_period = health_check_grace_period - self.enable_classiclink = enable_classiclink - - if self.enable_classiclink: - self.vpc_security_groups = self.node_obj.get_security_group_ids( - self.node_obj.classic_link_vpc_security_groups) - self.classiclink_vpc_id = node_obj.subnet_id - else: - self.vpc_security_groups = None - self.classiclink_vpc_id = None def establish_autoscale_connection(self): try: @@ -86,8 +81,6 @@ def create_launch_configuration(self): key_name=self.node_obj.keypair, security_groups=self.node_obj.get_security_group_ids( self.node_obj.security_groups), - classic_link_vpc_security_groups=self.vpc_security_groups, - classic_link_vpc_id=self.classiclink_vpc_id, user_data=self.node_obj.user_data, instance_profile_name=self.node_obj.role) self.conn.create_launch_configuration(lc) diff --git a/tyr/clusters/iis.py b/tyr/clusters/iis.py index 7ffe760..3621821 100644 --- a/tyr/clusters/iis.py +++ b/tyr/clusters/iis.py @@ -42,6 +42,12 @@ def __init__(self, group=None, self.region = region self.role = role self.subnet_ids = subnet_ids + + if subnet_ids: + self.node_subnet = self.subnet_ids[0] + else: + self.node_subnet = None + self.keypair = keypair self.security_groups = security_groups self.autoscaling_group = autoscaling_group @@ -81,7 +87,7 @@ def provision(self): keypair=self.keypair, availability_zone=self.node_zone, security_groups=self.security_groups, - subnet_id=self.subnet_ids[0]) + subnet_id=self.node_subnet) node.configure() auto = 
AutoScaler(launch_configuration=self.launch_configuration, @@ -91,6 +97,7 @@ def provision(self): min_size=self.min_size, default_cooldown=self.default_cooldown, availability_zones=self.availability_zones, + subnet_ids=self.subnet_ids, health_check_grace_period=self.health_check_grace_period, node_obj=node) auto.autorun() diff --git a/tyr/servers/server.py b/tyr/servers/server.py index 1c80ce3..cbd56b2 100644 --- a/tyr/servers/server.py +++ b/tyr/servers/server.py @@ -104,7 +104,7 @@ def configure(self): self.environment = self.environment.lower() self.log.info('Using Environment "{environment}"'.format( - environment=self.environment)) + environment=self.environment)) if self.region is None: self.log.warn('No region provided') @@ -115,11 +115,11 @@ def configure(self): if not valid(self.region): error = '"{region}" is not a valid EC2 region'.format( - region=self.region) + region=self.region) raise RegionDoesNotExist(error) self.log.info('Using EC2 Region "{region}"'.format( - region=self.region)) + region=self.region)) self.establish_ec2_connection() self.establish_iam_connection() @@ -131,7 +131,7 @@ def configure(self): try: self.ec2.get_all_images(image_ids=[self.ami]) - except Exception, e: + except Exception as e: self.log.error(str(e)) if 'Invalid id' in str(e): error = '"{ami}" is not a valid AMI'.format(ami=self.ami) @@ -154,17 +154,17 @@ def configure(self): self.keypair = 'bkaiserkey' valid = lambda k: k in [pair.name for pair in - self.ec2.get_all_key_pairs()] + self.ec2.get_all_key_pairs()] if not valid(self.keypair): error = '"{keypair}" is not a valid EC2 keypair'.format( - keypair=self.keypair) + keypair=self.keypair) raise InvalidKeyPair(error) self.log.info('Using EC2 Key Pair "{keypair}"'.format( - keypair=self.keypair)) + keypair=self.keypair)) - if self.subnet_id is None: + if self.subnet_id is None: if self.availability_zone is None: self.log.warn('No EC2 availability zone provided,' ' using zone c') @@ -177,10 +177,10 @@ def configure(self): self.vpc_id = self.get_subnet_vpc_id(self.subnet_id) self.log.info("Using VPC {vpc_id}".format(vpc_id=self.vpc_id)) self.availability_zone = self.get_subnet_availability_zone( - self.subnet_id) + self.subnet_id) self.log.info("Using VPC, using availability zone " + "{availability_zone}".format( - availability_zone=self.availability_zone)) + availability_zone=self.availability_zone)) if len(self.availability_zone) == 1: self.availability_zone = self.region+self.availability_zone @@ -193,7 +193,7 @@ def configure(self): raise InvalidAvailabilityZone(error) self.log.info('Using EC2 Availability Zone "{zone}"'.format( - zone=self.availability_zone)) + zone=self.availability_zone)) if self.security_groups is None: self.log.warn('No EC2 security groups provided') @@ -202,7 +202,7 @@ def configure(self): self.security_groups.append(self.envcl) self.log.info('Using security groups {groups}'.format( - groups=', '.join(self.security_groups))) + groups=', '.join(self.security_groups))) self.resolve_security_groups() @@ -216,7 +216,7 @@ def configure(self): }] self.log.info('Using EC2 block devices {devices}'.format( - devices=self.block_devices)) + devices=self.block_devices)) if self.chef_path is None: self.log.warn('No Chef path provided') @@ -225,7 +225,7 @@ def configure(self): self.chef_path = os.path.expanduser(self.chef_path) self.log.info('Using Chef path "{path}"'.format( - path=self.chef_path)) + path=self.chef_path)) if self.ingress_groups_to_add: self.ingress_rules() @@ -287,7 +287,7 @@ def location(self): } return 
'{region}{zone}'.format(region=region_map[self.region], - zone=self.availability_zone[-1:]) + zone=self.availability_zone[-1:]) def next_index(self, supplemental={}): @@ -465,21 +465,22 @@ def get_subnet_vpc_id(self, subnet_id): vpc_id = subnets[0].vpc_id return vpc_id elif len(subnets) == 0: - raise NoSubnetReturned("No subnets returned for: {}".format(subnet_id)) + raise NoSubnetReturned("No subnets returned for: {}" + .format(subnet_id)) else: raise Exception("More than 1 subnet returned") def resolve_security_groups(self): filters = {} self.log.info("Resolving security groups") - + # If the server is being spun up in a vpc, search only that vpc exists = lambda s: s in [group.name for group in self.ec2.get_all_security_groups() if self.vpc_id == group.vpc_id] for index, group in enumerate(self.security_groups): - + if not exists(group): self.log.info('Security Group {group} does not exist' .format(group=group)) @@ -503,7 +504,7 @@ def resolve_iam_role(self): try: profile = self.iam.get_instance_profile(self.role) profile_exists = True - except Exception, e: + except Exception as e: if '404 Not Found' in str(e): pass else: @@ -514,16 +515,16 @@ def resolve_iam_role(self): try: instance_profile = self.iam.create_instance_profile(self.role) self.log.info('Created IAM Profile {profile}'.format( - profile=self.role)) + profile=self.role)) - except Exception, e: + except Exception as e: self.log.error(str(e)) raise e try: role = self.iam.get_role(self.role) role_exists = True - except Exception, e: + except Exception as e: if '404 Not Found' in str(e): pass else: @@ -538,9 +539,9 @@ def resolve_iam_role(self): self.iam.add_role_to_instance_profile(self.role, self.role) self.log.info('Attached Role {role}' ' to Profile {profile}'.format( - role=self.role, profile=self.role)) + role=self.role, profile=self.role)) - except Exception, e: + except Exception as e: self.log.error(str(e)) raise e @@ -559,22 +560,22 @@ def resolve_iam_role(self): if policy not in existing_policies: self.log.info('Policy "{policy}" does not exist'.format( - policy=policy)) + policy=policy)) try: self.iam.put_role_policy(self.role, policy, policies[policy]) self.log.info('Added policy "{policy}"'.format( - policy=policy)) - except Exception, e: + policy=policy)) + except Exception as e: self.log.error(str(e)) raise e else: self.log.info('Policy "{policy}" already exists'.format( - policy=policy)) + policy=policy)) tyr_copy = json.loads(policies[policy]) @@ -587,19 +588,19 @@ def resolve_iam_role(self): if tyr_copy == aws_copy: self.log.info('Policy "{policy}" is accurate'.format( - policy=policy)) + policy=policy)) else: self.log.warn('Policy "{policy}" has been modified'.format( - policy=policy)) + policy=policy)) try: self.iam.delete_role_policy(self.role, policy) self.log.info('Removed policy "{policy}"'.format( - policy=policy)) - except Exception, e: + policy=policy)) + except Exception as e: self.log.error(str(e)) raise e @@ -608,21 +609,21 @@ def resolve_iam_role(self): policies[policy]) self.log.info('Added policy "{policy}"'.format( - policy=policy)) - except Exception, e: + policy=policy)) + except Exception as e: self.log.error(str(e)) raise e def establish_ec2_connection(self): self.log.info('Using EC2 Region "{region}"'.format( - region=self.region)) + region=self.region)) self.log.info("Attempting to connect to EC2") try: self.ec2 = boto.ec2.connect_to_region(self.region) self.log.info('Established connection to EC2') - except Exception, e: + except Exception as e: self.log.error(str(e)) raise e @@ -668,7 
+669,7 @@ def get_security_group_ids(self, security_groups, vpc_id=None): security_groups = [group for group in self.ec2.get_all_security_groups( - filters=filters) + filters=filters) if self.vpc_id == group.vpc_id] if len(security_groups) == 1: @@ -755,7 +756,7 @@ def route(self, wait=False): for record in dns_zone['records']: self.log.info('Processing DNS record {record}'.format( - record=record)) + record=record)) formatting_params = { 'hostname': self.hostname, @@ -773,10 +774,10 @@ def route(self, wait=False): record['value'] = record['value'].format(**formatting_params) self.log.info('Adding DNS record {record}'.format( - record=record)) + record=record)) existing_records = zone.find_records(name=record['name'], - type=record['type']) + type=record['type']) if existing_records: self.log.info('The DNS record already exists') @@ -799,25 +800,24 @@ def route(self, wait=False): self.log.error(str(e)) raise e - def ingress_rules(self): grp_id = self.get_security_group_ids([self.envcl]) - grps = self.ec2.get_all_security_groups(group_ids=grp_id) + main_group = self.ec2.get_all_security_groups(group_ids=grp_id) for ing in self.ingress_groups_to_add: self.log.info('Adding ingress rules for group: {0}' .format(ing)) grp_id = self.get_security_group_ids([ing]) grp_obj = self.ec2.get_all_security_groups(group_ids=grp_id[0])[0] for port in self.ports_to_authorize: - self.log.info("Adding port {0} to {1}.".format(port, ing)) + self.log.info("Adding port {0} from {1} to {2}." + .format(port, ing, main_group[0])) try: - grps[0].authorize(ip_protocol='tcp', + main_group[0].authorize(ip_protocol='tcp', from_port=port, to_port=port, src_group=grp_obj) - except boto.exception.EC2ResponseError: - self.log.warning("Unable to add ingress rule. May already exist.") - + except boto.exception.EC2ResponseError as e: + self.log.warning("Unable to add ingress rule. May already exist. 
") @property def connection(self): @@ -850,8 +850,8 @@ def connection(self): keys = [os.path.expanduser(key) for key in keys] connection.connect(self.instance.private_dns_name, - username='ec2-user', - key_filename=keys) + username='ec2-user', + key_filename=keys) break except Exception: self.log.warn('Unable to establish SSH connection') @@ -893,95 +893,95 @@ def run(self, command): return state def bake(self): - if self.CHEF_RUNLIST: - chef_path = os.path.expanduser(self.chef_path) - self.chef_api = chef.autoconfigure(chef_path) + if self.CHEF_RUNLIST: + chef_path = os.path.expanduser(self.chef_path) + self.chef_api = chef.autoconfigure(chef_path) - with self.chef_api: - try: - node = chef.Node(self.name) - node.delete() + with self.chef_api: + try: + node = chef.Node(self.name) + node.delete() - self.log.info('Removed previous chef node "{node}"'.format( - node=self.name)) - except chef.exceptions.ChefServerNotFoundError: - pass - except Exception as e: - self.log.error(str(e)) - raise e + self.log.info('Removed previous chef node "{node}"'.format( + node=self.name)) + except chef.exceptions.ChefServerNotFoundError: + pass + except Exception as e: + self.log.error(str(e)) + raise e - try: - client = chef.Client(self.name) - client = client.delete() + try: + client = chef.Client(self.name) + client = client.delete() - self.log.info('Removed previous chef client "{client}"'.format( - client=self.name)) - except chef.exceptions.ChefServerNotFoundError: - pass - except Exception as e: - self.log.error(str(e)) - raise e + self.log.info('Removed previous chef client "{client}"' + .format(client=self.name)) + except chef.exceptions.ChefServerNotFoundError: + pass + except Exception as e: + self.log.error(str(e)) + raise e - node = chef.Node.create(self.name) + node = chef.Node.create(self.name) - self.chef_node = node + self.chef_node = node - self.log.info('Created new Chef Node "{node}"'.format( - node=self.name)) + self.log.info('Created new Chef Node "{node}"'.format( + node=self.name)) - self.chef_node.chef_environment = self.environment + self.chef_node.chef_environment = self.environment - self.log.info('Set the Chef Environment to "{env}"'.format( - env=self.chef_node.chef_environment)) + self.log.info('Set the Chef Environment to "{env}"'.format( + env=self.chef_node.chef_environment)) - self.chef_node.run_list = self.CHEF_RUNLIST + self.chef_node.run_list = self.CHEF_RUNLIST - self.log.info('Set Chef run list to {list}'.format( - list=self.chef_node.run_list)) + self.log.info('Set Chef run list to {list}'.format( + list=self.chef_node.run_list)) - self.chef_node.save() - self.log.info('Saved the Chef Node configuration') + self.chef_node.save() + self.log.info('Saved the Chef Node configuration') def baked(self): if self.CHEF_RUNLIST: - self.log.info('Determining status of "{node}"'.format( - node=self.hostname)) + self.log.info('Determining status of "{node}"'.format( + node=self.hostname)) - self.log.info('Waiting for Chef Client to start') + self.log.info('Waiting for Chef Client to start') - while True: - r = self.run('ls -l /var/log') + while True: + r = self.run('ls -l /var/log') - if 'chef-client.log' in r['out']: - break - else: - time.sleep(10) + if 'chef-client.log' in r['out']: + break + else: + time.sleep(10) - self.log.info('Chef Client has started') + self.log.info('Chef Client has started') - self.log.info('Waiting for Chef Client to finish') + self.log.info('Waiting for Chef Client to finish') - while True: - r = self.run('pgrep chef-client') + while True: + r = 
self.run('pgrep chef-client') - if len(r['out']) > 0: - time.sleep(10) - else: - break + if len(r['out']) > 0: + time.sleep(10) + else: + break - self.log.info('Chef Client has finished') + self.log.info('Chef Client has finished') - self.log.info('Determining Node state') + self.log.info('Determining Node state') - r = self.run('tail /var/log/chef-client.log') + r = self.run('tail /var/log/chef-client.log') - if 'Chef Run complete in' in r['out']: - self.log.info('Chef Client was successful') - return True - else: - self.log.info('Chef Client was not successful') - self.log.debug(r['out']) - return False + if 'Chef Run complete in' in r['out']: + self.log.info('Chef Client was successful') + return True + else: + self.log.info('Chef Client was not successful') + self.log.debug(r['out']) + return False def autorun(self): @@ -990,5 +990,5 @@ def autorun(self): self.launch(wait=True) self.tag() if self.add_route53_dns: - self.route() + self.route() self.bake() From 18d4263de4a3646fe449e35ef29ecfc390c902bd Mon Sep 17 00:00:00 2001 From: Chris Gilbert Date: Fri, 17 Jul 2015 15:09:26 +0100 Subject: [PATCH 09/11] Fixes following code review --- tyr/autoscaler/__init__.py | 1 - tyr/clusters/__init__.py | 1 + .../autoscaler.py => clusters/autoscaling.py} | 48 +++++++++---------- tyr/clusters/iis.py | 27 ++++------- tyr/servers/iis/node.py | 4 +- tyr/servers/server.py | 15 +++--- 6 files changed, 43 insertions(+), 53 deletions(-) delete mode 100644 tyr/autoscaler/__init__.py rename tyr/{autoscaler/autoscaler.py => clusters/autoscaling.py} (72%) diff --git a/tyr/autoscaler/__init__.py b/tyr/autoscaler/__init__.py deleted file mode 100644 index 8d53e68..0000000 --- a/tyr/autoscaler/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from autoscaler import AutoScaler \ No newline at end of file diff --git a/tyr/clusters/__init__.py b/tyr/clusters/__init__.py index 4f72526..2772e8d 100644 --- a/tyr/clusters/__init__.py +++ b/tyr/clusters/__init__.py @@ -1,2 +1,3 @@ from mongo import MongoCluster from iis import IISCluster +from autoscaling import AutoScaler \ No newline at end of file diff --git a/tyr/autoscaler/autoscaler.py b/tyr/clusters/autoscaling.py similarity index 72% rename from tyr/autoscaler/autoscaler.py rename to tyr/clusters/autoscaling.py index a0b9641..0c81f8b 100644 --- a/tyr/autoscaler/autoscaler.py +++ b/tyr/clusters/autoscaling.py @@ -1,4 +1,3 @@ -from boto.ec2.autoscale import AutoScaleConnection from boto.ec2.autoscale import LaunchConfiguration from boto.ec2.autoscale import AutoScalingGroup import boto.ec2 @@ -9,17 +8,6 @@ class AutoScaler(object): ''' Autoscaler setup class ''' - - log = logging.getLogger('Clusters.AutoScaler') - log.setLevel(logging.DEBUG) - ch = logging.StreamHandler() - ch.setLevel(logging.DEBUG) - formatter = logging.Formatter( - '%(asctime)s [%(name)s] %(levelname)s: %(message)s', - datefmt='%H:%M:%S') - ch.setFormatter(formatter) - log.addHandler(ch) - def __init__(self, launch_configuration, autoscaling_group, node_obj, @@ -30,6 +18,7 @@ def __init__(self, launch_configuration, availability_zones=None, subnet_ids=None, health_check_grace_period=300): + self.log = logging.getLogger('Tyr.Clusters.AutoScaler') self.launch_configuration = launch_configuration self.autoscaling_group = autoscaling_group @@ -46,10 +35,13 @@ def __init__(self, launch_configuration, if subnet_ids: self.autoscale_subnets = subnet_ids else: - self.autoscale_subnets = None #(get the subnet IDs) + self.autoscale_subnets = None # TODO: (get the subnet IDs) if not subnet_ids and not 
availability_zones: - raise ValueError("Must specify either availability_zones or subnets.") + self.log.critical( + "Must specify either availability_zones or subnets.") + raise ValueError( + "Must specify either availability_zones or subnets.") if subnet_ids and availability_zones: self.log.warning("Specified both availability_zones and subnets.") @@ -62,24 +54,26 @@ def __init__(self, launch_configuration, def establish_autoscale_connection(self): try: - self.conn = boto.ec2.autoscale.connect_to_region(self.node_obj.region) + self.conn = boto.ec2.autoscale.connect_to_region( + self.node_obj.region) self.log.info('Established connection to autoscale') except: raise def create_launch_configuration(self): - self.log.info("Getting launch_configuration: {0}" - .format(self.launch_configuration)) + self.log.info("Getting launch_configuration: {l}" + .format(l=self.launch_configuration)) lc = self.conn.get_all_launch_configurations( names=[self.launch_configuration]) if not lc: - self.log.info("Creating new launch_configuration: {0}" - .format(self.launch_configuration)) + self.log.info("Creating new launch_configuration: {l}" + .format(l=self.launch_configuration)) lc = LaunchConfiguration(name=self.launch_configuration, image_id=self.node_obj.ami, key_name=self.node_obj.keypair, - security_groups=self.node_obj.get_security_group_ids( + security_groups=self.node_obj. + get_security_group_ids( self.node_obj.security_groups), user_data=self.node_obj.user_data, instance_profile_name=self.node_obj.role) @@ -91,13 +85,15 @@ def create_autoscaling_group(self): names=[self.autoscaling_group]) if not existing_asg: - self.log.info("Creating new autoscaling group: {0}" - .format(self.autoscaling_group)) + self.log.info("Creating new autoscaling group: {g}" + .format(g=self.autoscaling_group)) ag = AutoScalingGroup(name=self.autoscaling_group, - availability_zones=self.autoscale_availability_zones, + availability_zones=self. + autoscale_availability_zones, desired_capacity=self.desired_capacity, - health_check_period=self.health_check_grace_period, + health_check_period=self. + health_check_grace_period, launch_config=self.launch_configuration, min_size=self.min_size, max_size=self.max_size, @@ -106,8 +102,8 @@ def create_autoscaling_group(self): connection=self.conn) self.conn.create_auto_scaling_group(ag) else: - self.log.info('Autoscaling group {0} already exists.' - .format(self.autoscaling_group)) + self.log.info('Autoscaling group {g} already exists.' 
+ .format(g=self.autoscaling_group)) def autorun(self): self.establish_autoscale_connection() diff --git a/tyr/clusters/iis.py b/tyr/clusters/iis.py index 3621821..6155dfb 100644 --- a/tyr/clusters/iis.py +++ b/tyr/clusters/iis.py @@ -1,19 +1,9 @@ import logging from tyr.servers.iis import IISNode -from tyr.autoscaler import AutoScaler -from itertools import cycle +from tyr.clusters.autoscaling import AutoScaler class IISCluster(): - log = logging.getLogger('Clusters.IIS') - log.setLevel(logging.DEBUG) - ch = logging.StreamHandler() - ch.setLevel(logging.DEBUG) - formatter = logging.Formatter( - '%(asctime)s [%(name)s] %(levelname)s: %(message)s', - datefmt='%H:%M:%S') - ch.setFormatter(formatter) - log.addHandler(ch) def __init__(self, group=None, server_type=None, @@ -34,6 +24,7 @@ def __init__(self, group=None, health_check_grace_period=300, launch_configuration=None): + self.log = logging.getLogger('Tyr.Clusters.IISCluster') self.group = group self.server_type = server_type self.instance_type = instance_type @@ -43,11 +34,6 @@ def __init__(self, group=None, self.role = role self.subnet_ids = subnet_ids - if subnet_ids: - self.node_subnet = self.subnet_ids[0] - else: - self.node_subnet = None - self.keypair = keypair self.security_groups = security_groups self.autoscaling_group = autoscaling_group @@ -59,6 +45,11 @@ def __init__(self, group=None, self.health_check_grace_period = health_check_grace_period self.launch_configuration = launch_configuration + if subnet_ids: + self.node_subnet = self.subnet_ids[0] + else: + self.node_subnet = None + if self.availability_zones: self.node_zone = availability_zones[0] else: @@ -77,6 +68,7 @@ def provision(self): self.autoscaling_group = templ.format( self.environment[0], self.group) + # Template to use with an autoscaling group node = IISNode(group=self.group, server_type=self.server_type, instance_type=self.instance_type, @@ -98,7 +90,8 @@ def provision(self): default_cooldown=self.default_cooldown, availability_zones=self.availability_zones, subnet_ids=self.subnet_ids, - health_check_grace_period=self.health_check_grace_period, + health_check_grace_period=self. 
+ health_check_grace_period, node_obj=node) auto.autorun() diff --git a/tyr/servers/iis/node.py b/tyr/servers/iis/node.py index b819857..b1c49f9 100644 --- a/tyr/servers/iis/node.py +++ b/tyr/servers/iis/node.py @@ -77,9 +77,9 @@ def __init__(self, group=None, server_type=None, instance_type=None, self.ports_to_authorize = [9000, 9001, 8095, 8096] self.IAM_ROLE_POLICIES.append('allow-web-initialization-{0}' - .format(self.environment)) + .format(self.environment)) self.IAM_ROLE_POLICIES.append('allow-outpost-sns-{0}' - .format(self.environment)) + .format(self.environment)) def configure(self): super(IISNode, self).establish_logger() diff --git a/tyr/servers/server.py b/tyr/servers/server.py index cbd56b2..1e52060 100644 --- a/tyr/servers/server.py +++ b/tyr/servers/server.py @@ -60,7 +60,7 @@ def establish_logger(self): except: pass - log = logging.getLogger(self.__class__.__name__) + log = logging.getLogger('Tyr.{c}'.format(c=self.__class__.__name__)) if not log.handlers: log.setLevel(logging.DEBUG) @@ -801,23 +801,24 @@ def route(self, wait=False): raise e def ingress_rules(self): - grp_id = self.get_security_group_ids([self.envcl]) + grp_id = self.get_security_group_ids([self.envcl], vpc_id=self.vpc_id) main_group = self.ec2.get_all_security_groups(group_ids=grp_id) for ing in self.ingress_groups_to_add: self.log.info('Adding ingress rules for group: {0}' .format(ing)) - grp_id = self.get_security_group_ids([ing]) + grp_id = self.get_security_group_ids([ing], vpc_id=self.vpc_id) grp_obj = self.ec2.get_all_security_groups(group_ids=grp_id[0])[0] for port in self.ports_to_authorize: self.log.info("Adding port {0} from {1} to {2}." .format(port, ing, main_group[0])) try: main_group[0].authorize(ip_protocol='tcp', - from_port=port, - to_port=port, - src_group=grp_obj) + from_port=port, + to_port=port, + src_group=grp_obj) except boto.exception.EC2ResponseError as e: - self.log.warning("Unable to add ingress rule. May already exist. ") + self.log.warning( + "Unable to add ingress rule. May already exist. 
") @property def connection(self): From a2797262cb6280a6848e39415496e41c8aeea383 Mon Sep 17 00:00:00 2001 From: Chris Gilbert Date: Fri, 17 Jul 2015 16:11:37 +0100 Subject: [PATCH 10/11] Fixes for logging --- tyr/clusters/autoscaling.py | 3 ++- tyr/clusters/iis.py | 4 +++- tyr/servers/server.py | 24 ++++++++++++------------ 3 files changed, 17 insertions(+), 14 deletions(-) diff --git a/tyr/clusters/autoscaling.py b/tyr/clusters/autoscaling.py index 0c81f8b..e783182 100644 --- a/tyr/clusters/autoscaling.py +++ b/tyr/clusters/autoscaling.py @@ -18,7 +18,8 @@ def __init__(self, launch_configuration, availability_zones=None, subnet_ids=None, health_check_grace_period=300): - self.log = logging.getLogger('Tyr.Clusters.AutoScaler') + self.log = logging.getLogger('tyr.clusters.AutoScaler') + self.log.setLevel(logging.DEBUG) self.launch_configuration = launch_configuration self.autoscaling_group = autoscaling_group diff --git a/tyr/clusters/iis.py b/tyr/clusters/iis.py index 6155dfb..30bd958 100644 --- a/tyr/clusters/iis.py +++ b/tyr/clusters/iis.py @@ -24,7 +24,7 @@ def __init__(self, group=None, health_check_grace_period=300, launch_configuration=None): - self.log = logging.getLogger('Tyr.Clusters.IISCluster') + self.log = logging.getLogger('tyr.clusters.IISCluster') self.group = group self.server_type = server_type self.instance_type = instance_type @@ -57,6 +57,7 @@ def __init__(self, group=None, def provision(self): + self.log.info('Provisioning IISCluster') if not self.launch_configuration: self.launch_configuration = "{0}-{1}-web".format( self.environment[0], self.group) @@ -82,6 +83,7 @@ def provision(self): subnet_id=self.node_subnet) node.configure() + self.log.info('Creating autoscaler') auto = AutoScaler(launch_configuration=self.launch_configuration, autoscaling_group=self.autoscaling_group, desired_capacity=self.desired_capacity, diff --git a/tyr/servers/server.py b/tyr/servers/server.py index 1e52060..1b9f961 100644 --- a/tyr/servers/server.py +++ b/tyr/servers/server.py @@ -60,19 +60,19 @@ def establish_logger(self): except: pass - log = logging.getLogger('Tyr.{c}'.format(c=self.__class__.__name__)) + log = logging.getLogger('tyr.{c}' + .format(c=self.__class__.__name__)) + log.setLevel(logging.DEBUG) + self.log = log if not log.handlers: - log.setLevel(logging.DEBUG) - ch = logging.StreamHandler() - ch.setLevel(logging.DEBUG) - formatter = logging.Formatter( - '%(asctime)s [%(name)s] %(levelname)s: %(message)s', - datefmt='%H:%M:%S') - ch.setFormatter(formatter) - log.addHandler(ch) - - self.log = log + # Configure a root logger + logging.basicConfig(level=logging.INFO, + format='%(asctime)s [%(name)s]' + ' %(levelname)s: %(message)s', + datefmt='%H:%M:%S') + # Reduce boto logging + logging.getLogger('boto').setLevel(logging.CRITICAL) def configure(self): @@ -81,7 +81,7 @@ def configure(self): self.instance_type = 't2.medium' self.log.info('Using Instance Type "{instance_type}"'.format( - instance_type=self.instance_type)) + instance_type=self.instance_type)) if self.group is None: self.log.warn('No group provided') From 489b601e40cef3a5ed3cb12c42be14bc3f4f5609 Mon Sep 17 00:00:00 2001 From: Chris Gilbert Date: Fri, 17 Jul 2015 16:58:16 +0100 Subject: [PATCH 11/11] change default mongo servers to empty string as per PS script --- tyr/servers/iis/node.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tyr/servers/iis/node.py b/tyr/servers/iis/node.py index b1c49f9..4acc0c5 100644 --- a/tyr/servers/iis/node.py +++ b/tyr/servers/iis/node.py @@ -6,7 +6,7 @@ 
class IISNode(Server):

     SERVER_TYPE = 'web'
-    AMI_NAME = ''
+    AMI_NAME = None

     # Do not run chef
     CHEF_RUNLIST = []
@@ -23,7 +23,7 @@ def __init__(self, group=None, server_type=None, instance_type=None,
                   environment=None, ami=None, region=None, role=None,
                   keypair=None, availability_zone=None, security_groups=None,
                   subnet_id=None, mongos_service="MongosHost",
-                  mongo_servers=None):
+                  mongo_servers=""):

         if server_type is None:
             server_type = self.SERVER_TYPE
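
A side note on the exception-handling cleanups in the server.py hunks above: the comma form of `except` that they remove is Python-2-only syntax, while the `except ... as e` form the patches switch to is accepted by Python 2.6+ and Python 3 alike. A minimal illustration:

    try:
        raise RuntimeError('boom')
    except Exception as e:      # valid on Python 2.6+ and 3.x
        print(e)                # the removed "except Exception, e:" form is a
                                # SyntaxError on Python 3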
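
The resolve_iam_role() hunks above compare tyr's local policy document against the copy stored on the role, and only delete and re-put the policy when the two actually differ. The comparison is done on parsed JSON rather than raw strings, so whitespace and key order do not force a rewrite. A self-contained sketch of that check (the helper name and sample documents are illustrative, not from the patch):

    import json

    def policy_modified(tyr_policy, aws_policy):
        # Parse both documents so only semantic differences count;
        # resolve_iam_role() removes and re-adds the role policy when
        # this returns True.
        return json.loads(tyr_policy) != json.loads(aws_policy)

    local = '{"Statement": [{"Effect": "Allow", "Action": ["ec2:DescribeInstances"]}]}'
    remote = '{"Statement":[{"Action":["ec2:DescribeInstances"],"Effect":"Allow"}]}'
    print(policy_modified(local, remote))  # False: same policy, different formatting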
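
Patch 09 wires the renamed AutoScaler into IISCluster.provision(): a single IISNode is configured as a template, its AMI, keypair, security groups, user data and IAM role feed the launch configuration, and the autoscaling group is created only if it does not already exist. When no launch configuration name is given, one is built from the environment's first letter and the group ("<e>-<group>-web"). A rough usage sketch, assuming the keyword names shown in the diff (the concrete values are invented for illustration, and provision() talks to EC2/IAM/autoscaling, so real AWS credentials are required):

    from tyr.clusters import IISCluster

    cluster = IISCluster(group='someapp',               # hypothetical group name
                         instance_type='t2.medium',
                         environment='stage',
                         subnet_ids=['subnet-0example'],  # or availability_zones=[...]
                         desired_capacity=2)
    cluster.provision()   # builds the template node, launch config and ASG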
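
Patch 10 replaces the per-class StreamHandler setup with a single root handler configured through logging.basicConfig, child loggers named under the 'tyr.' prefix, and boto's own logger turned down to CRITICAL. The practical effect is that every tyr.* logger propagates to one consistently formatted handler instead of attaching its own. A small runnable sketch of that layout (a module-level script, not the patched method itself):

    import logging

    # One root handler, formatted the same way as in establish_logger()
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s [%(name)s] %(levelname)s: %(message)s',
                        datefmt='%H:%M:%S')

    # Reduce boto's own logging noise
    logging.getLogger('boto').setLevel(logging.CRITICAL)

    # Child loggers just pick a dotted name; records propagate to the root handler
    log = logging.getLogger('tyr.clusters.AutoScaler')
    log.setLevel(logging.DEBUG)
    log.info('no per-class StreamHandler needed')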