diff --git a/marathon_lb.py b/marathon_lb.py index caa75a0d..4f73c8bc 100755 --- a/marathon_lb.py +++ b/marathon_lb.py @@ -1208,6 +1208,8 @@ def compareMapFile(map_file, map_string): def get_health_check(app, portIndex): + if 'healthChecks' not in app: + return None for check in app['healthChecks']: if check.get('port'): return check diff --git a/tests/1-nginx-marathon1.5.json b/tests/1-nginx-marathon1.5.json new file mode 100644 index 00000000..2c32b8a8 --- /dev/null +++ b/tests/1-nginx-marathon1.5.json @@ -0,0 +1,34 @@ +{ + "id": "nginx", + "container": { + "type": "DOCKER", + "docker": { + "image": "brndnmtthws/nginx-echo-sleep", + "forcePullImage":true + }, + "portMappings": [ + { "hostPort": 0, "containerPort": 8080, "servicePort": 10000 } + ] + }, + "networks": [ + { "mode": "container/bridge" } + ], + "instances": 5, + "cpus": 0.1, + "mem": 65, + "healthChecks": [{ + "protocol": "MESOS_HTTP", + "path": "/", + "portIndex": 0, + "timeoutSeconds": 15, + "gracePeriodSeconds": 15, + "intervalSeconds": 3, + "maxConsecutiveFailures": 10 + }], + "labels":{ + "HAPROXY_DEPLOYMENT_GROUP":"nginx", + "HAPROXY_DEPLOYMENT_ALT_PORT":"10001", + "HAPROXY_GROUP":"external" + }, + "acceptedResourceRoles":["*", "slave_public"] +} diff --git a/tests/marathon15_apps.json b/tests/marathon15_apps.json new file mode 100644 index 00000000..b07318d4 --- /dev/null +++ b/tests/marathon15_apps.json @@ -0,0 +1,87 @@ +{ + "apps": [ + { + "id": "/pywebserver", + "backoffFactor": 1.15, + "backoffSeconds": 1, + "cmd": "echo \"host $HOST and port $PORT\" > index.html && python -m http.server 80", + "container": { + "type": "DOCKER", + "docker": { + "forcePullImage": false, + "image": "python:3", + "parameters": [], + "privileged": false + }, + "volumes": [], + "portMappings": [ + { + "containerPort": 80, + "hostPort": 0, + "labels": {}, + "name": "test", + "protocol": "tcp", + "servicePort": 10101 + } + ] + }, + "cpus": 0.1, + "disk": 0, + "executor": "", + "instances": 1, + "labels": { + "HAPROXY_GROUP": "external", + "HAPROXY_0_VHOST": "myvhost.com" + }, + "maxLaunchDelaySeconds": 3600, + "mem": 128, + "gpus": 0, + "networks": [ + { + "mode": "container/bridge" + } + ], + "requirePorts": false, + "upgradeStrategy": { + "maximumOverCapacity": 1, + "minimumHealthCapacity": 1 + }, + "version": "2017-07-19T17:34:41.967Z", + "versionInfo": { + "lastScalingAt": "2017-07-19T17:34:41.967Z", + "lastConfigChangeAt": "2017-07-19T17:34:41.967Z" + }, + "killSelection": "YOUNGEST_FIRST", + "unreachableStrategy": { + "inactiveAfterSeconds": 300, + "expungeAfterSeconds": 600 + }, + "tasksStaged": 0, + "tasksRunning": 1, + "tasksHealthy": 0, + "tasksUnhealthy": 0, + "deployments": [], + "tasks": [ + { + "ipAddresses": [ + { + "ipAddress": "172.17.0.2", + "protocol": "IPv4" + } + ], + "stagedAt": "2017-07-19T17:34:43.039Z", + "state": "TASK_RUNNING", + "ports": [ + 1565 + ], + "startedAt": "2017-07-19T17:35:15.654Z", + "version": "2017-07-19T17:34:41.967Z", + "id": "pywebserver.8cad6a69-6ca8-11e7-beb2-0e2beceebfcc", + "appId": "/pywebserver", + "slaveId": "db7b40e2-791c-445f-b373-183e2a648a86-S1", + "host": "10.0.2.148" + } + ] + } + ] +} diff --git a/tests/test_marathon_lb.py b/tests/test_marathon_lb.py index e0562c43..25112348 100644 --- a/tests/test_marathon_lb.py +++ b/tests/test_marathon_lb.py @@ -1219,6 +1219,63 @@ def test_config_simple_app_balance(self): ''' self.assertMultiLineEqual(config, expected) + def test_bridge_app_marathon15(self): + with open('tests/marathon15_apps.json') as data_file: + apps = json.load(data_file) + 
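+        # Minimal stand-in for the Marathon client object, providing just
+        # the methods that marathon_lb.get_apps() calls on it.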
+ class Marathon: + def __init__(self, data): + self.data = data + + def list(self): + return self.data + + def health_check(self): + return True + + def strict_mode(self): + return False + + groups = ['external'] + bind_http_https = True + ssl_certs = "" + templater = marathon_lb.ConfigTemplater() + apps = marathon_lb.get_apps(Marathon(apps['apps'])) + config = marathon_lb.config(apps, groups, bind_http_https, + ssl_certs, templater) + expected = self.base_config + ''' +frontend marathon_http_in + bind *:80 + mode http + acl host_myvhost_com_pywebserver hdr(host) -i myvhost.com + use_backend pywebserver_10101 if host_myvhost_com_pywebserver + +frontend marathon_http_appid_in + bind *:9091 + mode http + acl app__pywebserver hdr(x-marathon-app-id) -i /pywebserver + use_backend pywebserver_10101 if app__pywebserver + +frontend marathon_https_in + bind *:443 ssl crt /etc/ssl/cert.pem + mode http + use_backend pywebserver_10101 if { ssl_fc_sni myvhost.com } + +frontend pywebserver_10101 + bind *:10101 + mode http + use_backend pywebserver_10101 + +backend pywebserver_10101 + balance roundrobin + mode http + option forwardfor + http-request set-header X-Forwarded-Port %[dst_port] + http-request add-header X-Forwarded-Proto https if { ssl_fc } + server 10_0_2_148_1565 10.0.2.148:1565 +''' + self.assertMultiLineEqual(config, expected) + def test_zdd_app(self): with open('tests/zdd_apps.json') as data_file: zdd_apps = json.load(data_file) diff --git a/tests/test_utils.py b/tests/test_utils.py index 50eb6496..3443224c 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -102,6 +102,48 @@ def test_get_task_ip_and_ports_ip_per_task_no_ip_marathon13(self): self.assertEquals(result, expected) + def test_get_task_ip_and_ports_ip_per_task_marathon15(self): + app = { + 'container': { + 'type': 'DOCKER', + 'docker': { + 'image': 'nginx' + }, + 'portMappings': [ + { + 'containerPort': 80, + 'servicePort': 10000, + }, + { + 'containerPort': 81, + 'servicePort': 10001, + }, + ] + }, + 'networks': [ + { + 'mode': 'container', + 'name': 'dcos' + } + ] + } + task = { + "id": "testtaskid", + "ipAddresses": [{"ipAddress": "1.2.3.4"}] + } + + result = utils.get_task_ip_and_ports(app, task) + expected = ("1.2.3.4", [80, 81]) + self.assertEquals(result, expected) + + task_no_ip = { + "id": "testtaskid", + } + + result = utils.get_task_ip_and_ports(app, task_no_ip) + expected = (None, None) + self.assertEquals(result, expected) + def test_get_task_ip_and_ports_portmapping_null(self): app = { 'ipAddress': {}, @@ -289,6 +331,38 @@ def test_ip_per_task_marathon13(self): self.assertEquals(self.assigner.get_service_ports(app), [10000, 10001]) + def test_ip_per_task_marathon15(self): + app = { + 'container': { + 'type': 'DOCKER', + 'docker': { + 'image': 'nginx' + }, + 'portMappings': [ + { + 'containerPort': 80, + 'servicePort': 10000, + }, + { + 'containerPort': 81, + 'servicePort': 10001, + }, + ], + }, + 'networks': [ + { + 'mode': 'container', + 'name': 'dcos' + } + ], + 'tasks': [{ + "id": "testtaskid", + "ipAddresses": [{"ipAddress": "1.2.3.4"}] + }], + } + self.assertEquals(self.assigner.get_service_ports(app), + [10000, 10001]) + def test_ip_per_task_portMappings_null(self): app = { 'ipAddress': {}, diff --git a/tests/test_zdd.py b/tests/test_zdd.py index 07886d09..29c6218a 100644 --- a/tests/test_zdd.py +++ b/tests/test_zdd.py @@ -8,7 +8,7 @@ class Arguments: - json = 'tests/1-nginx.json' + json = None force = False marathon = "http://marathon" marathon_lb = "http://marathon-lb:9090" @@ -40,6 +40,20 @@ 
def _load_listeners(): return zdd.parse_haproxy_stats(f.read()) +def _arg_cases(): + args1 = Arguments() + args2 = Arguments() + args1.json = 'tests/1-nginx.json' + args2.json = 'tests/1-nginx-marathon1.5.json' + return [args1, args2] + + +def _apps_cases(): + apps1 = json.loads(open('tests/zdd_app_blue.json').read()) + apps2 = json.loads(open('tests/zdd_app_blue_marathon1.5.json').read()) + return [apps1, apps2] + + class TestBluegreenDeploy(unittest.TestCase): @mock.patch('zdd.scale_marathon_app_instances') @@ -48,18 +62,20 @@ def test_scale_new_app_instances_up_50_percent(self, mock): existing instances if we have not yet met or surpassed the amount of instances deployed by old_app """ - new_app = { - 'instances': 10, - 'labels': { - 'HAPROXY_DEPLOYMENT_TARGET_INSTANCES': 30 + def run(args): + args.initial_instances = 5 + new_app = { + 'instances': 10, + 'labels': { + 'HAPROXY_DEPLOYMENT_TARGET_INSTANCES': 30 + } } - } - old_app = {'instances': 30} - args = Arguments() - args.initial_instances = 5 - zdd.scale_new_app_instances(args, new_app, old_app) - mock.assert_called_with( - args, new_app, 15) + old_app = {'instances': 30} + zdd.scale_new_app_instances(args, new_app, old_app) + mock.assert_called_with( + args, new_app, 15) + for a in _arg_cases(): + run(a) @mock.patch('zdd.scale_marathon_app_instances') def test_scale_new_app_instances_to_target(self, mock): @@ -67,18 +83,20 @@ def test_scale_new_app_instances_to_target(self, mock): amount of instances deployed for old_app, go right to our deployment target amount of instances for new_app """ - new_app = { - 'instances': 10, - 'labels': { - 'HAPROXY_DEPLOYMENT_TARGET_INSTANCES': 30 + def run(args): + new_app = { + 'instances': 10, + 'labels': { + 'HAPROXY_DEPLOYMENT_TARGET_INSTANCES': 30 + } } - } - old_app = {'instances': 8} - args = Arguments() - args.initial_instances = 5 - zdd.scale_new_app_instances(args, new_app, old_app) - mock.assert_called_with( - args, new_app, 30) + old_app = {'instances': 8} + args.initial_instances = 5 + zdd.scale_new_app_instances(args, new_app, old_app) + mock.assert_called_with( + args, new_app, 30) + for a in _arg_cases(): + run(a) @mock.patch('zdd.scale_marathon_app_instances') def test_scale_new_app_instances_hybrid(self, mock): @@ -86,58 +104,59 @@ def test_scale_new_app_instances_hybrid(self, mock): amount of instances deployed for old_app, go right to our deployment target amount of instances for new_app """ - new_app = { - 'instances': 10, - 'labels': { - 'HAPROXY_DEPLOYMENT_NEW_INSTANCES': 15, - 'HAPROXY_DEPLOYMENT_TARGET_INSTANCES': 30 + def run(args): + new_app = { + 'instances': 10, + 'labels': { + 'HAPROXY_DEPLOYMENT_NEW_INSTANCES': 15, + 'HAPROXY_DEPLOYMENT_TARGET_INSTANCES': 30 + } } - } - old_app = {'instances': 20} - args = Arguments() - args.initial_instances = 5 - zdd.scale_new_app_instances(args, new_app, old_app) - mock.assert_called_with( - args, new_app, 15) + old_app = {'instances': 20} + args.initial_instances = 5 + zdd.scale_new_app_instances(args, new_app, old_app) + mock.assert_called_with( + args, new_app, 15) + for a in _arg_cases(): + run(a) def test_find_drained_task_ids(self): - listeners = _load_listeners() - haproxy_instance_count = 2 - apps = json.loads(open('tests/zdd_app_blue.json').read()) - app = apps['apps'][0] - - results = \ - zdd.find_drained_task_ids(app, - listeners, - haproxy_instance_count) - - assert app['tasks'][0]['id'] in results # 2 l's down, no sessions - assert app['tasks'][1]['id'] not in results # 1 l up, 1 down - assert app['tasks'][2]['id'] 
not in results # 2 l's d, 1 w/ scur/qcur + def run(apps): + listeners = _load_listeners() + haproxy_instance_count = 2 + app = apps['apps'][0] + results = zdd.find_drained_task_ids(app, + listeners, + haproxy_instance_count) + assert app['tasks'][0]['id'] in results # 2l's down, no sessions + assert app['tasks'][1]['id'] not in results # 1l up, 1down + assert app['tasks'][2]['id'] not in results # 2l's d, 1w/scur/qcur + for a in _apps_cases(): + run(a) def test_find_draining_task_ids(self): - listeners = _load_listeners() - haproxy_instance_count = 2 - apps = json.loads(open('tests/zdd_app_blue.json').read()) - app = apps['apps'][0] - - results = \ - zdd.find_draining_task_ids(app, - listeners, - haproxy_instance_count) - - assert app['tasks'][0]['id'] in results # 2 l's down, no sessions - assert app['tasks'][1]['id'] not in results # 1 l up, 1 down - assert app['tasks'][2]['id'] in results # 2 l's down, 1 w/ scur/qcur + def run(apps): + listeners = _load_listeners() + haproxy_instance_count = 2 + app = apps['apps'][0] + results = zdd.find_draining_task_ids(app, + listeners, + haproxy_instance_count) + assert app['tasks'][0]['id'] in results # 2l's down, no sessions + assert app['tasks'][1]['id'] not in results # 1l up, 1 down + assert app['tasks'][2]['id'] in results # 2l's down, 1w/scur/qcur + for a in _apps_cases(): + run(a) def test_get_svnames_from_tasks(self): - apps = json.loads(open('tests/zdd_app_blue.json').read()) - tasks = apps['apps'][0]['tasks'] - - task_svnames = zdd.get_svnames_from_tasks(apps, tasks) - assert '10_0_6_25_16916' in task_svnames - assert '10_0_6_25_31184' in task_svnames - assert '10_0_6_25_23336' in task_svnames + def run(apps): + tasks = apps['apps'][0]['tasks'] + task_svnames = zdd.get_svnames_from_tasks(apps, tasks) + assert '10_0_6_25_16916' in task_svnames + assert '10_0_6_25_31184' in task_svnames + assert '10_0_6_25_23336' in task_svnames + for a in _apps_cases(): + run(a) def test_parse_haproxy_stats(self): with open('tests/haproxy_stats.csv') as f: @@ -152,25 +171,25 @@ def test_parse_haproxy_stats(self): @mock.patch('subprocess.check_call') def test_pre_kill_hook(self, mock): # TODO(BM): This test is naive. An end-to-end test would be nice. 
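+        # Exercise the hook with both the Marathon 1.4 and 1.5 app
+        # definitions supplied by _arg_cases().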
- args = Arguments() - args.pre_kill_hook = 'myhook' - old_app = { - 'id': 'oldApp' - } - new_app = { - 'id': 'newApp' - } - tasks_to_kill = ['task1', 'task2'] - - zdd.execute_pre_kill_hook(args, - old_app, - tasks_to_kill, - new_app) - - mock.assert_called_with([args.pre_kill_hook, - '{"id": "oldApp"}', - '["task1", "task2"]', - '{"id": "newApp"}']) + def run(args): + args.pre_kill_hook = 'myhook' + old_app = { + 'id': 'oldApp' + } + new_app = { + 'id': 'newApp' + } + tasks_to_kill = ['task1', 'task2'] + zdd.execute_pre_kill_hook(args, + old_app, + tasks_to_kill, + new_app) + mock.assert_called_with([args.pre_kill_hook, + '{"id": "oldApp"}', + '["task1", "task2"]', + '{"id": "newApp"}']) + for a in _arg_cases(): + run(a) @mock.patch('zdd.fetch_combined_haproxy_stats', mock.Mock(side_effect=lambda _: _load_listeners())) @@ -191,11 +210,13 @@ def test_fetch_app_listeners(self): mock.Mock(side_effect=lambda hostname: (hostname, [], ['127.0.0.1', '127.0.0.2']))) def test_get_marathon_lb_urls(self): - marathon_lb_urls = zdd.get_marathon_lb_urls(Arguments()) - - assert 'http://127.0.0.1:9090' in marathon_lb_urls - assert 'http://127.0.0.2:9090' in marathon_lb_urls - assert 'http://127.0.0.3:9090' not in marathon_lb_urls + def run(args): + marathon_lb_urls = zdd.get_marathon_lb_urls(args) + assert 'http://127.0.0.1:9090' in marathon_lb_urls + assert 'http://127.0.0.2:9090' in marathon_lb_urls + assert 'http://127.0.0.3:9090' not in marathon_lb_urls + for a in _arg_cases(): + run(a) @mock.patch('requests.get', mock.Mock(side_effect=lambda k, auth: @@ -204,9 +225,10 @@ def test_simple(self): # This test just checks the output of the program against # some expected output from six import StringIO - + args = Arguments() + args.json = 'tests/1-nginx.json' out = StringIO() - zdd.do_zdd(Arguments(), out) + zdd.do_zdd(args, out) output = json.loads(out.getvalue()) output['labels']['HAPROXY_DEPLOYMENT_STARTED_AT'] = "" @@ -257,6 +279,75 @@ def test_simple(self): }, "mem": 65 } +''') + expected['labels']['HAPROXY_DEPLOYMENT_STARTED_AT'] = "" + self.assertEqual(output, expected) + + @mock.patch('requests.get', + mock.Mock(side_effect=lambda k, auth: + MyResponse('tests/zdd_app_blue_marathon1.5.json'))) + def test_simple_marathon15(self): + # This test just checks the output of the program against + # some expected output + from six import StringIO + args = Arguments() + args.json = 'tests/1-nginx-marathon1.5.json' + out = StringIO() + zdd.do_zdd(args, out) + output = json.loads(out.getvalue()) + output['labels']['HAPROXY_DEPLOYMENT_STARTED_AT'] = "" + + expected = json.loads('''{ + "acceptedResourceRoles": [ + "*", + "slave_public" + ], + "container": { + "docker": { + "forcePullImage": true, + "image": "brndnmtthws/nginx-echo-sleep" + }, + "type": "DOCKER", + "portMappings": [ + { + "containerPort": 8080, + "hostPort": 0, + "servicePort": 10001 + } + ] + }, + "networks": [ + { + "mode": "container/bridge" + } + ], + "cpus": 0.1, + "healthChecks": [ + { + "gracePeriodSeconds": 15, + "intervalSeconds": 3, + "maxConsecutiveFailures": 10, + "path": "/", + "portIndex": 0, + "protocol": "MESOS_HTTP", + "timeoutSeconds": 15 + } + ], + "id": "/nginx-blue", + "instances": 1, + "labels": { + "HAPROXY_0_PORT": "10000", + "HAPROXY_APP_ID": "nginx", + "HAPROXY_DEPLOYMENT_ALT_PORT": "10001", + "HAPROXY_DEPLOYMENT_COLOUR": "blue", + "HAPROXY_DEPLOYMENT_GROUP": "nginx", + "HAPROXY_DEPLOYMENT_NEW_INSTANCES": "0", + "HAPROXY_DEPLOYMENT_STARTED_AT": "2016-02-01T15:51:38.184623", + "HAPROXY_DEPLOYMENT_TARGET_INSTANCES": "3", 
+ "HAPROXY_GROUP": "external" + }, + "mem": 65 +} ''') expected['labels']['HAPROXY_DEPLOYMENT_STARTED_AT'] = "" self.assertEqual(output, expected) @@ -268,9 +359,9 @@ def test_hybrid(self): # This test just checks the output of the program against # some expected output from six import StringIO - - out = StringIO() args = Arguments() + args.json = 'tests/1-nginx.json' + out = StringIO() args.new_instances = 1 zdd.do_zdd(args, out) output = json.loads(out.getvalue()) @@ -323,6 +414,76 @@ def test_hybrid(self): }, "mem": 65 } +''') + expected['labels']['HAPROXY_DEPLOYMENT_STARTED_AT'] = "" + self.assertEqual(output, expected) + + @mock.patch('requests.get', + mock.Mock(side_effect=lambda k, auth: + MyResponse('tests/zdd_app_blue_marathon1.5.json'))) + def test_hybrid_marathon15(self): + # This test just checks the output of the program against + # some expected output + from six import StringIO + args = Arguments() + args.json = 'tests/1-nginx-marathon1.5.json' + out = StringIO() + args.new_instances = 1 + zdd.do_zdd(args, out) + output = json.loads(out.getvalue()) + output['labels']['HAPROXY_DEPLOYMENT_STARTED_AT'] = "" + + expected = json.loads('''{ + "acceptedResourceRoles": [ + "*", + "slave_public" + ], + "container": { + "docker": { + "forcePullImage": true, + "image": "brndnmtthws/nginx-echo-sleep" + }, + "portMappings": [ + { + "containerPort": 8080, + "hostPort": 0, + "servicePort": 10001 + } + ], + "type": "DOCKER" + }, + "networks": [ + { + "mode": "container/bridge" + } + ], + "cpus": 0.1, + "healthChecks": [ + { + "gracePeriodSeconds": 15, + "intervalSeconds": 3, + "maxConsecutiveFailures": 10, + "path": "/", + "portIndex": 0, + "protocol": "MESOS_HTTP", + "timeoutSeconds": 15 + } + ], + "id": "/nginx-blue", + "instances": 1, + "labels": { + "HAPROXY_0_PORT": "10000", + "HAPROXY_APP_ID": "nginx", + "HAPROXY_DEPLOYMENT_ALT_PORT": "10001", + "HAPROXY_DEPLOYMENT_COLOUR": "blue", + "HAPROXY_DEPLOYMENT_GROUP": "nginx", + "HAPROXY_DEPLOYMENT_NEW_INSTANCES": "1", + "HAPROXY_DEPLOYMENT_STARTED_AT": "2016-02-01T15:51:38.184623", + "HAPROXY_DEPLOYMENT_TARGET_INSTANCES": "3", + "HAPROXY_GROUP": "external" + }, + "mem": 65 +} ''') expected['labels']['HAPROXY_DEPLOYMENT_STARTED_AT'] = "" self.assertEqual(output, expected) @@ -333,7 +494,8 @@ def test_hybrid(self): def test_complete_cur_exception(self): # This test just checks the output of the program against # some expected output - - args = Arguments() - args.complete_cur = True - self.assertRaises(InvalidArgException, zdd.do_zdd, args) + def run(args): + args.complete_cur = True + self.assertRaises(InvalidArgException, zdd.do_zdd, args) + for a in _arg_cases(): + run(a) diff --git a/tests/zdd_app_blue_marathon1.5.json b/tests/zdd_app_blue_marathon1.5.json new file mode 100644 index 00000000..4daf209a --- /dev/null +++ b/tests/zdd_app_blue_marathon1.5.json @@ -0,0 +1,178 @@ +{ + "apps": [ + { + "id": "/nginx-green", + "acceptedResourceRoles": [ + "*", + "slave_public" + ], + "backoffFactor": 1.15, + "backoffSeconds": 1, + "container": { + "type": "DOCKER", + "docker": { + "forcePullImage": true, + "image": "brndnmtthws/nginx-echo-sleep", + "parameters": [], + "privileged": false + }, + "volumes": [], + "portMappings": [ + { + "containerPort": 8080, + "hostPort": 0, + "labels": {}, + "protocol": "tcp", + "servicePort": 10000 + } + ] + }, + "cpus": 0.1, + "disk": 0, + "executor": "", + "healthChecks": [ + { + "gracePeriodSeconds": 15, + "intervalSeconds": 3, + "maxConsecutiveFailures": 10, + "path": "/", + "portIndex": 0, + "protocol": 
"MESOS_HTTP", + "timeoutSeconds": 15, + "delaySeconds": 15 + } + ], + "instances": 3, + "labels": { + "HAPROXY_DEPLOYMENT_GROUP": "nginx", + "HAPROXY_GROUP": "external", + "HAPROXY_DEPLOYMENT_COLOUR": "green", + "HAPROXY_DEPLOYMENT_TARGET_INSTANCES": "20", + "HAPROXY_DEPLOYMENT_STARTED_AT": "2016-02-01T14:13:42.499089", + "HAPROXY_DEPLOYMENT_ALT_PORT": "10001", + "HAPROXY_0_PORT": "10000", + "HAPROXY_APP_ID": "nginx" + }, + "maxLaunchDelaySeconds": 3600, + "mem": 65, + "gpus": 0, + "networks": [ + { + "mode": "container/bridge" + } + ], + "requirePorts": false, + "upgradeStrategy": { + "maximumOverCapacity": 1, + "minimumHealthCapacity": 1 + }, + "version": "2017-07-14T15:13:34.402Z", + "versionInfo": { + "lastScalingAt": "2017-07-14T15:13:34.402Z", + "lastConfigChangeAt": "2017-07-14T15:13:34.402Z" + }, + "killSelection": "YOUNGEST_FIRST", + "unreachableStrategy": { + "inactiveAfterSeconds": 300, + "expungeAfterSeconds": 600 + }, + "tasksStaged": 0, + "tasksRunning": 3, + "tasksHealthy": 3, + "tasksUnhealthy": 0, + "deployments": [], + "tasks": [ + { + "ipAddresses": [ + { + "ipAddress": "172.17.0.2", + "protocol": "IPv4" + } + ], + "stagedAt": "2017-07-14T15:13:35.804Z", + "state": "TASK_RUNNING", + "ports": [ + 16916 + ], + "startedAt": "2017-07-14T15:13:58.326Z", + "version": "2017-07-14T15:13:34.402Z", + "id": "nginx-green.01da9a15-68a7-11e7-a229-e6a514ca0c21", + "appId": "/nginx-green", + "slaveId": "c28a2184-c702-482c-91c8-5af9318434d4-S0", + "host": "10.0.6.25", + "healthCheckResults": [ + { + "alive": true, + "consecutiveFailures": 0, + "firstSuccess": "2017-07-14T15:14:18.395Z", + "lastFailure": null, + "lastSuccess": "2017-07-14T15:14:18.395Z", + "lastFailureCause": null, + "instanceId": "nginx-green.marathon-01da9a15-68a7-11e7-a229-e6a514ca0c21" + } + ] + }, + { + "ipAddresses": [ + { + "ipAddress": "172.17.0.2", + "protocol": "IPv4" + } + ], + "stagedAt": "2017-07-14T15:13:35.816Z", + "state": "TASK_RUNNING", + "ports": [ + 31184 + ], + "startedAt": "2017-07-14T15:13:57.633Z", + "version": "2017-07-14T15:13:34.402Z", + "id": "nginx-green.01dc95e6-68a7-11e7-a229-e6a514ca0c21", + "appId": "/nginx-green", + "slaveId": "c28a2184-c702-482c-91c8-5af9318434d4-S1", + "host": "10.0.6.25", + "healthCheckResults": [ + { + "alive": true, + "consecutiveFailures": 0, + "firstSuccess": "2017-07-14T15:14:17.698Z", + "lastFailure": null, + "lastSuccess": "2017-07-14T15:14:17.698Z", + "lastFailureCause": null, + "instanceId": "nginx-green.marathon-01dc95e6-68a7-11e7-a229-e6a514ca0c21" + } + ] + }, + { + "ipAddresses": [ + { + "ipAddress": "172.17.0.3", + "protocol": "IPv4" + } + ], + "stagedAt": "2017-07-14T15:13:35.760Z", + "state": "TASK_RUNNING", + "ports": [ + 23336 + ], + "startedAt": "2017-07-14T15:13:57.639Z", + "version": "2017-07-14T15:13:34.402Z", + "id": "nginx-green.01bf22d4-68a7-11e7-a229-e6a514ca0c21", + "appId": "/nginx-green", + "slaveId": "c28a2184-c702-482c-91c8-5af9318434d4-S1", + "host": "10.0.6.25", + "healthCheckResults": [ + { + "alive": true, + "consecutiveFailures": 0, + "firstSuccess": "2017-07-14T15:14:17.702Z", + "lastFailure": null, + "lastSuccess": "2017-07-14T15:14:17.702Z", + "lastFailureCause": null, + "instanceId": "nginx-green.marathon-01bf22d4-68a7-11e7-a229-e6a514ca0c21" + } + ] + } + ] + } + ] +} diff --git a/utils.py b/utils.py index bd0e43e1..de451d40 100644 --- a/utils.py +++ b/utils.py @@ -114,12 +114,10 @@ def get_service_ports(self, app): :return: The list of ports. 
Note that if auto-assigning and ports
            become exhausted, a port may be returned as None.
        """
-        # Are we using 'USER' network?
-        if is_user_network(app):
+        mode = get_app_networking_mode(app)
+        if mode == "container" or mode == "container/bridge":
             # Here we must use portMappings
-            portMappings = app.get('container', {})\
-                .get('docker', {})\
-                .get('portMappings', [])
+            portMappings = get_app_port_mappings(app)
             if portMappings:
                 ports = filter(lambda p: p is not None,
                                map(lambda p: p.get('servicePort', None),
@@ -135,7 +133,8 @@ def get_service_ports(self, app):
                                app.get('portDefinitions', []))
                         )
         ports = list(ports)  # wtf python?
-        if not ports and is_ip_per_task(app) and self.can_assign \
+        # This supports legacy ip-per-container for Marathon 1.4.x and prior
+        if not ports and mode == "container" and self.can_assign \
                 and len(app['tasks']) > 0:
             task = app['tasks'][0]
             _, task_ports = get_task_ip_and_ports(app, task)
@@ -275,25 +274,136 @@ def set(self, lru_cache):
 
 ip_cache = LRUCacheSingleton()
 
 
-def is_ip_per_task(app):
-    """
-    Return whether the application is using IP-per-task.
-    :param app: The application to check.
-    :return: True if using IP per task, False otherwise.
-    """
-    return app.get('ipAddress') is not None
+def get_app_networking_mode(app):
+    """
+    Return the app's networking mode: one of 'host', 'container' or
+    'container/bridge'.
+    """
+    mode = 'host'
+    if app.get('ipAddress'):
+        mode = 'container'
 
 
-def is_user_network(app):
-    """
-    Returns True if container network mode is set to USER
-    :param app: The application to check.
-    :return: True if using USER network, False otherwise.
-    """
-    c = app.get('container', {})
-    return c is not None and c.get('type', '') == 'DOCKER' and \
-        c.get('docker', {})\
-        .get('network', '') == 'USER'
+    _mode = app.get('container', {})\
+        .get('docker', {})\
+        .get('network', '')
+    if _mode == 'USER':
+        mode = 'container'
+    elif _mode == 'BRIDGE':
+        mode = 'container/bridge'
+
+    networks = app.get('networks', [])
+    for n in networks:
+        # Modes cannot be mixed, so assigning the last mode is fine
+        mode = n.get('mode', 'container')
+
+    return mode
+
+
+def get_task_ip(task, mode):
+    """
+    Return the IP address for a task, or None if no address has been
+    allocated or resolved yet.
+    """
+    if mode == 'container':
+        task_ip_addresses = task.get('ipAddresses', [])
+        if not task_ip_addresses:
+            logger.warning("Task %s does not yet have an ip address allocated",
+                           task['id'])
+            return None
+        task_ip = task_ip_addresses[0].get('ipAddress')
+        if not task_ip:
+            logger.warning("Task %s does not yet have an ip address allocated",
+                           task['id'])
+            return None
+        return task_ip
+    else:
+        host = task.get('host')
+        if not host:
+            logger.warning("Could not find task host, ignoring")
+            return None
+        task_ip = resolve_ip(host)
+        if not task_ip:
+            logger.warning("Could not resolve ip for host %s, ignoring",
+                           host)
+        return task_ip
+
+
+def get_app_port_mappings(app):
+    """
+    Return the app's port mappings, from container.docker.portMappings
+    (Marathon < 1.5) or container.portMappings (Marathon >= 1.5).
+    Returns an empty list if the app has neither.
+    """
+    portMappings = app.get('container', {})\
+        .get('docker', {})\
+        .get('portMappings')
+    if portMappings:
+        return portMappings
+
+    portMappings = app.get('container', {})\
+        .get('portMappings', [])
+    return portMappings
+
+
+def get_task_ports(task):
+    return task.get('ports')
+
+
+def get_port_definition_ports(app):
+    port_definitions = app.get('portDefinitions', [])
+    task_ports = [p['port']
+                  for p in port_definitions
+                  if 'port' in p]
+    if len(task_ports) == 0:
+        return None
+    return task_ports
+
+
+def get_ip_address_discovery_ports(app):
+    ip_address = app.get('ipAddress', {})
+    if ip_address:
+        discovery = ip_address.get('discovery', {})
+        task_ports = [int(p['number'])
+                      for p in discovery.get('ports', [])
+                      if 'number' in p]
+        if len(task_ports) > 0:
+            return task_ports
+    return None
+
+
+def get_port_mapping_ports(app):
+    port_mappings = get_app_port_mappings(app)
+    task_ports = [p['containerPort']
+                  for p in port_mappings
+                  if 'containerPort' in p]
+    if len(task_ports) == 0:
+        return None
+    return task_ports
+
+
+def get_app_task_ports(app, task, mode):
+    """
+    Return the ports to use for a task, given the app's networking mode.
+    """
+    if mode == 'host':
+        task_ports = get_task_ports(task)
+        if task_ports:
+            return task_ports
+        return get_port_definition_ports(app)
+    elif mode == 'container/bridge':
+        task_ports = get_task_ports(task)
+        if task_ports:
+            return task_ports
+        # Will only work for Marathon < 1.5
+        task_ports = get_port_definition_ports(app)
+        if task_ports:
+            return task_ports
+        return get_port_mapping_ports(app)
+    else:
+        task_ports = get_ip_address_discovery_ports(app)
+        if task_ports:
+            return task_ports
+        return get_port_mapping_ports(app)
 
 
 def get_task_ip_and_ports(app, task):
@@ -307,44 +417,11 @@
     :return: Tuple of (ip address, [ports]). Returns (None, None) if no IP
         address could be resolved or found for the task.
     """
-    # If the app ipAddress field is present and not None then this app is using
-    # IP per task. The ipAddress may be an empty dictionary though, in which
-    # case there are no discovery ports. At the moment, Mesos only supports a
-    # single IP address, so just take the first IP in the list.
-    if is_ip_per_task(app):
-        logger.debug("Using IP per container")
-
-        task_ip_addresses = task.get('ipAddresses')
-        if not task_ip_addresses:
-            logger.warning("Task %s does not yet have an ip address allocated",
-                           task['id'])
-            return None, None
-
-        task_ip = task_ip_addresses[0]['ipAddress']
-
-        # Are we using 'USER' network?
-        if is_user_network(app):
-            # in this case, we pull the port from portMappings
-            portMappings = app.get('container', {})\
-                .get('docker', {})\
-                .get('portMappings', [])
-            _port_mappings = portMappings or app.get('portDefinitions', [])
-            _attr = 'containerPort' if portMappings else 'port'
-            task_ports = [p.get(_attr)
-                          for p in _port_mappings
-                          if _attr in p]
-        else:
-            discovery = app['ipAddress'].get('discovery', {})
-            task_ports = [int(port['number'])
-                          for port in discovery.get('ports', [])]
-    else:
-        logger.debug("Using host port mapping")
-        task_ports = task.get('ports', [])
-        task_ip = resolve_ip(task['host'])
-        if not task_ip:
-            logger.warning("Could not resolve ip for host %s, ignoring",
-                           task['host'])
-            return None, None
+    mode = get_app_networking_mode(app)
+    task_ip = get_task_ip(task, mode)
+    task_ports = get_app_task_ports(app, task, mode)
+    if not (task_ip and task_ports):
+        return None, None
 
     logger.debug("Returning: %r, %r", task_ip, task_ports)
     return task_ip, task_ports
diff --git a/zdd.py b/zdd.py
index f726f911..a6b08710 100755
--- a/zdd.py
+++ b/zdd.py
@@ -18,7 +18,7 @@
 from common import (get_marathon_auth_params, set_logging_args,
                     set_marathon_auth_args, setup_logging)
-from utils import get_task_ip_and_ports
+from utils import (get_task_ip_and_ports, get_app_port_mappings)
 from zdd_exceptions import (
     AppCreateException, AppDeleteException, AppScaleException,
     InvalidArgException, MarathonEndpointException,
@@ -496,24 +496,40 @@ def deploy_marathon_app(args, app):
 
 
 def get_service_port(app):
-    try:
-        return \
-            int(app['container']['docker']['portMappings'][0]['servicePort'])
-    except KeyError:
-        try:
-            return \
-                int(app['portDefinitions'][0]['port'])
-        except KeyError:
-            return int(app['ports'][0])
+    portMappings = get_app_port_mappings(app)
+    if len(portMappings) > 0:
+        servicePort = portMappings[0].get('servicePort')
+        if servicePort:
+            return int(servicePort)
+    portDefinitions = app.get('portDefinitions', [])
+    if len(portDefinitions) > 0:
+        port = portDefinitions[0].get('port')
+        if port:
+            return int(port)
+    ports = app.get('ports', [])
+    if len(ports) > 0:
+        return int(ports[0])
+    raise MissingFieldException("App doesn't contain a service port",
+                                'container.portMappings')
 
 
 def set_service_port(app, servicePort):
-    try:
-        app['container']['docker']['portMappings'][0]['servicePort'] = \
-            int(servicePort)
-    except KeyError:
-        app['ports'][0] = int(servicePort)
-
+    container = app.get('container', {})
+    portMappings = container.get('docker', {}).get('portMappings', [])
+    if len(portMappings) > 0:
+        app['container']['docker']['portMappings'][0]['servicePort'] = \
+            int(servicePort)
+        return app
+    portMappings = container.get('portMappings', [])
+    if len(portMappings) > 0:
+        app['container']['portMappings'][0]['servicePort'] = \
+            int(servicePort)
+        return app
+    portDefinitions = app.get('portDefinitions', [])
+    if len(portDefinitions) > 0:
+        app['portDefinitions'][0]['port'] = int(servicePort)
+        return app
+    app['ports'][0] = int(servicePort)
     return app
@@ -547,21 +563,15 @@ def set_app_ids(app, colour):
 
 def set_service_ports(app, servicePort):
     app['labels']['HAPROXY_0_PORT'] = str(get_service_port(app))
-    try:
-        app['container']['docker']['portMappings'][0]['servicePort'] = \
-            int(servicePort)
-        return app
-    except KeyError:
-        app['ports'][0] = int(servicePort)
-        return app
+    return set_service_port(app, servicePort)
 
 
 def select_next_port(app):
     alt_port = int(app['labels']['HAPROXY_DEPLOYMENT_ALT_PORT'])
-    if int(app['ports'][0]) == alt_port:
-        return int(app['labels']['HAPROXY_0_PORT'])
-    else:
-        return alt_port
+    if 'ports' in app:
+        if int(app['ports'][0]) == alt_port:
+            return int(app['labels']['HAPROXY_0_PORT'])
+    return alt_port
 
 
 def select_next_colour(app):