From 328c8f195c41ecf4421ebffdbdcfe847e3f33384 Mon Sep 17 00:00:00 2001 From: izapolsk Date: Sun, 13 May 2018 23:06:20 +0300 Subject: [PATCH 1/2] replacing old openshift api with new one --- README.rst | 2 +- wrapanapi/__init__.py | 1 - wrapanapi/containers/__init__.py | 179 -------------- wrapanapi/containers/container.py | 22 -- wrapanapi/containers/deployment_config.py | 16 -- wrapanapi/containers/image.py | 59 ----- wrapanapi/containers/image_registry.py | 51 ---- wrapanapi/containers/node.py | 22 -- wrapanapi/containers/pod.py | 15 -- wrapanapi/containers/project.py | 15 -- .../containers/providers/rhkubernetes.py | 195 --------------- wrapanapi/containers/providers/rhopenshift.py | 227 +++++++++++------- wrapanapi/containers/replicator.py | 16 -- wrapanapi/containers/route.py | 8 - wrapanapi/containers/service.py | 15 -- wrapanapi/containers/template.py | 8 - wrapanapi/containers/volume.py | 23 -- 17 files changed, 139 insertions(+), 735 deletions(-) delete mode 100644 wrapanapi/containers/container.py delete mode 100644 wrapanapi/containers/deployment_config.py delete mode 100644 wrapanapi/containers/image.py delete mode 100644 wrapanapi/containers/image_registry.py delete mode 100644 wrapanapi/containers/node.py delete mode 100644 wrapanapi/containers/pod.py delete mode 100644 wrapanapi/containers/project.py delete mode 100644 wrapanapi/containers/providers/rhkubernetes.py delete mode 100644 wrapanapi/containers/replicator.py delete mode 100644 wrapanapi/containers/route.py delete mode 100644 wrapanapi/containers/service.py delete mode 100644 wrapanapi/containers/template.py delete mode 100644 wrapanapi/containers/volume.py diff --git a/README.rst b/README.rst index ce7b570b..43750314 100644 --- a/README.rst +++ b/README.rst @@ -23,12 +23,12 @@ wrapanapi is a simple virtualization client with support (in varying degrees) fo * Red Hat Enterprize Virtualization (RHEV) * Red Hat Openstack (RHOS) +* Red Hat Openshift * Openshift * VMware vCenter * Microsoft SCVMM * Microsoft Azure * Google Compute Engine -* Kubernetes * Hawkular * Amazon EC2 diff --git a/wrapanapi/__init__.py b/wrapanapi/__init__.py index 95f14af0..4a0d6269 100644 --- a/wrapanapi/__init__.py +++ b/wrapanapi/__init__.py @@ -10,7 +10,6 @@ from .msazure import AzureSystem # NOQA from .virtualcenter import VMWareSystem # NOQA from .google import GoogleCloudSystem # NOQA -from wrapanapi.containers.providers.rhkubernetes import Kubernetes # NOQA from wrapanapi.containers.providers.rhopenshift import Openshift # NOQA from .hawkular import Hawkular # NOQA from .lenovo import LenovoSystem # NOQA diff --git a/wrapanapi/containers/__init__.py b/wrapanapi/containers/__init__.py index b2c4bfc7..e69de29b 100644 --- a/wrapanapi/containers/__init__.py +++ b/wrapanapi/containers/__init__.py @@ -1,179 +0,0 @@ -from __future__ import absolute_import -import re -from cached_property import cached_property - -from wrapanapi.exceptions import (RequestFailedException, InvalidValueException, - LabelNotFoundException, ResourceAlreadyExistsException, - UncreatableResourceException) - - -class ContainersResourceBase(object): - """ - A container resource base class. This class includes the base functions of (almost) - all container resources. Each resource has its own API entry and use different API - (Kubernetes or OpenShift). Each resource has get, post, patch and delete methods which - directed to the path of the resource. 
- The following parameters should be statically defined: - * RESOURCE_TYPE: (str) the resource type name in the API - * CREATABLE (optional): (bool) Specify whether this resource is creatable or not (some - resources are not, e.g. Pod is created by Replication Controller - and not manually). set to False by default. - * (optional) API: (str) The API to use - the default is Kubernetes ('k_api') but some - resources use OpenShift ('o_api'). - * (optional) VALID_NAME_PATTERN: (str) the regex pattern that match a valid object name - * (optional) KIND: (str) the resource 'Kind' property as it appear in JSON. - if not specified, grabbing the Kind from the class name. - """ - CREATABLE = False - API = 'k_api' - - def __init__(self, provider, name, namespace): - """ - Args: - provider: (Openshift || Kubernetes) The containers provider - name: (str) The name of the resource - namespace: (str) The namespace used for this resource - """ - if hasattr(self, 'VALID_NAME_PATTERN') and not re.match(self.VALID_NAME_PATTERN, name): - raise InvalidValueException('{0} name "{1}" is invalid. {0} name must ' - 'match the regex "{2}"' - .format(self.RESOURCE_TYPE, name, self.VALID_NAME_PATTERN)) - self.provider = provider - self.name = name - self.namespace = namespace - - def __eq__(self, other): - return (getattr(self, 'namespace', None) == getattr(other, 'namespace', None) and - self.name == getattr(other, 'name', None)) - - def __repr__(self): - return '<{} name="{}" namespace="{}">'.format( - self.__class__.__name__, self.name, self.namespace) - - def exists(self): - """Return whether this object exists or not.""" - try: - self.get() - return True - except RequestFailedException: - return False - - @cached_property - def api(self): - """Return the used API according to the defined API.""" - return getattr(self.provider, self.API) - - @classmethod - def kind(cls): - """Return the resource Kind property as it should be in the JSON""" - return getattr(cls, 'KIND', None) or cls.__name__ - - @classmethod - def create(cls, provider, payload): - """Creating the object if it doesn't exist and creatable. - Args: - provider: (Openshift || Kubernetes) The containers provider. - payload: The JSON data to create this object. - Returns: - The created instance of that resource. - Raises: - UncreatableResourceException, ResourceAlreadyExistsException. - """ - if not cls.CREATABLE: - raise UncreatableResourceException(cls.RESOURCE_TYPE) - api = getattr(provider, cls.API) - # Checking name validity - name = payload['metadata']['name'] - if hasattr(cls, 'VALID_NAME_PATTERN') and not re.match(cls.VALID_NAME_PATTERN, name): - raise InvalidValueException('{0} name "{1}" is invalid. {0} name must ' - 'match the regex "{2}"' - .format(cls.RESOURCE_TYPE, name, cls.VALID_NAME_PATTERN)) - # Choosing the arguments accordingly, some resources are - # not namespaced and require different arguments. - if 'namespace' in payload['metadata']: - obj = cls(provider, name, payload['metadata']['namespace']) - else: - obj = cls(provider, name) - # Defining default/predefined parameters - payload['apiVersion'] = payload.get('apiVersion', 'v1') - payload['kind'] = cls.kind() - # Checking existence - if obj.exists(): - raise ResourceAlreadyExistsException( - '{} "{}" already exists.'.format(cls.RESOURCE_TYPE, obj.name)) - status_code, json_content = api.post(cls.RESOURCE_TYPE, payload, - namespace=obj.namespace) - # Verifying success - if status_code not in (200, 201): - raise RequestFailedException( - 'Failed to create {} "{}". 
status_code: {}; json_content: {};' - .format(cls.RESOURCE_TYPE, obj.name, status_code, json_content) - ) - return obj - - @property - def name_for_api(self): - """The name used for the API (In Image the name for API is id)""" - return self.name - - @property - def project_name(self): - # For backward compatibility - return self.namespace - - @property - def metadata(self): - return self.get()['metadata'] - - @property - def spec(self): - return self.get()['spec'] - - @property - def status(self): - return self.get()['status'] - - def get(self, convert=None): - """Sends a GET request to the resource.""" - status_code, json_content = self.api.get(self.RESOURCE_TYPE, name=self.name_for_api, - namespace=self.namespace, convert=convert) - if status_code != 200: - raise RequestFailedException('GET request of {} "{}" returned status code {}. ' - 'json content: {}' - .format(self.RESOURCE_TYPE, self.name_for_api, - status_code, json_content)) - return json_content - - def post(self, data, convert=None): - """Sends a POST request with the given data to the resource.""" - return self.api.post(self.RESOURCE_TYPE, data, name=self.name_for_api, - namespace=self.namespace, convert=convert) - - def patch(self, data, convert=None, - headers={'Content-Type': 'application/strategic-merge-patch+json'}): - """Sends a PATCH request with the given data/headers to the resource.""" - return self.api.patch(self.RESOURCE_TYPE, data, name=self.name_for_api, - namespace=self.namespace, convert=convert, headers=headers) - - def delete(self, convert=None): - """Sends a DELETE request to the resource (delete the resource).""" - return self.api.delete(self.RESOURCE_TYPE, self.name_for_api, - namespace=self.namespace, convert=convert) - - def list_labels(self): - """List the labels of this resource""" - return self.metadata.get('labels', {}) - - def set_label(self, key, value): - """Sets a label for this resource""" - return self.patch({'metadata': {'labels': {key: str(value)}}}) - - def delete_label(self, key): - """Deletes a label from this resource""" - original_labels = self.list_labels() - if key not in original_labels: - raise LabelNotFoundException(key) - del original_labels[key] - labels = {'$patch': 'replace'} - labels.update(original_labels) - return self.patch({'metadata': {'labels': labels}}) diff --git a/wrapanapi/containers/container.py b/wrapanapi/containers/container.py deleted file mode 100644 index ba80ca5d..00000000 --- a/wrapanapi/containers/container.py +++ /dev/null @@ -1,22 +0,0 @@ -from __future__ import absolute_import -from .pod import Pod - - -class Container(object): - - def __init__(self, provider, name, pod, image): - if not isinstance(pod, Pod): - raise TypeError('pod argument should be an Pod instance') - self.provider = provider - self.name = name - self.pod = pod - self.image = image - - @property - def cg_name(self): - # For backward compatibility - return self.pod.name - - @property - def namespace(self): - return self.pod.namespace diff --git a/wrapanapi/containers/deployment_config.py b/wrapanapi/containers/deployment_config.py deleted file mode 100644 index 1b084bfe..00000000 --- a/wrapanapi/containers/deployment_config.py +++ /dev/null @@ -1,16 +0,0 @@ -from __future__ import absolute_import -from wrapanapi.containers import ContainersResourceBase - - -class DeploymentConfig(ContainersResourceBase): - RESOURCE_TYPE = 'deploymentconfig' - CREATABLE = True - API = 'o_api' - VALID_NAME_PATTERN = '^[a-zA-Z0-9][a-zA-Z0-9\-]+$' - - def __init__(self, provider, name, namespace): - 
ContainersResourceBase.__init__(self, provider, name, namespace) - - @property - def replicas(self): - return self.spec['replicas'] diff --git a/wrapanapi/containers/image.py b/wrapanapi/containers/image.py deleted file mode 100644 index 9c40cf99..00000000 --- a/wrapanapi/containers/image.py +++ /dev/null @@ -1,59 +0,0 @@ -from __future__ import absolute_import -from cached_property import cached_property - -from wrapanapi.containers import ContainersResourceBase - - -class Image(ContainersResourceBase): - RESOURCE_TYPE = 'image' - - def __init__(self, provider, name, image_id, image_project_name=None): - ContainersResourceBase.__init__(self, provider, name, None) - self.id = image_id - self.image_project_name = image_project_name - - def __eq__(self, other): - return self.id == getattr(other, 'id', None) - - def __repr__(self): - return '<{} name="{}" id="{}">'.format( - self.__class__.__name__, self.name, self.id) - - @staticmethod - def parse_docker_image_info(image_str): - """Splits full image name into registry, name, id and tag - - Registry and tag are optional, name and id are always present. - - Example: - /jboss-fuse-6/fis-karaf-openshift:@sha256: => - , jboss-fuse-6/fis-karaf-openshift, sha256:, - """ - registry, image_str = image_str.split('/', 1) if '/' in image_str else ('', image_str) - name, image_id = image_str.split('@') - tag = name.split(':')[-1] if ':' in image_str else (image_str, '') - return registry, name, image_id, tag - - @cached_property - def docker_image_reference(self): - return self.get().get('dockerImageReference', '') - - @cached_property - def docker_image_info(self): - return self.parse_docker_image_info(self.docker_image_reference) - - @property - def name_for_api(self): - return self.id - - @cached_property - def api(self): - return self.provider.o_api - - @cached_property - def registry(self): - return self.docker_image_info[0] - - @cached_property - def tag(self): - return self.docker_image_info[3] diff --git a/wrapanapi/containers/image_registry.py b/wrapanapi/containers/image_registry.py deleted file mode 100644 index 0843ab6b..00000000 --- a/wrapanapi/containers/image_registry.py +++ /dev/null @@ -1,51 +0,0 @@ -from __future__ import absolute_import -from wrapanapi.containers import ContainersResourceBase -from wrapanapi.exceptions import RequestFailedException -from wrapanapi.containers.image import Image - - -class ImageRegistry(ContainersResourceBase): - RESOURCE_TYPE = 'imagestream' - KIND = 'ImageStream' - API = 'o_api' - VALID_NAME_PATTERN = '^[a-zA-Z0-9][a-zA-Z0-9\-_\.]+$' - - def __init__(self, provider, name, registry, namespace): - ContainersResourceBase.__init__(self, provider, name, namespace) - self.registry = registry - full_host = registry.split('/')[0] - self.host, self.port = full_host.split(':') if ':' in full_host else (full_host, '') - - def __repr__(self): - return '<{} name="{}" host="{}" namespace="{}">'.format( - self.__class__.__name__, self.name, self.host, self.namespace) - - def import_image(self): - """Import the image from the docker registry. 
Returns instance of image""" - status_code, json_content = self.provider.o_api.post('imagestreamimport', { - 'apiVersion': 'v1', - 'kind': 'ImageStreamImport', - 'metadata': { - 'name': self.name, - 'namespace': self.namespace - }, - 'spec': { - 'import': True, - 'images': [{ - 'from': { - 'kind': 'DockerImage', - 'name': self.registry - }, - 'importPolicy': {}, - 'to': {'name': 'latest'} - }] - } - }, namespace=self.namespace) - if status_code not in (200, 201): - raise RequestFailedException('Failed to import image. status_code: {}; ' - 'json_content: {};' - .format(status_code, json_content)) - _, image_name, image_id, _ = Image.parse_docker_image_info( - json_content['status']['images'][-1]['image']['dockerImageReference']) - - return Image(self.provider, image_name, image_id) diff --git a/wrapanapi/containers/node.py b/wrapanapi/containers/node.py deleted file mode 100644 index 469076bf..00000000 --- a/wrapanapi/containers/node.py +++ /dev/null @@ -1,22 +0,0 @@ -from __future__ import absolute_import -from wrapanapi.containers import ContainersResourceBase - - -class Node(ContainersResourceBase): - RESOURCE_TYPE = 'node' - - def __init__(self, provider, name): - ContainersResourceBase.__init__(self, provider, name, None) - - @property - def cpu(self): - return int(self.status['capacity']['cpu']) - - @property - def ready(self): - return self.status['conditions'][0]['status'] - - @property - def memory(self): - return int(round(int( - self.status['capacity']['memory'][:-2]) * 0.00000102400)) # KiB to GB diff --git a/wrapanapi/containers/pod.py b/wrapanapi/containers/pod.py deleted file mode 100644 index 8f90f9e8..00000000 --- a/wrapanapi/containers/pod.py +++ /dev/null @@ -1,15 +0,0 @@ -from __future__ import absolute_import -from wrapanapi.containers import ContainersResourceBase - - -class Pod(ContainersResourceBase): - RESOURCE_TYPE = 'pod' - CREATABLE = True - - @property - def restart_policy(self): - return self.spec['restartPolicy'] - - @property - def dns_policy(self): - return self.spec['dnsPolicy'] diff --git a/wrapanapi/containers/project.py b/wrapanapi/containers/project.py deleted file mode 100644 index 95ee340f..00000000 --- a/wrapanapi/containers/project.py +++ /dev/null @@ -1,15 +0,0 @@ -from __future__ import absolute_import -from wrapanapi.containers import ContainersResourceBase - - -class Project(ContainersResourceBase): - RESOURCE_TYPE = 'namespace' - CREATABLE = True - VALID_NAME_PATTERN = r'^[a-z0-9][a-z0-9\-]+$' - - def __init__(self, provider, name): - ContainersResourceBase.__init__(self, provider, name, None) - - def __repr__(self): - return '<{} name="{}">'.format( - self.__class__.__name__, self.name) diff --git a/wrapanapi/containers/providers/rhkubernetes.py b/wrapanapi/containers/providers/rhkubernetes.py deleted file mode 100644 index e7ec2adb..00000000 --- a/wrapanapi/containers/providers/rhkubernetes.py +++ /dev/null @@ -1,195 +0,0 @@ -from __future__ import absolute_import -from wrapanapi.base import WrapanapiAPIBase -from wrapanapi.rest_client import ContainerClient - -from wrapanapi.containers.container import Container -from wrapanapi.containers.pod import Pod -from wrapanapi.containers.service import Service -from wrapanapi.containers.replicator import Replicator -from wrapanapi.containers.image import Image -from wrapanapi.containers.node import Node -from wrapanapi.containers.image_registry import ImageRegistry -from wrapanapi.containers.project import Project -from wrapanapi.containers.volume import Volume - -""" -Related yaml structures: - 
-[cfme_data] -management_systems: - kubernetes: - name: My kubernetes - type: kubernetes - hostname: 10.12.13.14 - port: 6443 - credentials: kubernetes - authenticate: true - rest_protocol: https - -[credentials] -kubernetes: - username: admin - password: secret - token: mytoken -""" - - -class Kubernetes(WrapanapiAPIBase): - - _stats_available = { - 'num_container': lambda self: len(self.list_container()), - 'num_pod': lambda self: len(self.list_container_group()), - 'num_service': lambda self: len(self.list_service()), - 'num_replication_controller': - lambda self: len(self.list_replication_controller()), - 'num_replication_controller_labels': - lambda self: len(self.list_replication_controller_labels()), - 'num_image': lambda self: len(self.list_image()), - 'num_node': lambda self: len(self.list_node()), - 'num_image_registry': lambda self: len(self.list_image_registry()), - 'num_project': lambda self: len(self.list_project()), - } - - def __init__(self, hostname, protocol="https", port=6443, entry='api/v1', **kwargs): - self.hostname = hostname - self.username = kwargs.get('username', '') - self.password = kwargs.get('password', '') - self.token = kwargs.get('token', '') - self.auth = self.token if self.token else (self.username, self.password) - self.api = ContainerClient(hostname, self.auth, protocol, port, entry) - super(Kubernetes, self).__init__(kwargs) - - def disconnect(self): - pass - - def _parse_image_info(self, image_str): - """Splits full image name into registry, name and tag - - Both registry and tag are optional, name is always present. - - Example: - localhost:5000/nginx:latest => localhost:5000, nginx, latest - """ - registry, image_str = image_str.split('/', 1) if '/' in image_str else ('', image_str) - name, tag = image_str.split(':', 1) if ':' in image_str else (image_str, '') - return registry, name, tag - - def info(self): - """Returns information about the cluster - number of CPUs and memory in GB""" - aggregate_cpu, aggregate_mem = 0, 0 - for node in self.list_node(): - aggregate_cpu += node.cpu - aggregate_mem += node.memory - return {'cpu': aggregate_cpu, 'memory': aggregate_mem} - - def list_container(self, project_name=None): - """Returns list of containers (derived from pods) - If project_name is passed, only the containers under the selected project will be returned - """ - entities = [] - entities_j = self.api.get('pod')[1]['items'] - for entity_j in entities_j: - if project_name and project_name != entity_j['metadata']['namespace']: - continue - pod = Pod(self, entity_j['metadata']['name'], entity_j['metadata']['namespace']) - conts_j = entity_j['spec']['containers'] - for cont_j in conts_j: - cont = Container(self, cont_j['name'], pod, cont_j['image']) - if cont not in entities: - entities.append(cont) - return entities - - def list_container_group(self, project_name=None): - """Returns list of container groups (pods). - If project_name is passed, only the pods under the selected project will be returned""" - entities = [] - entities_j = self.api.get('pod')[1]['items'] - for entity_j in entities_j: - meta = entity_j['metadata'] - entity = Pod(self, meta['name'], meta['namespace']) - if project_name and project_name != meta['namespace']: - continue - entities.append(entity) - return entities - - def list_service(self, project_name=None): - """Returns list of services. 
- If project name is passed, only the services under the selected project will be returned""" - entities = [] - entities_j = self.api.get('service')[1]['items'] - for entity_j in entities_j: - meta = entity_j['metadata'] - entity = Service(self, meta['name'], meta['namespace']) - if project_name and project_name != meta['namespace']: - continue - entities.append(entity) - return entities - - def list_replication_controller(self): - """Returns list of replication controllers""" - entities = [] - entities_j = self.api.get('replicationcontroller')[1]['items'] - for entity_j in entities_j: - meta = entity_j['metadata'] - entity = Replicator(self, meta['name'], meta['namespace']) - entities.append(entity) - return entities - - def list_image(self): - """Returns list of images (derived from pods)""" - entities = [] - entities_j = self.api.get('pod')[1]['items'] - for entity_j in entities_j: - imgs_j = entity_j['status'].get('containerStatuses', []) - img_project_name = entity_j['metadata'].get('namespace', []) - for img_j in imgs_j: - _, name, _ = self._parse_image_info(img_j['image']) - img = Image(self, name, img_j['imageID'], img_project_name) - if img not in entities: - entities.append(img) - return entities - - def list_node(self): - """Returns list of nodes""" - entities = [] - entities_j = self.api.get('node')[1]['items'] - for entity_j in entities_j: - meta = entity_j['metadata'] - entity = Node(self, meta['name']) - entities.append(entity) - return entities - - def list_image_registry(self): - """Returns list of image registries (derived from pods)""" - entities = [] - entities_j = self.api.get('pod')[1]['items'] - for entity_j in entities_j: - imgs_j = entity_j['status'].get('containerStatuses', []) - for img_j in imgs_j: - registry, _, _ = self._parse_image_info(img_j['image']) - if not registry: - continue - host, _ = registry.split(':') if ':' in registry else (registry, '') - entity = ImageRegistry(self, host, registry, None) - if entity not in entities: - entities.append(entity) - return entities - - def list_project(self): - """Returns list of projects (namespaces in k8s)""" - entities = [] - entities_j = self.api.get('namespace')[1]['items'] - for entity_j in entities_j: - meta = entity_j['metadata'] - entity = Project(self, meta['name']) - entities.append(entity) - return entities - - def list_volume(self): - entities = [] - entities_j = self.api.get('persistentvolume')[1]['items'] - for entity_j in entities_j: - meta = entity_j['metadata'] - entity = Volume(self, meta['name']) - entities.append(entity) - return entities diff --git a/wrapanapi/containers/providers/rhopenshift.py b/wrapanapi/containers/providers/rhopenshift.py index c58a5314..101f59ee 100644 --- a/wrapanapi/containers/providers/rhopenshift.py +++ b/wrapanapi/containers/providers/rhopenshift.py @@ -15,13 +15,7 @@ from openshift import client as ociclient from wait_for import wait_for, TimedOutError -from wrapanapi.containers.providers.rhkubernetes import Kubernetes -from wrapanapi.rest_client import ContainerClient -from wrapanapi.containers.route import Route -from wrapanapi.containers.image_registry import ImageRegistry -from wrapanapi.containers.project import Project -from wrapanapi.containers.image import Image -from wrapanapi.containers.deployment_config import DeploymentConfig +from wrapanapi.base import WrapanapiAPIBase # stolen from sprout @@ -107,13 +101,21 @@ def wrap(*args, **kwargs): @reconnect(unauthenticated_error_handler) -class Openshift(Kubernetes): - - _stats_available = 
Kubernetes._stats_available.copy() - _stats_available.update({ +class Openshift(WrapanapiAPIBase): + + _stats_available = { + 'num_container': lambda self: len(self.list_container()), + 'num_pod': lambda self: len(self.list_pods()), + 'num_service': lambda self: len(self.list_service()), + 'num_replication_controller': + lambda self: len(self.list_replication_controller()), + 'num_image': lambda self: len(self.list_image_id()), + 'num_node': lambda self: len(self.list_node()), + 'num_image_registry': lambda self: len(self.list_image_registry()), + 'num_project': lambda self: len(self.list_project()), 'num_route': lambda self: len(self.list_route()), 'num_template': lambda self: len(self.list_template()) - }) + } stream2template_tags_mapping = { 'cfme-openshift-httpd': 'HTTPD_IMG_TAG', @@ -136,10 +138,9 @@ class Openshift(Kubernetes): required_project_pods58 = ('memcached', 'postgresql', 'cloudforms') not_required_project_pods = ('cloudforms-backend', 'ansible') - def __init__(self, hostname, protocol="https", port=8443, k_entry="api/v1", o_entry="oapi/v1", - debug=False, verify_ssl=False, **kwargs): + def __init__(self, hostname, protocol="https", port=8443, debug=False, + verify_ssl=False, **kwargs): super(Openshift, self).__init__(kwargs) - self.new_client = kwargs.get('new_client', False) self.hostname = hostname self.protocol = protocol self.port = port @@ -150,92 +151,137 @@ def __init__(self, hostname, protocol="https", port=8443, k_entry="api/v1", o_en self.auth = self.token if self.token else (self.username, self.password) self.debug = debug self.verify_ssl = verify_ssl - self.list_image_openshift = self.list_docker_image # For backward compatibility - self.old_k_api = self.k_api = ContainerClient(hostname, self.auth, protocol, port, k_entry) - self.old_o_api = self.o_api = ContainerClient(hostname, self.auth, protocol, port, o_entry) - self.api = self.k_api # default api is the kubernetes one for Kubernetes-class requests self._connect() def _connect(self): - if self.new_client: - url = '{proto}://{host}:{port}'.format(proto=self.protocol, host=self.hostname, - port=self.port) - - token = 'Bearer {token}'.format(token=self.token) - config = ociclient.Configuration() - config.host = url - config.verify_ssl = self.verify_ssl - config.debug = self.debug - config.api_key['authorization'] = token - - self.ociclient = ociclient - self.kclient = kubeclient - self.oapi_client = ociclient.ApiClient(config=config) - self.kapi_client = kubeclient.ApiClient(config=config) - self.o_api = ociclient.OapiApi(api_client=self.oapi_client) - self.k_api = kubeclient.CoreV1Api(api_client=self.kapi_client) - - def list_route(self): + url = '{proto}://{host}:{port}'.format(proto=self.protocol, host=self.hostname, + port=self.port) + + token = 'Bearer {token}'.format(token=self.token) + config = ociclient.Configuration() + config.host = url + config.verify_ssl = self.verify_ssl + config.debug = self.debug + config.api_key['authorization'] = token + + self.ociclient = ociclient + self.kclient = kubeclient + self.oapi_client = ociclient.ApiClient(config=config) + self.kapi_client = kubeclient.ApiClient(config=config) + self.o_api = ociclient.OapiApi(api_client=self.oapi_client) + self.k_api = kubeclient.CoreV1Api(api_client=self.kapi_client) + + def list_route(self, namespace=None): """Returns list of routes""" - entities = [] - entities_j = self.o_api.get('route')[1]['items'] - for entity_j in entities_j: - meta = entity_j['metadata'] - entity = Route(self, meta['name'], meta['namespace']) - 
entities.append(entity) - return entities - - def list_docker_registry(self): - """Returns list of docker registries""" - entities = [] - entities_j = self.o_api.get('imagestream')[1]['items'] - for entity_j in entities_j: - if 'dockerImageRepository' not in entity_j['status']: - continue - meta = entity_j['metadata'] - entity = ImageRegistry(self, meta['name'], - entity_j['status']['dockerImageRepository'], - meta['namespace']) - if entity not in entities: - entities.append(entity) - return entities + if namespace: + routes = self.o_api.list_namespaced_route(namespace=namespace).items + else: + routes = self.o_api.list_route_for_all_namespaces().items + return routes + + def list_image_streams(self, namespace=None): + """Returns list of image streams""" + if namespace: + image_streams = self.o_api.list_namespaced_image_stream(namespace=namespace).items + else: + image_streams = self.o_api.list_image_stream_for_all_namespaces().items + return image_streams def list_project(self): """Returns list of projects""" - entities = [] - entities_j = self.o_api.get('project')[1]['items'] - for entity_j in entities_j: - meta = entity_j['metadata'] - entity = Project(self, meta['name']) - entities.append(entity) - return entities + return self.o_api.list_project().items def list_template(self, namespace=None): """Returns list of templates""" - namespace = namespace if namespace else self.default_namespace - return [t.metadata.name for t in self.o_api.list_namespaced_template(namespace).items] + if namespace: + return [t.metadata.name for t in self.o_api.list_namespaced_template(namespace).items] + else: + return [t.metadata.name for t in self.o_api.list_template_for_all_namespaces().items] - def list_docker_image(self): + def list_image_stream_images(self): """Returns list of images (Docker registry only)""" - entities = [] - entities_j = self.o_api.get('image')[1]['items'] - for entity_j in entities_j: - if 'dockerImageReference' not in entity_j: - continue - _, name, image_id, _ = Image.parse_docker_image_info(entity_j['dockerImageReference']) - entities.append(Image(self, name, image_id)) - return entities + return [item for item in self.o_api.list_image().items + if item.docker_image_reference is not None] - def list_deployment_config(self): + def list_deployment_config(self, namespace=None): """Returns list of deployment configs""" - entities = [] - entities_j = self.o_api.get('deploymentconfig')[1]['items'] - for entity_j in entities_j: - meta = entity_j['metadata'] - entity = DeploymentConfig(self, meta['name'], meta['namespace']) - entities.append(entity) - return entities + if namespace: + dc = self.o_api.list_namespaced_deployment_config(namespace=namespace).items + else: + dc = self.o_api.list_deployment_config_for_all_namespaces().items + return dc + + def list_service(self, namespace=None): + """Returns list of services.""" + if namespace: + svc = self.k_api.list_namespaced_service(namespace=namespace).items + else: + svc = self.k_api.list_service_for_all_namespaces().items + return svc + + def list_replication_controller(self, namespace=None): + """Returns list of replication controllers""" + if namespace: + rc = self.k_api.list_namespaced_replication_controller(namespace=namespace).items + else: + rc = self.k_api.list_replication_controller_for_all_namespaces().items + return rc + + def list_node(self): + """Returns list of nodes""" + nodes = self.k_api.list_node().items + return nodes + + def cluster_info(self): + """Returns information about the cluster - number of CPUs and memory in 
GB""" + aggregate_cpu, aggregate_mem = 0, 0 + for node in self.list_node(): + aggregate_cpu += int(node.status.capacity['cpu']) + aggregate_mem += int(round(int(node.status.capacity['memory'][:-2]) * 0.00000102400)) + + return {'cpu': aggregate_cpu, 'memory': aggregate_mem} + + def list_persistent_volume(self): + """Returns list of persistent volumes""" + pv = self.k_api.list_persistent_volume().items + return pv + + def list_pods(self, namespace=None): + """Returns list of container groups (pods). + If project_name is passed, only the pods under the selected project will be returned""" + if namespace: + pods = self.k_api.list_namespaced_pod(namespace=namespace).items + else: + pods = self.k_api.list_pod_for_all_namespaces().items + return pods + + def list_container(self, namespace=None): + """Returns list of containers (derived from pods) + If project_name is passed, only the containers under the selected project will be returned + """ + pods = self.list_pods(namespace=namespace) + containers = [] + containers.extend([pod.spec.containers for pod in pods]) + return containers + + def list_image_id(self, namespace=None): + """Returns list of image ids (derived from pods)""" + pods = self.list_pods(namespace=namespace) + statuses = [] + for pod in pods: + for status in pod.status.container_statuses: + statuses.append(status) + return [status.image_id for status in statuses] + + def list_image_registry(self, namespace=None): + """Returns list of image registries (derived from pods)""" + pods = self.list_pods(namespace=namespace) + statuses = [] + for pod in pods: + for status in pod.status.container_statuses: + statuses.append(status) + return [status.image for status in statuses] def deploy_template(self, template, tags=None, password='smartvm', **kwargs): """Deploy a VM from a template @@ -1001,7 +1047,7 @@ def check_scale_value(): def get_project_by_name(self, project_name): """Returns only the selected Project object""" - return Project(self, project_name) + return next(proj for proj in self.list_project() if proj.metadata.name == project_name) def is_vm_running(self, vm_name): """Emulates check is vm(appliance) up and running @@ -1154,7 +1200,7 @@ def in_steady_state(self, vm_name): def can_rename(self): return hasattr(self, "rename_vm") - def list_project_(self): + def list_project_names(self): """Obtains project names Returns: list of project names @@ -1162,7 +1208,7 @@ def list_project_(self): projects = self.o_api.list_project().items return [proj.metadata.name for proj in projects] - list_vm = list_project_ + list_vm = list_project_names def get_appliance_version(self, vm_name): """Returns appliance version if it is possible @@ -1285,3 +1331,6 @@ def get_ip_address(self, vm_name, timeout=600): except TimedOutError: ip_address = None return ip_address + + def disconnect(self): + pass diff --git a/wrapanapi/containers/replicator.py b/wrapanapi/containers/replicator.py deleted file mode 100644 index 7e5f7df2..00000000 --- a/wrapanapi/containers/replicator.py +++ /dev/null @@ -1,16 +0,0 @@ -from __future__ import absolute_import -from wrapanapi.containers import ContainersResourceBase - - -class Replicator(ContainersResourceBase): - RESOURCE_TYPE = 'replicationcontroller' - KIND = 'ReplicationController' - CREATABLE = True - - @property - def replicas(self): - return self.spec['replicas'] - - @property - def current_replicas(self): - return self.status['replicas'] diff --git a/wrapanapi/containers/route.py b/wrapanapi/containers/route.py deleted file mode 100644 index 
7768f6ce..00000000 --- a/wrapanapi/containers/route.py +++ /dev/null @@ -1,8 +0,0 @@ -from __future__ import absolute_import -from wrapanapi.containers import ContainersResourceBase - - -class Route(ContainersResourceBase): - RESOURCE_TYPE = 'route' - CREATABLE = True - API = 'o_api' diff --git a/wrapanapi/containers/service.py b/wrapanapi/containers/service.py deleted file mode 100644 index 9803ed40..00000000 --- a/wrapanapi/containers/service.py +++ /dev/null @@ -1,15 +0,0 @@ -from __future__ import absolute_import -from wrapanapi.containers import ContainersResourceBase - - -class Service(ContainersResourceBase): - RESOURCE_TYPE = 'service' - CREATABLE = True - - @property - def portal_ip(self): - return self.spec['clusterIP'] - - @property - def session_affinity(self): - return self.spec['sessionAffinity'] diff --git a/wrapanapi/containers/template.py b/wrapanapi/containers/template.py deleted file mode 100644 index e1bcb4a9..00000000 --- a/wrapanapi/containers/template.py +++ /dev/null @@ -1,8 +0,0 @@ -from __future__ import absolute_import -from wrapanapi.containers import ContainersResourceBase - - -class Template(ContainersResourceBase): - RESOURCE_TYPE = 'template' - CREATABLE = True - API = 'o_api' diff --git a/wrapanapi/containers/volume.py b/wrapanapi/containers/volume.py deleted file mode 100644 index df098eac..00000000 --- a/wrapanapi/containers/volume.py +++ /dev/null @@ -1,23 +0,0 @@ -from __future__ import absolute_import -from wrapanapi.containers import ContainersResourceBase - - -class Volume(ContainersResourceBase): - RESOURCE_TYPE = 'persistentvolume' - KIND = 'PersistentVolume' - CREATABLE = True - - def __init__(self, provider, name): - ContainersResourceBase.__init__(self, provider, name, None) - - def __repr__(self): - return '<{} name="{}" capacity="{}">'.format( - self.__class__.__name__, self.name, self.capacity) - - @property - def capacity(self): - return self.spec['capacity']['storage'] - - @property - def accessmodes(self): - self.spec['accessModes'] From cc3134adc94f2a87b300cacd8af90ec038367be7 Mon Sep 17 00:00:00 2001 From: izapolsk Date: Fri, 8 Jun 2018 19:10:15 +0300 Subject: [PATCH 2/2] temporarily removed openshift tests. 
those will be restored soon when wrapanapi->ocp is switched to dynamic client --- tests/test_openshift.py | 602 ------------------ wrapanapi/containers/providers/rhopenshift.py | 5 +- 2 files changed, 2 insertions(+), 605 deletions(-) delete mode 100644 tests/test_openshift.py diff --git a/tests/test_openshift.py b/tests/test_openshift.py deleted file mode 100644 index 42384520..00000000 --- a/tests/test_openshift.py +++ /dev/null @@ -1,602 +0,0 @@ -# -*- coding: utf-8 -*- -"""Unit tests for Openshift client.""" -from __future__ import absolute_import -import os -from random import choice - -import pytest -import mock -import fauxfactory -from wait_for import wait_for - -from wrapanapi.containers.providers import rhopenshift -from wrapanapi.containers.project import Project -from wrapanapi.containers.deployment_config import DeploymentConfig -from wrapanapi.containers.image_registry import ImageRegistry -from wrapanapi.containers.image import Image -from wrapanapi.containers.pod import Pod -from wrapanapi.containers.service import Service -from wrapanapi.containers.node import Node -from wrapanapi.containers.replicator import Replicator -from wrapanapi.containers.route import Route -from wrapanapi.containers.template import Template -from wrapanapi.containers.volume import Volume - -from wrapanapi.exceptions import InvalidValueException, ResourceAlreadyExistsException,\ - UncreatableResourceException - - -# Specify whether to use a mock provider or real one. -MOCKED = os.environ.get('MOCKED', 'true').lower() == 'true' -# If you prefer to use a real provider, provide HOSTNAME, USERNAME and TOKEN -HOSTNAME = os.environ.get('HOSTNAME') -USERNAME = os.environ.get('USERNAME') -TOKEN = os.environ.get('TOKEN') - -FIXTURES_SCOPES = ('function' if MOCKED else 'module') - - -@pytest.fixture(scope=FIXTURES_SCOPES) -def provider(): - if MOCKED: - ocp = rhopenshift.Openshift('openshift.test.com', username='default') - with mock.patch('wrapanapi.rest_client.ContainerClient') as client: - ocp.o_api = ocp.api = ocp.k_api = client - else: - return rhopenshift.Openshift(HOSTNAME, username=USERNAME, token=TOKEN) - return ocp - - -def gen_docker_image_reference(): - """Generating a docker image reference including image ID. 
- returns the docker image reference and image ID""" - image_id = 'sha256:some-long-fake-id-with-numbers-{}' - docker_image_refrence = 'this.is.some.fake.{}/registry:{}@{}'.format( - fauxfactory.gen_alpha().lower(), fauxfactory.gen_numeric_string(3), image_id) - return docker_image_refrence, image_id - - -def mocked_image_data(): - out = [200, {'items': []}] - for i in range(fauxfactory.gen_integer(2, 20)): - dockerImageReference, imageID = gen_docker_image_reference() - out[1]['items'].append({ - 'metadata': { - 'name': 'mockedimage{}'.format(i), - 'namespace': choice(('default', 'openshift-infra', 'kube-system')) - } - }) - out[1]['items'][-1]['dockerImageReference'] = \ - dockerImageReference.format(fauxfactory.gen_numeric_string()) - out[1]['items'][-1]['status'] = { - 'dockerImageRepository': dockerImageReference, - 'containerStatuses': [ - { - 'image': out[1]['items'][-1]['dockerImageReference'], - 'imageID': imageID.format(fauxfactory.gen_numeric_string(64)) - } - for _ in range(fauxfactory.gen_integer(2, 20)) - ] - } - return out - - -@pytest.fixture(scope=FIXTURES_SCOPES) -def gen_project(provider): - return Project(provider, fauxfactory.gen_alpha().lower()) - - -@pytest.fixture(scope=FIXTURES_SCOPES) -def gen_image(provider): - if MOCKED: - return Image(provider, 'some.test.image', 'sha256:{}' - .format(fauxfactory.gen_alphanumeric(64))) - return choice(provider.list_docker_image()) - - -@pytest.fixture(scope=FIXTURES_SCOPES) -def gen_pod(provider): - if MOCKED: - return Pod(provider, 'some-test-pod', 'default') - return choice(provider.list_container_group()) - - -@pytest.fixture(scope=FIXTURES_SCOPES) -def gen_service(provider): - if MOCKED: - return Service(provider, 'some-test-service', 'default') - return choice(provider.list_service()) - - -@pytest.fixture(scope=FIXTURES_SCOPES) -def gen_node(provider): - if MOCKED: - return Node(provider, 'openshift-node.test.com') - return choice(provider.list_node()) - - -@pytest.fixture(scope=FIXTURES_SCOPES) -def gen_replicator(provider): - if MOCKED: - return Replicator(provider, 'some-test-replicator', 'default') - return choice(provider.list_replication_controller()) - - -@pytest.fixture(scope=FIXTURES_SCOPES) -def gen_route(provider): - if MOCKED: - return Route(provider, 'some.test.route.com', 'default') - return choice(provider.list_route()) - - -@pytest.fixture(scope=FIXTURES_SCOPES) -def gen_template(provider): - if MOCKED: - return Template(provider, 'some-test-template', 'default') - return choice(provider.list_template()) - - -@pytest.fixture(scope=FIXTURES_SCOPES) -def gen_image_registry(provider): - return ImageRegistry(provider, 'openshift-hello-openshift', - 'docker.io/openshift/hello-openshift', 'default') - - -@pytest.fixture(scope=FIXTURES_SCOPES) -def gen_volume(provider): - return Volume(provider, 'my-test-persistent-volume') - - -@pytest.fixture(scope=FIXTURES_SCOPES) -def gen_dc(provider): - return DeploymentConfig(provider, fauxfactory.gen_alpha().lower(), 'default') - - -@pytest.fixture(scope=FIXTURES_SCOPES) -def label(): - return (fauxfactory.gen_alpha().lower(), fauxfactory.gen_alpha().lower()) - - -def wait_for_existence(resource, exist=True, timeout='1M'): - """Wait for the object to exist (or not exist if is False).""" - return wait_for( - lambda: resource.exists() == exist, - message="Waiting for {} {} to {}exist..." 
- .format(resource.RESOURCE_TYPE, resource.name, ('' if exist else 'not ')), - delay=5, timeout=timeout - ) - - -def base__test_label_create(resource, label_key, label_value): - if MOCKED: - resource.provider.api.patch.return_value = \ - resource.provider.o_api.patch.return_value = [201, {}] - resource.provider.api.get.return_value = \ - resource.provider.o_api.get.return_value = [200, { - 'metadata': {'labels': {label_key: label_value}}}] - res = resource.set_label(label_key, label_value) - assert res[0] in (200, 201) - assert wait_for(lambda: label_key in resource.list_labels(), - message="Waiting for label {} of {} {} to exist..." - .format(label_key, type(resource).__name__, resource.name), - delay=5, timeout='1M').out - - -def base__test_label_delete(resource, label_key): - if MOCKED: - resource.provider.api.patch.return_value = \ - resource.provider.o_api.patch.return_value = [200, {}] - resource.provider.api.get.return_value = \ - resource.provider.o_api.get.return_value = [200, { - 'metadata': {'labels': {label_key: 'doesntmatter'}}}] - res = resource.delete_label(label_key) - assert res[0] == 200 - if MOCKED: - resource.provider.api.get.return_value = \ - resource.provider.o_api.get.return_value = [200, { - 'metadata': {'labels': {}}}] - assert wait_for(lambda: label_key not in resource.list_labels(), - message="Waiting for label {} of {} {} to be deleted..." - .format(label_key, type(resource).__name__, resource.name), - delay=5, timeout='1M').out - - -def base__test_create(provider, resource_class, payload): - if MOCKED: - provider.o_api.post.return_value = [201, {}] - provider.o_api.get.return_value = [409, {}] - resource = resource_class.create(provider, payload) - if MOCKED: - provider.o_api.get.return_value = [200, {}] - assert isinstance(resource, resource_class) - assert wait_for_existence(resource) - return resource - - -def base__test_delete(provider, resource): - if MOCKED: - provider.o_api.delete.return_value = [200, {}] - provider.o_api.get.return_value = [409, {}] - res = resource.delete() - assert res[0] == 200 - assert wait_for_existence(resource, exist=False) - - -@pytest.mark.incremental -class TestProject(object): - def test_list(self, provider): - if MOCKED: - provider.o_api.get.return_value = [200, { - 'items': [ - {'metadata': {'name': 'mockedprject{}'.format(i)}} - for i in range(fauxfactory.gen_integer(2, 20)) - ] - }] - assert all([isinstance(inst, Project) for inst in provider.list_project()]) - - def test_project_create(self, provider, gen_project): - payload = {"metadata": {"name": gen_project.name}} - assert base__test_create(provider, Project, payload) == gen_project - - def test_already_exists(self, provider, gen_project): - if MOCKED: - provider.api.get.return_value = [200, {}] - with pytest.raises(ResourceAlreadyExistsException): - payload = {'metadata': {'name': gen_project.name}} - gen_project.create(provider, payload) - - def test_labels_create(self, provider, gen_project, label): - base__test_label_create(gen_project, label[0], label[1]) - - def test_labels_delete(self, provider, gen_project, label): - base__test_label_delete(gen_project, label[0]) - - def test_project_delete(self, provider, gen_project): - base__test_delete(provider, gen_project) - - def test_invalid_name(self): - with pytest.raises(InvalidValueException): - Project(provider, 'this_is_invalid_project_name') - Project(provider, 'this/is/invalid/project/name/as/well') - - -@pytest.mark.incremental -class TestImage(object): - def test_list(self, provider): - if MOCKED: - 
provider.o_api.get.return_value = provider.api.get.return_value = mocked_image_data() - assert all([isinstance(inst, Image) for inst in provider.list_docker_image()]) - assert all([isinstance(inst, Image) for inst in provider.list_image()]) - assert all([isinstance(inst, Image) for inst in provider.list_image_openshift()]) - - def test_labels_create(self, provider, gen_image, label): - base__test_label_create(gen_image, label[0], label[1]) - - def test_labels_delete(self, provider, gen_image, label): - base__test_label_delete(gen_image, label[0]) - - def test_properties(self, gen_image): - # Just test that there are no errors when we try to get properties - if MOCKED: - gen_image.api.get.return_value = [200, { - 'dockerImageReference': 'this.is.some.fake/registry:{}' - '@sha256:some-long-fake-id-with-numbers-{}' - .format(fauxfactory.gen_numeric_string(3), fauxfactory.gen_numeric_string(64)) - }] - gen_image.registry, gen_image.tag - - -@pytest.mark.incremental -class TestPod(object): - def test_labels_create(self, provider, gen_pod, label): - base__test_label_create(gen_pod, label[0], label[1]) - - def test_labels_delete(self, provider, gen_pod, label): - base__test_label_delete(gen_pod, label[0]) - - def test_properties(self, gen_pod): - # Just test that there are no errors when we try to get properties - if MOCKED: - gen_pod.provider.api.get.return_value = [200, { - 'spec': { - 'restartPolicy': 'Always', - 'dnsPolicy': 'Sometimes' - } - }] - gen_pod.restart_policy, gen_pod.dns_policy - - -@pytest.mark.incremental -class TestService(object): - def test_labels_create(self, provider, gen_service, label): - base__test_label_create(gen_service, label[0], label[1]) - - def test_labels_delete(self, provider, gen_service, label): - base__test_label_delete(gen_service, label[0]) - - def test_properties(self, gen_service): - # Just test that there are no errors when we try to get properties - if MOCKED: - gen_service.provider.api.get.return_value = [200, { - 'spec': { - 'sessionAffinity': 'ClientIP', - 'clusterIP': '127.0.0.1' - } - }] - gen_service.portal_ip, gen_service.session_affinity - - -@pytest.mark.incremental -class TestRoute(object): - def test_create(self, provider, gen_route, gen_service): - if MOCKED: - service = gen_service - else: - service = provider.list_service().pop() - payload = { - 'metadata': { - 'name': 'route-to-{}'.format(service.name), - 'namespace': service.namespace - }, - 'spec': { - 'host': 'www.example.com', - 'to': { - 'Kind': service.kind(), - 'name': service.name - } - } - } - base__test_create(provider, Route, payload) - - def test_labels_create(self, provider, gen_route, label): - base__test_label_create(gen_route, label[0], label[1]) - - def test_labels_delete(self, provider, gen_route, label): - base__test_label_delete(gen_route, label[0]) - - def test_delete(self, provider, gen_route): - base__test_delete(provider, gen_route) - - -@pytest.mark.incremental -class TestNode(object): - def test_labels_create(self, provider, gen_node, label): - base__test_label_create(gen_node, label[0], label[1]) - - def test_labels_delete(self, provider, gen_node, label): - base__test_label_delete(gen_node, label[0]) - - def test_properties(self, gen_node): - # Just test that there are no errors when we try to get properties - if MOCKED: - gen_node.provider.api.get.return_value = [200, { - 'status': { - 'capacity': { - 'cpu': fauxfactory.gen_integer(1, 8), - 'memory': '{}kb'.format(fauxfactory.gen_numeric_string()) - }, - 'conditions': [{'status': 'Running'}] - } - }] - 
gen_node.cpu, gen_node.ready, gen_node.memory - - -@pytest.mark.incremental -class TestReplicator(object): - def test_labels_create(self, provider, gen_replicator, label): - base__test_label_create(gen_replicator, label[0], label[1]) - - def test_labels_delete(self, provider, gen_replicator, label): - base__test_label_delete(gen_replicator, label[0]) - - def test_properties(self, gen_replicator): - # Just test that there are no errors when we try to get properties - if MOCKED: - replicas = fauxfactory.gen_integer(1, 50) - gen_replicator.provider.api.get.return_value = [200, { - 'spec': {'replicas': replicas}, - 'status': {'replicas': replicas} - }] - gen_replicator.replicas, gen_replicator.current_replicas - - -@pytest.mark.incremental -class TestTemplate(object): - def test_labels_create(self, provider, gen_replicator, label): - base__test_label_create(gen_replicator, label[0], label[1]) - - def test_labels_delete(self, provider, gen_replicator, label): - base__test_label_delete(gen_replicator, label[0]) - - -@pytest.mark.incremental -class TestDeploymentConfig(object): - def test_list(self, provider): - if MOCKED: - provider.api.post.return_value = provider.o_api.get.return_value = [200, { - 'items': [ - { - 'metadata': { - 'name': fauxfactory.gen_alphanumeric(), - 'namespace': choice(('default', 'openshift-infra', 'kube-system')) - }, - 'spec': { - 'template': {'spec': {'containers': [ - {'image': 'img{}'.format(i)} - for i in range(fauxfactory.gen_integer(1, 10)) - ]}}, - 'replicas': fauxfactory.gen_integer(1, 50) - } - } - for _ in range(fauxfactory.gen_integer(1, 30)) - ] - }] - assert all([isinstance(inst, DeploymentConfig) - for inst in provider.list_deployment_config()]) - - def test_dc_create(self, provider, gen_dc): - payload = { - 'metadata': { - 'name': gen_dc.name, - 'namespace': gen_dc.namespace - }, - 'spec': { - 'replicas': 1, - 'test': False, - 'triggers': [ - { - 'type': 'ConfigChange' - } - ], - 'strategy': { - 'activeDeadlineSeconds': 21600, - 'resources': {}, - 'rollingParams': { - 'intervalSeconds': 1, - 'maxSurge': '25%', - 'maxUnavailable': '25%', - 'timeoutSeconds': 600, - 'updatePeriodSeconds': 1 - }, - 'type': 'Rolling' - }, - 'template': { - 'metadata': { - 'labels': { - 'run': gen_dc.name - } - }, - 'spec': { - 'containers': [ - { - 'image': 'openshift/hello-openshift', - 'imagePullPolicy': 'Always', - 'name': gen_dc.name, - 'ports': [ - { - 'containerPort': 8080, - 'protocol': 'TCP' - } - ], - 'resources': {}, - 'terminationMessagePath': '/dev/termination-log' - } - ], - 'dnsPolicy': 'ClusterFirst', - 'restartPolicy': 'Always', - 'securityContext': {}, - 'terminationGracePeriodSeconds': 30 - } - } - }, - 'status': { - 'replicas': 1, - 'latestVersion': 1, - 'observedGeneration': 2, - 'updatedReplicas': 1, - 'availableReplicas': 1, - 'unavailableReplicas': 0 - } - } - assert base__test_create(provider, DeploymentConfig, payload) == gen_dc - - def test_already_exists(self, provider, gen_dc): - if MOCKED: - provider.api.get.return_value = [200, {}] - with pytest.raises(ResourceAlreadyExistsException): - payload = {'metadata': {'name': gen_dc.name, 'namespace': gen_dc.namespace}} - DeploymentConfig.create(provider, payload) - - def test_labels_create(self, provider, gen_dc, label): - base__test_label_create(gen_dc, label[0], label[1]) - - def test_labels_delete(self, provider, gen_dc, label): - base__test_label_delete(gen_dc, label[0]) - - def test_dc_delete(self, provider, gen_dc): - base__test_delete(provider, gen_dc) - - def test_invalid_name(self): - with 
pytest.raises(InvalidValueException): - DeploymentConfig(provider, 'this_is_invalid_dc_name', 'default') - DeploymentConfig(provider, 'this/is/invalid/dc/name/as/well', 'default') - - -@pytest.mark.incremental -class TestImageRegistry(object): - def test_list(self, provider): - if MOCKED: - provider.o_api.get.return_value = provider.api.get.return_value = mocked_image_data() - assert all([isinstance(inst, ImageRegistry) - for inst in provider.list_image_registry()]) - assert all([isinstance(inst, ImageRegistry) - for inst in provider.list_docker_registry()]) - - def test_import_image(self, provider, gen_image_registry): - if MOCKED: - docker_image_reference = gen_docker_image_reference()[0] - provider.o_api.post.return_value = provider.o_api.get.return_value = [200, { - 'status': { - 'dockerImageRepository': docker_image_reference, - 'images': [{'image': {'dockerImageReference': docker_image_reference}}] - } - }] - provider.o_api.delete.return_value = [200, {}] - image = gen_image_registry.import_image() - assert image.exists() - image.delete() - - def test_labels_create(self, provider, gen_image_registry, label): - base__test_label_create(gen_image_registry, label[0], label[1]) - - def test_labels_delete(self, provider, gen_image_registry, label): - base__test_label_delete(gen_image_registry, label[0]) - - def test_invalid_name(self): - with pytest.raises(InvalidValueException): - ImageRegistry(provider, 'this/is/invalid/name', - 'docker.io/openshift/hello-openshift', 'default') - - -@pytest.mark.incremental -class TestVolume(object): - def test_create(self, provider, gen_volume): - payload = { - 'metadata': {'name': gen_volume.name}, - 'spec': { - 'accessModes': ['ReadWriteOnce'], - 'capacity': {'storage': '1Gi'}, - 'nfs': { - 'path': '/tmp', - 'server': '12.34.56.78' - } - }, - 'persistentVolumeReclaimPolicy': 'Retain' - } - assert base__test_create(provider, Volume, payload) == gen_volume - - def test_labels_create(self, provider, gen_volume, label): - base__test_label_create(gen_volume, label[0], label[1]) - - def test_labels_delete(self, provider, gen_volume, label): - base__test_label_delete(gen_volume, label[0]) - - def test_properties(self, gen_volume): - # Just test that there are no errors when we try to get properties - if MOCKED: - gen_volume.api.get.return_value = [200, { - 'spec': { - 'capacity': {'storage': '5Gib'}, - 'accessModes': ['ReadOnlyMany'] - } - }] - gen_volume.capacity, gen_volume.accessmodes - - def test_delete(self, provider, gen_volume): - base__test_delete(provider, gen_volume) - - -def test_uncreatable(provider, gen_image, gen_image_registry): - with pytest.raises(UncreatableResourceException): - gen_image.create(provider, {}) - gen_image_registry.create(provider, {}) diff --git a/wrapanapi/containers/providers/rhopenshift.py b/wrapanapi/containers/providers/rhopenshift.py index 101f59ee..f1e0c316 100644 --- a/wrapanapi/containers/providers/rhopenshift.py +++ b/wrapanapi/containers/providers/rhopenshift.py @@ -238,6 +238,7 @@ def cluster_info(self): aggregate_cpu, aggregate_mem = 0, 0 for node in self.list_node(): aggregate_cpu += int(node.status.capacity['cpu']) + # converting KiB to GB. 
1KiB = 1.024E-6 GB
         aggregate_mem += int(round(int(node.status.capacity['memory'][:-2]) * 0.00000102400))
 
         return {'cpu': aggregate_cpu, 'memory': aggregate_mem}
 
@@ -261,9 +262,7 @@ def list_container(self, namespace=None):
         """Returns list of containers (derived from pods)
         If project_name is passed, only the containers under the selected project will be returned
         """
         pods = self.list_pods(namespace=namespace)
-        containers = []
-        containers.extend([pod.spec.containers for pod in pods])
-        return containers
+        return [pod.spec.containers for pod in pods]
 
     def list_image_id(self, namespace=None):
         """Returns list of image ids (derived from pods)"""
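
A rough usage sketch (not part of the patch itself) of how the reworked provider is meant to be driven. It assumes only the names introduced above, namely the Openshift class and its list_pods, list_project_names and cluster_info methods; hostname, port and token values below are placeholders.

    # Hypothetical example; connection details are placeholders.
    from wrapanapi.containers.providers.rhopenshift import Openshift

    ocp = Openshift(hostname='openshift.example.com', port=8443,
                    token='REPLACE_WITH_TOKEN', verify_ssl=False)

    # Listing calls now return objects from the kubernetes/openshift python
    # clients rather than wrapanapi wrapper classes, so fields are attributes.
    for pod in ocp.list_pods(namespace='default'):
        print(pod.metadata.name, pod.status.phase)

    print(ocp.list_project_names())   # plain list of project names
    print(ocp.cluster_info())         # {'cpu': ..., 'memory': ...} aggregated over all nodes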