diff --git a/README.md b/README.md index 68f95801905..b85060e20e9 100644 --- a/README.md +++ b/README.md @@ -17,8 +17,8 @@ Oracle is finding ways for organizations using WebLogic Server to run important The fastest way to experience the operator is to follow the [Quick Start guide](https://oracle.github.io/weblogic-kubernetes-operator/quickstart/), or you can peruse our [documentation](https://oracle.github.io/weblogic-kubernetes-operator), read our [blogs](https://blogs.oracle.com/weblogicserver/updated-weblogic-kubernetes-support-with-operator-20), or try out the [samples](https://oracle.github.io/weblogic-kubernetes-operator/samples/). *** -The [current release of the operator](https://github.com/oracle/weblogic-kubernetes-operator/releases) is 3.0.1. -This release was published on August 13, 2020. +The [current release of the operator](https://github.com/oracle/weblogic-kubernetes-operator/releases) is 3.0.2. +This release was published on September 15, 2020. *** # Documentation diff --git a/buildDockerImage.sh b/buildDockerImage.sh index ffd2836a119..a32592084b1 100755 --- a/buildDockerImage.sh +++ b/buildDockerImage.sh @@ -33,7 +33,7 @@ while getopts "t:" optname; do esac done -IMAGE_NAME=${name:-oracle/weblogic-kubernetes-operator:3.0.1} +IMAGE_NAME=${name:-oracle/weblogic-kubernetes-operator:3.0.2} SCRIPTPATH="$( cd "$(dirname "$0")" > /dev/null 2>&1 ; pwd -P )" # Proxy settings diff --git a/buildtime-reports/pom.xml b/buildtime-reports/pom.xml index 839cbe9e98b..a0010d8267e 100644 --- a/buildtime-reports/pom.xml +++ b/buildtime-reports/pom.xml @@ -8,7 +8,7 @@ operator-parent oracle.kubernetes - 3.0.1 + 3.0.2 buildtime-reports diff --git a/docs-source/content/_index.md b/docs-source/content/_index.md index 44eecfdf350..25cbc956053 100644 --- a/docs-source/content/_index.md +++ b/docs-source/content/_index.md @@ -23,8 +23,8 @@ using the operator to deploy and run a WebLogic domain container-packaged web ap *** #### Current production release -The [current 
release of the operator](https://github.com/oracle/weblogic-kubernetes-operator/releases) is 3.0.1. -This release was published on August 13, 2020. See the operator prerequisites and supported environments [here]({{< relref "/userguide/introduction/introduction#operator-prerequisites" >}}). +The [current release of the operator](https://github.com/oracle/weblogic-kubernetes-operator/releases) is 3.0.2. +This release was published on September 15, 2020. See the operator prerequisites and supported environments [here]({{< relref "/userguide/introduction/introduction#operator-prerequisites" >}}). This release introduces _non-backward compatible_ changes; however, operators using this release can be run in the same Kubernetes cluster as operators using the 2.6.0 version allowing for staged migration. You can replace a 2.6.0 operator with a 3.x operator without needing to recreate any existing domains; however, you must delete the 2.6.0 Helm release and then install the 3.x version rather than using a Helm upgrade. When the 3.x operator starts, it will roll any running WebLogic Server instances diff --git a/docs-source/content/faq/external-clients.md b/docs-source/content/faq/external-clients.md index 0cfd7c1373b..50362c2d134 100644 --- a/docs-source/content/faq/external-clients.md +++ b/docs-source/content/faq/external-clients.md @@ -3,36 +3,68 @@ title: "External WebLogic clients" date: 2019-11-21T21:23:03Z draft: false weight: 11 -description: "There are two supported approaches for giving external WebLogic EJB or JMS clients access to a Kubernetes hosted WebLogic cluster: load balancer tunneling and Kubernetes `NodePorts`." +description: "This FAQ describes approaches for giving external WebLogic clients or servers access to a Kubernetes hosted WebLogic cluster JMS or EJBs, and for giving Kubernetes hosted clients or servers access to remotely hosted WebLogic JMS or EJBs." 
--- -#### Approaches +#### Contents -There are two supported approaches for giving external WebLogic EJB or JMS clients access to a Kubernetes hosted WebLogic cluster: [Load balancer tunneling](#load-balancer-tunneling) and [Kubernetes `NodePorts`](#kubernetes-nodeports). +- [Overview](#overview) +- [Load balancer tunneling](#load-balancer-tunneling) +- [Kubernetes `NodePorts`](#kubernetes-nodeports) +- [Adding a WebLogic custom channel](#adding-a-weblogic-custom-channel) + - [When is a WebLogic custom channel needed?](#when-is-a-weblogic-custom-channel-needed) + - [Configuring a WebLogic custom channel](#configuring-a-weblogic-custom-channel) + - [WebLogic custom channel notes](#weblogic-custom-channel-notes) +- [Setting up a `NodePort`](#setting-up-a-nodeport) + - [Getting started](#getting-started) + - [Sample `NodePort` resource](#sample-nodeport-resource) + - [Table of `NodePort` attributes](#table-of-nodeport-attributes) +- [Security notes](#security-notes) +- [Enabling unknown host access](#enabling-unknown-host-access) + - [When is it necessary to enable unknown host access?](#when-is-it-necessary-to-enable-unknown-host-access) + - [How to enable unknown host access](#how-to-enable-unknown-host-access) +- [Optional reading](#optional-reading) + +#### Overview + +When WebLogic clients and servers are external to Kubernetes, and you want to give them access to Kubernetes hosted WebLogic cluster EJBs and JMS, there are two supported approaches: + + * [Load balancer tunneling](#load-balancer-tunneling) (preferred) + * [Kubernetes `NodePorts`](#kubernetes-nodeports) + +Conversely, if giving a Kubernetes hosted WebLogic Server access to external WebLogic Server EJBs, JMS, or JTA, then consider the following: + + * You may need to [enable unknown host access](#enabling-unknown-host-access) on the external WebLogic Servers. 
+ * Plus, if the target server can be accessed only through a load balancer using HTTP: + * [Set up an HTTP tunneling-enabled custom channel](#adding-a-weblogic-custom-channel) on the external WebLogic Servers. + * Specify URLs on the source server that resolve to the load balancer's address and that start with `http` instead of `t3`. + * Ensure the load balancer configures the HTTP flow to be 'sticky'. {{% notice note %}} -This FAQ is for remote EJB and JMS clients - not JTA clients. The operator does not currently support external WebLogic JTA access to a WebLogic cluster, because external JTA access requires each server in the cluster to be individually addressable by the client, but this conflicts with the current operator requirement that a network channel in a cluster have the same port across all servers in the cluster. +The operator does not currently support external WebLogic JTA access to a Kubernetes hosted WebLogic cluster. This is because external JTA access requires each server in a cluster to be individually addressable, but this conflicts with the current operator requirement that a network channel in a cluster have the same port across all servers in the cluster. {{% /notice %}} -##### Load balancer tunneling +#### Load balancer tunneling -The load balancer tunneling approach for giving external WebLogic EJB or JMS clients access to a Kubernetes hosted WebLogic cluster involves configuring a network channel on the desired WebLogic cluster that accepts T3 protocol traffic that's tunneled over HTTP, deploying a load balancer that redirects external HTTP network traffic to the desired WebLogic network channel, and ensuring that EJB and JMS clients specify a URL that resolves the load balancer's network address. +Load balancer tunneling is the preferred approach for giving external clients and servers access to a Kubernetes hosted WebLogic cluster EJB and JMS. 
This approach involves configuring a network channel on the desired WebLogic cluster that accepts T3 protocol traffic that's tunneled over HTTP, deploying a load balancer that redirects external HTTP network traffic to the desired WebLogic network channel, and ensuring that EJB and JMS clients specify a URL that resolves the load balancer's network address. Here are the steps: -- Configure a custom channel for the T3 protocol in WebLogic that enables HTTP tunneling, and specifies an external address and port that correspond to the address and port remote clients will use to access the load balancer. See [Adding a WebLogic custom channel](#adding-a-weblogic-custom-channel) for samples and details. +- In WebLogic, configure a custom channel for the T3 protocol that enables HTTP tunneling, and specifies an external address and port that correspond to the address and port remote clients will use to access the load balancer. See [Adding a WebLogic custom channel](#adding-a-weblogic-custom-channel) for samples and details. -- Set up a load balancer that redirects HTTP traffic to the custom channel. For more information on load balancers, see [Ingress]({{}}). If you're also using OKE/OCI to host your Kubernetes cluster, also see [Using an OCI Load Balancer]({{}}). +- Set up a load balancer that redirects HTTP traffic to the custom channel. For more information on load balancers, see [Ingress]({{}}). If you're using OKE/OCI to host your Kubernetes cluster, also see [Using an OCI Load Balancer]({{}}). - __Important__: Ensure that the load balancer configures the HTTP flow to be 'sticky' - for example, a Traefik load balancer has a `sticky sessions` option. This ensures that all of the packets of a tunneling client connection flow to the same pod, otherwise the connection will stall when its packets are load balanced to a different pod. -- Remote clients can then access the custom channel using an `http://` URL instead of a `t3://` URL. 
+- If you are adding access for remote WebLogic Servers, then the Kubernetes hosted servers may need to [enable unknown host access](#enabling-unknown-host-access). + +- Remote clients and servers can then access the custom channel using an `http://` URL instead of a `t3://` URL. - Review the [Security notes](#security-notes). -##### Kubernetes `NodePorts` +#### Kubernetes `NodePorts` -The Kubernetes `NodePorts` approach for giving external WebLogic EJB or JMS clients access to a Kubernetes hosted WebLogic cluster involves configuring a network channel on the desired WebLogic cluster that accepts T3 protocol traffic, and exposing a Kubernetes `NodePort` that redirects external network traffic on the Kubernetes Nodes to the network channel. +Kubernetes `NodePorts` provide an alternative approach for giving external WebLogic EJB or JMS clients access to a Kubernetes hosted WebLogic cluster. This approach involves configuring a network channel on the desired WebLogic cluster that accepts T3 protocol traffic, and exposing a Kubernetes `NodePort` that redirects external network traffic on the Kubernetes Nodes to the network channel. {{% notice note %}} The `NodePort` approach is available only when worker nodes are accessible by the clients, for example, when they have public IP addresses. If private worker nodes are used and access to them is possible only through a load balancer or bastion, then the `NodePort` approach is not a valid option to provide access to external clients. @@ -40,10 +72,12 @@ The Kubernetes `NodePorts` approach for giving external WebLogic EJB or JMS clie Here are the steps: -- Configure a custom channel for the T3 protocol in WebLogic that specifies an external address and port that are suitable for remote client use. See [Adding a WebLogic custom channel](#adding-a-weblogic-custom-channel). +- In WebLogic, configure a custom channel for the T3 protocol that specifies an external address and port that are suitable for remote client use. 
See [Adding a WebLogic custom channel](#adding-a-weblogic-custom-channel). - Define a Kubernetes `NodePort` to publicly expose the WebLogic ports. See [Setting up a `NodePort`](#setting-up-a-nodeport). +- If you are adding access for remote WebLogic Servers, then the Kubernetes hosted servers may need to [enable unknown host access](#enabling-unknown-host-access). + - Review the [Security notes](#security-notes). #### Adding a WebLogic custom channel @@ -54,7 +88,7 @@ WebLogic implicitly creates a multi-protocol default channel that spans the `Lis A custom channel provides a way to configure an external listen address and port for use by external clients, unlike a default channel. External listen address or port configuration is needed when a channel's configured listen address or port would not work if used to form a URL in the remote client. This is because remote EJB and JMS clients internally use their client's channel's configured network information to reconnect to WebLogic when needed. (The EJB and JMS clients do not always use the initial URL specified in the client's JNDI context.) -A custom channel can be locked down using two-way SSL as a way to prevent access by unauthorized external JMS and EJB clients, only accepts protocols that are explicitly enabled for the channel, and can be configured to be the only channel that accepts EJB/JMS clients that tunnel over HTTP. A default channel may often be deliberately unencrypted for convenient internal use, or, if used externally, is only used for web traffic (not tunneling traffic). In addition, a default channel supports several protocols but it's a best practice to limit the protocols that can be accessed by external clients. Finally, external clients may require access using HTTP tunneling in order to make connections, but it's often inadvisable to enable tunneling for an unsecured default channel that's already servicing external HTTP traffic. 
This is because enabling HTTP tunneling would potentially allow unauthorized external JMS and EJB clients unsecured access to the WebLogic cluster through the same HTTP path. +A custom channel can be locked down using two-way SSL as a way to prevent access by unauthorized external JMS and EJB clients, only accepts protocols that are explicitly enabled for the channel, and can be configured to be the only channel that accepts EJB/JMS clients that tunnel over HTTP. A default channel may often be deliberately unencrypted for convenient internal use, or, if used externally, is used for web traffic (not tunneling traffic) only. In addition, a default channel supports several protocols but it's a best practice to limit the protocols that can be accessed by external clients. Finally, external clients may require access using HTTP tunneling in order to make connections, but it's often inadvisable to enable tunneling for an unsecured default channel that's already servicing external HTTP traffic. This is because enabling HTTP tunneling would potentially allow unauthorized external JMS and EJB clients unsecured access to the WebLogic cluster through the same HTTP path. ##### Configuring a WebLogic custom channel @@ -68,9 +102,11 @@ The basic requirements for configuring a custom channel for remote EJB and JMS a - Do _NOT_ set `outbound-enabled` to `true` on the network access point (the default is `false`), because this may cause internal network traffic to stall in an attempt to route through the network access point. -- Ensure you haven't enabled `calculated-listen-ports` for WebLogic dynamic cluster servers. The operator requires that a channel have the same port on each server in a cluster, but `calculated-listen-ports` causes the port to be different on each server. +- For operator controlled WebLogic clusters, ensure you haven't enabled `calculated-listen-ports` for WebLogic dynamic cluster servers. 
The operator requires that a channel have the same port on each server in a cluster, but `calculated-listen-ports` causes the port to be different on each server. -For example, here is a snippet of a WebLogic domain `config.xml` for channel `MyChannel` defined for a WebLogic dynamic cluster named `cluster-1`: +- For clusters that are _not_ operator controlled, minimally ensure that the server's default channel `ListenAddress` is configured. Oracle strongly recommends configuring a `ListenAddress` on all WebLogic Servers. Note that if a NAP's `ListenAddress` is left blank, then it will use the default channel's `ListenAddress`. (This is not a concern for operator controlled clusters as the operator sets the listen addresses on every WebLogic Server.) + +For example, here is a snippet of a WebLogic domain `config.xml` file for channel `MyChannel` defined for an operator controlled WebLogic dynamic cluster named `cluster-1`: ``` @@ -106,7 +142,7 @@ For example, here is a snippet of a WebLogic domain `config.xml` for channel `My ``` -And, here is a snippet of offline WLST code that corresponds to the above `config.xml` snippet: +And, here is a snippet of offline WLST code that corresponds to the above `config.xml` file snippet: ``` templateName = "cluster-1-template" @@ -117,6 +153,7 @@ And, here is a snippet of offline WLST code that corresponds to the above `confi set('Protocol', 't3') set('ListenPort', 7999) set('PublicPort', 30999) + set('PublicAddress', 'some.public.address.com') set('HttpEnabledForThisProtocol', true) set('TunnelingEnabled', true) set('OutboundEnabled', false) @@ -131,15 +168,17 @@ In this example: - The operator will automatically create a Kubernetes Service named `DOMAIN_UID-cluster-cluster-1` for both the custom and default channel. +- The operator will automatically set the `ListenAddress` on each WebLogic Server for each of its channels. 
+ - Internal clients running in the same Kubernetes cluster as the channel can access the cluster using `t3://DOMAIN_UID-cluster-cluster-1:8001`. -- External clients would be expected to access the cluster using the custom channel using URLs like `t3://some.public.address.com:30999` or, if using tunneling, `http://some.public.address.com:30999`. +- External clients would be expected to access the cluster using the custom channel with URLs like `t3://some.public.address.com:30999` or, if using tunneling, `http://some.public.address.com:30999`. ##### WebLogic custom channel notes - Channel configuration for a configured cluster requires configuring the same network access point on each server. The operator currently doesn't test or support network channels that have a different configuration on each server in the cluster. -- Additional steps are required for external clients beyond configuring the custom channel - see [Approaches](#approaches). +- Additional steps are required for external clients beyond configuring the custom channel - see [Overview](#overview). #### Setting up a `NodePort` @@ -195,6 +234,23 @@ spec: |`spec.ports.nodePort`|The external port that clients will use. This must match the external port that's configured on the WebLogic configured channels/network access points. By default, Kubernetes requires that this value range from `30000` to `32767`.| |`spec.ports.port` and `spec.targetPort`|These must match the port that's configured on the WebLogic configured channel/network access points.| +#### Enabling unknown host access + +##### When is it necessary to enable unknown host access? + +If a source WebLogic Server attempts to initiate an EJB, JMS, or JTA connection with a target WebLogic Server, then the target WebLogic Server will reject the connection by default if it cannot find the source server's listen address in its DNS. 
Such a failed connection attempt can yield log messages or exceptions like `"...RJVM has already been shutdown..."` or `"...address was valid earlier, but now we get..."`. + +This means that it's usually necessary to enable unknown host access on an external WebLogic Server so that it can support EJB, JMS, or JTA communication that is initiated by an operator hosted WebLogic Server. For example, if an operator hosted WebLogic Server with service address `mydomainuid-myservername` initiates a JMS connection to a remote WebLogic Server, then the remote server will implicitly attempt to look up `mydomainuid-myservername` in its DNS as part of the connection setup, and this lookup will typically fail. + +Similarly, this also means that it's necessary to enable unknown host access on an operator hosted WebLogic Server that accepts EJB or JMS connection requests from external WebLogic Servers when the external WebLogic Server's listen addresses cannot be resolved by the DNS running in the Kubernetes cluster. + +##### How to enable unknown host access + +To enable an 'unknown host' source WebLogic Server to initiate EJB, JMS, or JTA communication with a target WebLogic Server: + * Set the `weblogic.rjvm.allowUnknownHost` Java system property to `true` on each target WebLogic Server. + * For operator hosted WebLogic Servers, you can set this property by including `-Dweblogic.rjvm.allowUnknownHost=true` in the `JAVA_OPTIONS` [Domain environment variable]({{< relref "/userguide/managing-domains/domain-resource#jvm-memory-and-java-option-environment-variables" >}}) defined in the domain resource's `spec.serverPod.env` attribute. + * Also apply patch 30656708 on each target WebLogic Server for versions 12.2.1.4 (PS4) or earlier. + +#### Security notes + +- With some cloud providers, a load balancer or `NodePort` may implicitly expose a port to the public Internet. 
@@ -203,8 +259,7 @@ spec: - You can configure a custom channel with a secure protocol and two-way SSL to help prevent external access by unwanted clients. See [When is a WebLogic custom channel needed?](#when-is-a-weblogic-custom-channel-needed). - -#### Optional Reading +#### Optional reading - For sample JMS client code and JMS configuration, see [Run Standalone WebLogic JMS Clients on Kubernetes](https://blogs.oracle.com/weblogicserver/run-standalone-weblogic-jms-clients-on-kubernetes). diff --git a/docs-source/content/faq/namespace-management.md b/docs-source/content/faq/namespace-management.md index 861ff54a769..4d1561eddba 100644 --- a/docs-source/content/faq/namespace-management.md +++ b/docs-source/content/faq/namespace-management.md @@ -40,7 +40,7 @@ elkIntegrationEnabled: false externalDebugHttpPort: 30999 externalRestEnabled: false externalRestHttpsPort: 31001 -image: oracle/weblogic-kubernetes-operator:3.0.1 +image: oracle/weblogic-kubernetes-operator:3.0.2 imagePullPolicy: IfNotPresent internalDebugHttpPort: 30999 istioEnabled: false diff --git a/docs-source/content/quickstart/get-images.md b/docs-source/content/quickstart/get-images.md index 86ec76fbaf8..c912958fdeb 100644 --- a/docs-source/content/quickstart/get-images.md +++ b/docs-source/content/quickstart/get-images.md @@ -19,7 +19,7 @@ and accept the license agreement for the [WebLogic Server image](https://hub.doc 1. Pull the operator image: ```bash - $ docker pull oracle/weblogic-kubernetes-operator:3.0.1 + $ docker pull oracle/weblogic-kubernetes-operator:3.0.2 ``` 1. 
Pull the Traefik load balancer image: diff --git a/docs-source/content/quickstart/install.md b/docs-source/content/quickstart/install.md index 9a65f86c83e..6a821f483b3 100644 --- a/docs-source/content/quickstart/install.md +++ b/docs-source/content/quickstart/install.md @@ -71,7 +71,7 @@ $ helm install traefik-operator stable/traefik \ ```bash $ helm install sample-weblogic-operator kubernetes/charts/weblogic-operator \ --namespace sample-weblogic-operator-ns \ - --set image=oracle/weblogic-kubernetes-operator:3.0.1 \ + --set image=oracle/weblogic-kubernetes-operator:3.0.2 \ --set serviceAccount=sample-weblogic-operator-sa \ --set "domainNamespaces={}" \ --wait diff --git a/docs-source/content/release-notes.md b/docs-source/content/release-notes.md index be285312824..9405ed0785e 100644 --- a/docs-source/content/release-notes.md +++ b/docs-source/content/release-notes.md @@ -4,10 +4,11 @@ date: 2019-03-15T11:25:28-04:00 draft: false --- -### Recent changes +### Releases -| Date | Version | Introduces backward incompatibilities | Change | +| Date | Version | Introduces backward incompatibilities? | Changes | | --- | --- | --- | --- | +| September 15, 2020 | v3.0.2 | no | This release contains several fixes, including improvements to log rotation and a fix that avoids unnecessarily updating the domain status. | | August 13, 2020 | v3.0.1 | no | Fixed an issue preventing the REST interface from working after a Helm upgrade. Helm 3.1.3+ now required. | | July 17, 2020 | v3.0.0 | yes | Adds Model in Image feature and support for applying topology and configuration override changes without downtime. Removal of support for Helm 2.x. Operator performance improvements to manage many domains in the same Kubernetes cluster. | | June 22, 2020 | v2.6.0 | no | Kubernetes 1.16, 1.17, and 1.18 support. Removal of support for Kubernetes 1.13 and earlier. This release can be run in the same cluster with operators of either 2.5.0 and below, or with 3.x providing an upgrade path. 
Certified support of Oracle Linux Cloud Native Environment (OLCNE) 1.1 with Kubernetes 1.17.0. @@ -26,6 +27,22 @@ draft: false | April 4, 2018 | 0.2 | yes | Many Kubernetes artifact names and labels have changed. Also, the names of generated YAML files for creating a domain's PV and PVC have changed. Because of these changes, customers must recreate their operators and domains. | March 20, 2018 | | yes | Several files and input parameters have been renamed. This affects how operators and domains are created. It also changes generated Kubernetes artifacts, therefore customers must recreate their operators and domains. +### Change log + +#### Operator 3.0.2 + +* Removed unnecessary duplicated parameter in initialize-internal-operator-identity.sh script ([#1867](https://github.com/oracle/weblogic-kubernetes-operator/pull/1867)). +* Support nodeAffinity and nodeSelector for the operator in its Helm chart ([#1869](https://github.com/oracle/weblogic-kubernetes-operator/pull/1869)). +* Log file rotation enhancements and documentation ([#1872](https://github.com/oracle/weblogic-kubernetes-operator/pull/1872), [#1827](https://github.com/oracle/weblogic-kubernetes-operator/pull/1827)). +* Production support for the NGINX ingress controller ([#1878](https://github.com/oracle/weblogic-kubernetes-operator/pull/1878)). +* Prevent unnecessary changes to Domain status that were causing churn to the resourceVersion ([#1879](https://github.com/oracle/weblogic-kubernetes-operator/pull/1879)). +* Better reflect introspector status in the Domain status ([#1832](https://github.com/oracle/weblogic-kubernetes-operator/pull/1832)). +* Create each pod after any previous pods have been scheduled to allow for correct anti-affinity behavior ([#1855](https://github.com/oracle/weblogic-kubernetes-operator/pull/1855)). 
+ +#### Operator 3.0.1 + +* Resolved an issue where a Helm upgrade was incorrectly removing the operator's private key thereby disabling the operator's REST interface ([#1846](https://github.com/oracle/weblogic-kubernetes-operator/pull/1846)). + ### Known issues | Issue | Description | diff --git a/docs-source/content/userguide/introduction/architecture.md b/docs-source/content/userguide/introduction/architecture.md index a9933281906..59607d2caed 100644 --- a/docs-source/content/userguide/introduction/architecture.md +++ b/docs-source/content/userguide/introduction/architecture.md @@ -18,7 +18,7 @@ The operator is packaged in a [Docker image](https://hub.docker.com/r/oracle/web ``` $ docker login -$ docker pull oracle/weblogic-kubernetes-operator:3.0.1 +$ docker pull oracle/weblogic-kubernetes-operator:3.0.2 ``` For more details on acquiring the operator image and prerequisites for installing the operator, consult the [Quick Start guide]({{< relref "/quickstart/_index.md" >}}). diff --git a/docs-source/content/userguide/introduction/introduction.md b/docs-source/content/userguide/introduction/introduction.md index aca78248d89..c56ba183ada 100644 --- a/docs-source/content/userguide/introduction/introduction.md +++ b/docs-source/content/userguide/introduction/introduction.md @@ -16,7 +16,7 @@ Detailed instructions are available [here]({{< relref "/userguide/managing-opera ### Operator prerequisites -For the current production release 3.0.1: +For the current production release 3.0.2: * Kubernetes 1.14.8+, 1.15.7+, 1.16.0+, 1.17.0+, and 1.18.0+ (check with `kubectl version`). * Flannel networking v0.9.1-amd64 or later (check with `docker images | grep flannel`) *or* OpenShift SDN on OpenShift 4.3 systems. 
diff --git a/docs-source/content/userguide/managing-domains/_index.md b/docs-source/content/userguide/managing-domains/_index.md index 8f58491e1a3..724f90640f7 100644 --- a/docs-source/content/userguide/managing-domains/_index.md +++ b/docs-source/content/userguide/managing-domains/_index.md @@ -13,6 +13,7 @@ description: "Important considerations for WebLogic domains in Kubernetes." * [About the Domain resource](#about-the-domain-resource) * [Managing life cycle operations](#managing-life-cycle-operations) * [Scaling clusters](#scaling-clusters) +* [Log files](#log-files) #### Important considerations for WebLogic domains in Kubernetes @@ -32,9 +33,10 @@ Be aware of the following important considerations for WebLogic domains running [WebLogic domain in Docker image protection]({{}}). {{% /notice %}} -* _Log File Locations:_ The operator can automatically override WebLogic domain and server log locations using - configuration overrides. This occurs if the Domain `logHomeEnabled` field is explicitly set to `true`, or if `logHomeEnabled` isn't set +* _Log File Locations:_ The operator can automatically override WebLogic Server, domain, and introspector log locations. + This occurs if the Domain `logHomeEnabled` field is explicitly set to `true`, or if `logHomeEnabled` isn't set and `domainHomeSourceType` is set to `PersistentVolume`. When overriding, the log location will be the location specified by the `logHome` setting. + For additional log file tuning information, see [Log files](#log-files). * _Listen Address Overrides:_ The operator will automatically override all WebLogic domain default, SSL, admin, or custom channel listen addresses (using situational configuration overrides). 
These will become `domainUID` followed by a @@ -132,3 +134,65 @@ The operator let's you initiate scaling of clusters in various ways: * [Using the operator's REST APIs]({{< relref "/userguide/managing-domains/domain-lifecycle/scaling#calling-the-operators-rest-scale-api" >}}) * [Using WLDF policies]({{< relref "/userguide/managing-domains/domain-lifecycle/scaling#using-a-wldf-policy-rule-and-script-action-to-call-the-operators-rest-scale-api" >}}) * [Using a Prometheus action]({{< relref "/userguide/managing-domains/domain-lifecycle/scaling#using-a-prometheus-alert-action-to-call-the-operators-rest-scale-api" >}}) + +### Log files + +The operator can automatically override WebLogic Server, domain, and introspector `.log` and `.out` locations. +This occurs if the Domain `logHomeEnabled` field is explicitly set to `true`, or if `logHomeEnabled` isn't set +and `domainHomeSourceType` is set to `PersistentVolume`. When overriding, the log location will be the location specified by the `logHome` setting. + +If you want to fine tune the `.log` and `.out` rotation behavior for WebLogic Servers and domains, then +you can update the related `Log MBean` in your WebLogic configuration. 
Alternatively, for WebLogic +Servers, you can set corresponding system properties in `JAVA_OPTIONS`: + +- Here are some WLST offline examples for creating and accessing commonly tuned Log MBeans: + + ```bash + # domain log + cd('/') + create(dname,'Log') + cd('/Log/' + dname); + + # configured server log for a server named 'sname' + cd('/Servers/' + sname) + create(sname, 'Log') + cd('/Servers/' + sname + '/Log/' + sname) + + # templated (dynamic) server log for a template named 'tname' + cd('/ServerTemplates/' + tname) + create(tname,'Log') + cd('/ServerTemplates/' + tname + '/Log/' + tname) + ``` + +- Here is sample WLST offline code for commonly tuned Log MBean attributes: + + ```bash + # minimum log file size before rotation in kilobytes + set('FileMinSize', 1000) + + # maximum number of rotated files + set('FileCount', 10) + + # set to true to rotate file every time on startup (instead of append) + set('RotateLogOnStartup', 'true') + ``` + +- Here are the defaults for commonly tuned Log MBean attributes: + + | Log MBean Attribute | Production Mode Default | Development Mode Default | + | --------- | ----------------------- | ------------------------ | + | FileMinSize (in kilobytes) | 5000 | 500 | + | FileCount | 100 | 7 | + | RotateLogOnStartup | false | true | + +- For WebLogic Server `.log` and `.out` files (including both dynamic and configured servers), you can alternatively +set logging attributes using system properties that start with `weblogic.log.` +and that end with the corresponding Log MBean attribute name. + + For example, you can include `-Dweblogic.log.FileMinSize=1000 -Dweblogic.log.FileCount=10 -Dweblogic.log.RotateLogOnStartup=true` in `domain.spec.serverPod.env.name.JAVA_OPTIONS` to set the behavior for all WebLogic Servers in your domain. For information about setting `JAVA_OPTIONS`, see [Domain resource]({{< relref "/userguide/managing-domains/domain-resource/_index.md#jvm-memory-and-java-option-environment-variables" >}}). 
+ +{{% notice warning %}} +Kubernetes stores pod logs on each of its nodes, and, depending on the Kubernetes implementation, extra steps may be necessary to limit their disk space usage. +For more information, see [Kubernetes Logging Architecture](https://kubernetes.io/docs/concepts/cluster-administration/logging/). +{{% /notice %}} + diff --git a/docs-source/content/userguide/managing-domains/domain-lifecycle/restarting.md b/docs-source/content/userguide/managing-domains/domain-lifecycle/restarting.md index fc6657e7a54..fc2ad1642d4 100644 --- a/docs-source/content/userguide/managing-domains/domain-lifecycle/restarting.md +++ b/docs-source/content/userguide/managing-domains/domain-lifecycle/restarting.md @@ -188,7 +188,7 @@ d. Update the `image` field of the Domain YAML file, specifying the new image na ``` domain: spec: - image: oracle/weblogic-updated:3.0.1 + image: oracle/weblogic-updated:3.0.2 ``` e. The operator will now initiate a rolling restart, which will apply the updated image, for all the servers in the domain. diff --git a/docs-source/content/userguide/managing-domains/domain-resource.md b/docs-source/content/userguide/managing-domains/domain-resource.md index 442d7dffb29..6d22908fcdc 100644 --- a/docs-source/content/userguide/managing-domains/domain-resource.md +++ b/docs-source/content/userguide/managing-domains/domain-resource.md @@ -5,46 +5,70 @@ weight = 2 pre = " " +++ -Use this document to create your own [Domain resource](https://github.com/oracle/weblogic-kubernetes-operator/blob/master/docs/domains/Domain.md), which can be used to configure the operation of your WebLogic Server domain. The Domain resource does not replace the traditional domain configuration files, but instead cooperates with those files to describe the Kubernetes artifacts of the corresponding domain. 
For instance, the domain configuration will still specify deployed applications, data sources, and most other details about the domain while the Domain resource will specify the number of cluster members currently running or the persistent volumes that will be mounted into the containers running WebLogic Server instances. +#### Contents -Many of the samples accompanying the operator project include scripts to generate an initial Domain resource from a set of simplified inputs; however, the Domain resource is the actual source of truth for how the operator will manage each WebLogic Server domain. You are encouraged to either start with the Domain resource YAML files generated by the various samples or create Domain resources manually or by using other tools based on the schema referenced here or this documentation. +- [Overview](#overview) +- [Prerequisites](#prerequisites) +- [Deploying domain resource YAML files](#deploying-domain-resource-yaml-files) +- [Domain resource custom resource definition (CRD)](#domain-resource-custom-resource-definition-crd) +- [Domain resource attribute references](#domain-resource-attribute-references) +- [Using `kubectl explain`](#using-kubectl-explain) +- [Domain spec elements](#domain-spec-elements) +- [JVM memory and Java option environment variables](#jvm-memory-and-java-option-environment-variables) +- [Pod generation](#pod-generation) + +#### Overview -Swagger documentation is available [here](https://oracle.github.io/weblogic-kubernetes-operator/swagger/index.html). +Use this document to create your own Domain resource, which can be used to configure the operation of your WebLogic Server domain. The Domain resource does not replace the traditional domain configuration files, but instead cooperates with those files to describe the Kubernetes artifacts of the corresponding domain. 
For instance, the domain configuration will still specify deployed applications, data sources, and most other details about the domain while the Domain resource will specify the number of cluster members currently running or the persistent volumes that will be mounted into the containers running WebLogic Server instances. + +{{% notice tip %}} +Many of the samples accompanying the operator project include scripts to generate an initial Domain resource from a set of simplified inputs; however, the Domain resource is the actual source of truth for how the operator will manage each WebLogic Server domain. You are encouraged to either start with the Domain resource YAML files generated by the various samples or create Domain resources manually or by using other tools based on the schema referenced here or this documentation. +{{% /notice %}} #### Prerequisites -The following prerequisites must be fulfilled before proceeding with the creation of the resource: +The following prerequisites must be fulfilled before proceeding with the creation of a domain resource: -* Make sure the WebLogic Server Kubernetes Operator is running. * Create a Kubernetes Namespace for the Domain unless the intention is to use the default namespace. -* Create the Kubernetes Secrets containing the `username` and `password` of the administrative account in the same Kubernetes Namespace as the Domain. +* Make sure the WebLogic Server Kubernetes Operator is running and is configured to monitor the namespace. +* Make sure any resources that the domain resource references are deployed to the same namespace. For example, all domain resources have a `spec.webLogicCredentialsSecret` field that references a Kubernetes Secret containing the `username` and `password` of the WebLogic server administrative account. -#### YAML files +For example, see the [Quick Start]({{< relref "/quickstart/_index.md" >}}). + +#### Deploying domain resource YAML files Domains are defined using YAML files. 
For each WebLogic Server domain you want to run, you should create one Domain resource YAML file and apply it. In the example referenced below, the sample scripts generate a Domain resource YAML file that you can use as a basis. Copy the file and override the default settings so that it matches all the WebLogic Server domain parameters that define your domain. See the WebLogic Server samples, [Domain home on a PV]({{< relref "/samples/simple/domains/domain-home-on-pv/_index.md" >}}), [Domain home in Image]({{< relref "/samples/simple/domains/domain-home-in-image/_index.md" >}}), and [Model in Image]({{< relref "/samples/simple/domains/model-in-image/_index.md" >}}). -#### Kubernetes resources - After you have written your YAML files, you use them to create your domain artifacts using the `kubectl apply -f` command. ```none $ kubectl apply -f domain-resource.yaml ``` -#### Verify the results - To confirm that the Domain was created, use this command: +```none +$ kubectl get domains -n [namespace] +``` + +To view all of the attributes of a running domain, including the domain's status, use this command: + ```none $ kubectl describe domain [domain name] -n [namespace] ``` -#### Domain resource overview +Or this command: + +```none +$ kubectl get domain [domain name] -n [namespace] -o yaml +``` + +#### Domain resource custom resource definition (CRD) -The Domain type is defined by a CustomResourceDefinition (CRD) and, like all [Kubernetes objects](https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/), is described by three sections: `metadata`, `spec`, and `status`. +The Domain type is defined by a Kubernetes CustomResourceDefinition (CRD) and, like all [Kubernetes objects](https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/), is described by three sections: `metadata`, `spec`, and `status`. The operator installs the CRD for the Domain type when the operator first starts. 
Customers may also choose to install the CRD in advance by using one of the provided YAML files. Installing the CRD in advance allows you to run the operator without giving it privilege (through Kubernetes roles and bindings) to access or update the CRD or other cluster-scoped resources. This may be necessary in environments where the operator cannot have cluster-scoped privileges, such as OpenShift Dedicated. The operator's role based access control (RBAC) requirements are documented [here]({{< relref "/security/rbac.md" >}}). @@ -66,10 +90,27 @@ After the CustomResourceDefinition is installed, either by the operator or using $ kubectl get crd domains.weblogic.oracle ``` +#### Domain resource attribute references + +The domain resource `metadata` section names the Domain and its namespace. The name of the Domain is the default value for the `domain UID` which is used by the operator to distinguish domains running in the Kubernetes cluster that may have the same domain name. The Domain name must be unique in the namespace and the domain UID should be unique across the cluster. The domain UID, Domain resource name, and domain name (from the WebLogic domain configuration) may all be different. + +The domain resource `spec` section describes the intended running state of the domain, including intended runtime state of WebLogic Server instances, number of cluster members started, and details about Kubernetes Pod or Service generation, such as resource constraints, scheduling requirements, or volume mounts. + +The operator automatically updates the `status` section of a deployed domain resource to describe the actual running state of the domain, including WebLogic Server instance runtime states and current health. + +Here are some references you can use for the fields in these sections: + +- See [Domain spec elements](#domain-spec-elements) in this doc. +- See [Domain resource](https://github.com/oracle/weblogic-kubernetes-operator/blob/master/docs/domains/Domain.md).
+- Swagger documentation is available [here](https://oracle.github.io/weblogic-kubernetes-operator/swagger/index.html). +- Use [kubectl explain](#using-kubectl-explain) from the command line. + +#### Using `kubectl explain` + If you are using Kubernetes 1.16 or later, you can access the description of any field of the Domain using `kubectl explain`. For instance, the following command displays the description of the `domainUID` field: ```none -$ kubectl explain domains.spec.domainUID +$ kubectl explain domain.spec.domainUID KIND: Domain VERSION: weblogic.oracle/v8 @@ -85,12 +126,6 @@ DESCRIPTION: `metadata.name`. ``` -The `metadata` section names the Domain and its namespace. The name of the Domain is the default value for the `domain UID` which is used by the operator to distinguish domains running in the Kubernetes cluster that may have the same domain name. The Domain name must be unique in the namespace and the domain UID should be unique across the cluster. The domain UID, Domain resource name, and domain name (from the WebLogic domain configuration) may all be different. - -The `spec` section describes the intended running state of the domain, including intended runtime state of WebLogic Server instances, number of cluster members started, and details about Kubernetes Pod or Service generation, such as resource constraints, scheduling requirements, or volume mounts. - -The operator automatically updates the `status` section to describe the actual running state of the domain, including WebLogic Server instance runtime states and current health. - #### Domain spec elements The Domain `spec` section contains elements for configuring the domain operation and sub-sections specific to the Administration Server, specific clusters, or specific Managed Servers. @@ -104,7 +139,7 @@ Elements related to domain identification, container image, and domain home: * `domainHome`: The directory containing the WebLogic domain configuration inside the container.
Defaults to /shared/domains/domains/ if `domainHomeSourceType` is PersistentVolume. Defaults to /u01/oracle/user_projects/domains/ if `domainHomeSourceType` is Image. Defaults to /u01/domains/ if `domainHomeSourceType` is FromModel. * `domainHomeSourceType`: Domain home file system source type: Legal values: Image, PersistentVolume, FromModel. Image indicates that the domain home file system is present in the container image specified by the `image` field. PersistentVolume indicates that the domain home file system is located on a persistent volume. FromModel indicates that the domain home file system will be created and managed by the operator based on a WDT domain model. If this field is specified, it overrides the value of `domainHomeInImage`. If both fields are unspecified, then `domainHomeSourceType` defaults to Image. * `dataHome`: An optional directory in a server's container for data storage of default and custom file stores. If `dataHome` is not specified or its value is either not set or empty, then the data storage directories are determined from the WebLogic domain configuration. - + Elements related to logging: * `includeServerOutInPodLog`: Specifies whether the server .out file will be included in the Pod's log. Defaults to true. @@ -135,7 +170,7 @@ Elements related to specifying and overriding WebLogic domain configuration: * `overrideDistributionStrategy`: Determines how updated configuration overrides are distributed to already running WebLogic Server instances following introspection when the `domainHomeSourceType` is PersistentVolume or Image. Configuration overrides are generated during introspection from Secrets, the `overrideConfigMap` field, and WebLogic domain topology. Legal values are DYNAMIC, which means that the operator will distribute updated configuration overrides dynamically to running servers, and ON_RESTART, which means that servers will use updated configuration overrides only after the server's next restart. 
The selection of ON_RESTART will not cause servers to restart when there are updated configuration overrides available. See also `introspectVersion`. Defaults to DYNAMIC. * `secrets`: A list of names of the Secrets for WebLogic [configuration overrides]({{< relref "/userguide/managing-domains/configoverrides/_index.md" >}}) or model. If this field is specified, then the value of `spec.configOverrideSecrets` is ignored. * `introspectorJobActiveDeadlineSeconds`: The introspector job timeout value in seconds. If this field is specified, then the operator's ConfigMap `data.introspectorJobActiveDeadlineSeconds` value is ignored. Defaults to 120 seconds. - + * These elements are under `configuration.model`, only apply if the `domainHomeSourceType` is `FromModel`, and are discussed in [Model in Image]({{< relref "/userguide/managing-domains/model-in-image/_index.md" >}}). * `configMap`: Name of a ConfigMap containing the WebLogic Deploy Tooling model. @@ -154,7 +189,7 @@ Elements related to specifying and overriding WebLogic domain configuration: Elements related to Kubernetes Pod and Service generation: -* `serverPod`: Customization affecting the generation of Pods for WebLogic Server instances. +* `serverPod`: Customization affecting the generation of Pods for WebLogic Server instances. * `serverService`: Customization affecting the generation of Kubernetes Services for WebLogic Server instances. Sub-sections related to the Administration Server, specific clusters, or specific Managed Servers: @@ -165,6 +200,10 @@ Sub-sections related to the Administration Server, specific clusters, or specifi The elements `serverStartPolicy`, `serverStartState`, `serverPod` and `serverService` are repeated under `adminServer` and under each entry of `clusters` or `managedServers`. The values directly under `spec`, set the defaults for the entire domain. The values under a specific entry under `clusters`, set the defaults for cluster members of that cluster. 
The values under `adminServer` or an entry under `managedServers`, set the values for that specific server. Values from the domain scope and values from the cluster (for cluster members) are merged with or overridden by the setting for the specific server depending on the element. See [Startup and shutdown]({{< relref "/userguide/managing-domains/domain-lifecycle/startup.md" >}}) for details about `serverStartPolicy` combinations. +{{% notice note %}} +For additional domain resource attribute reference material, see [Domain resource attribute references](#domain-resource-attribute-references). +{{% /notice %}} + ### JVM memory and Java option environment variables You can use the following environment variables to specify JVM memory and JVM option arguments to WebLogic Server Managed Server and Node Manager instances: @@ -182,7 +221,8 @@ You can use the following environment variables to specify JVM memory and JVM op * If `NODEMGR_JAVA_OPTIONS` is not defined and `JAVA_OPTIONS` is defined, then the `JAVA_OPTIONS` value will be applied to the Node Manager instance. * If `NODEMGR_MEM_ARGS` is not defined, then default memory and Java security property values (`-Xms64m -Xmx100m -Djava.security.egd=file:/dev/./urandom`) will be applied to the Node Manager instance. It can be explicitly set to another value in your Domain YAML file using the `env` attribute under the `serverPod` configuration. * The `USER_MEM_ARGS` environment variable defaults to `-Djava.security.egd=file:/dev/./urandom` in all WebLogic Server pods and the WebLogic introspection job. It can be explicitly set to another value in your Domain YAML file using the `env` attribute under the `serverPod` configuration. -* Notice that the `NODEMGR_MEM_ARGS` and `USER_MEM_ARGS` environment variables both set `-Djava.security.egd=file:/dev/./urandom` by default. This respectively helps to speed up the Node Manager and WebLogic Server startup on systems with low entropy.
+* Notice that the `NODEMGR_MEM_ARGS` and `USER_MEM_ARGS` environment variables both set `-Djava.security.egd=file:/dev/./urandom` by default. This respectively helps to speed up the Node Manager and WebLogic Server startup on systems with low entropy. +* For a detailed discussion of Java and pod memory tuning see the [Pod memory and CPU resources FAQ]({{}}). * You can use `JAVA_OPTIONS` and `WLSDEPLOY_PROPERTIES` to disable Fast Application Notifications (FAN); see the [Disable Fast Application Notifications FAQ]({{}}) for details. This example snippet illustrates how to add some of the above environment variables using the `env` attribute under the `serverPod` configuration in your Domain YAML file. @@ -211,7 +251,7 @@ spec: value: "-Xms64m -Xmx100m -Djava.security.egd=file:/dev/./urandom " ``` -### Pod generation +#### Pod generation The operator creates a Pod for each running WebLogic Server instance. This Pod will have a container, named `weblogic-server`, based on the container image specified by the `image` field. Additional Pod or container content can be specified using the elements under `serverPod`. This includes Kubernetes sidecar and init containers, labels, annotations, volumes, volume mounts, scheduling constraints, including anti-affinity, [resource requirements](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/), or [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). 
diff --git a/docs-source/content/userguide/managing-domains/ingress/_index.md b/docs-source/content/userguide/managing-domains/ingress/_index.md index 44be92ed7e3..f036af6b1d2 100644 --- a/docs-source/content/userguide/managing-domains/ingress/_index.md +++ b/docs-source/content/userguide/managing-domains/ingress/_index.md @@ -34,7 +34,7 @@ The service, `serviceName` and `servicePort`, of a WebLogic cluster will be used object and the load balancer will route traffic to the WebLogic Servers within the cluster based on the rules. {{% notice note %}} -Most common ingress controllers, for example Traefik, Voyager, and nginx, +Most common ingress controllers, for example Traefik, Voyager, and NGINX, understand that there are zero or more actual pods behind the service, and they actually build their backend list and route requests to those backends directly, not through the service. This means that requests are properly balanced across the pods, according to the load balancing algorithm @@ -70,7 +70,7 @@ Information about how to install and configure these to load balance WebLogic cl - [Voyager guide](https://github.com/oracle/weblogic-kubernetes-operator/blob/master/kubernetes/samples/charts/voyager/README.md) {{% notice note %}} - For production environments, we recommend Traefik (2.2.1 or later), Apache or Voyager ingress controllers, or the load balancer provided by your cloud provider. + For production environments, we recommend Traefik (2.2.1 or later), Apache, NGINX, or Voyager ingress controllers, or the load balancer provided by your cloud provider. 
{{% /notice %}} Samples are also provided for these two ingress controllers, showing how to manage multiple WebLogic clusters as the backends, using different routing rules, host-routing and path-routing; and TLS termination: diff --git a/docs-source/content/userguide/managing-operators/installation/_index.md b/docs-source/content/userguide/managing-operators/installation/_index.md index 55776c6ef87..517194c054c 100644 --- a/docs-source/content/userguide/managing-operators/installation/_index.md +++ b/docs-source/content/userguide/managing-operators/installation/_index.md @@ -115,7 +115,7 @@ the `helm upgrade` command requires that you supply a new Helm chart and image. ``` $ helm upgrade \ --reuse-values \ - --set image=oracle/weblogic-kubernetes-operator:3.0.1 \ + --set image=oracle/weblogic-kubernetes-operator:3.0.2 \ --namespace weblogic-operator-namespace \ --wait \ weblogic-operator \ diff --git a/docs-source/content/userguide/managing-operators/using-the-operator/using-helm.md b/docs-source/content/userguide/managing-operators/using-the-operator/using-helm.md index a0462ce5628..9f375360c27 100644 --- a/docs-source/content/userguide/managing-operators/using-the-operator/using-helm.md +++ b/docs-source/content/userguide/managing-operators/using-the-operator/using-helm.md @@ -129,7 +129,7 @@ javaLoggingLevel: "FINE" Specifies the Docker image containing the operator code. -Defaults to `weblogic-kubernetes-operator:3.0.1`. +Defaults to `weblogic-kubernetes-operator:3.0.2`. Example: ``` @@ -155,6 +155,40 @@ imagePullSecrets: - name: "my-image-pull-secret" ``` +##### `nodeSelector` +Allows you to run the operator Pod on a Node whose labels match the specified `nodeSelector` labels. You can use this optional feature if you want the operator Pod to run on a Node with particular labels. See [Assign Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) in the Kubernetes documentation for more details. 
This is not required if the operator Pod can run on any Node. + +Example: +``` +nodeSelector: + disktype: ssd +``` + +##### `nodeAffinity` +Allows you to constrain the operator Pod to be scheduled on a Node with certain labels; it is conceptually similar to `nodeSelector`. `nodeAffinity` provides advanced capabilities to limit Pod placement on specific Nodes. See [Assign Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity) in the Kubernetes documentation for more details. This is optional and not required if the operator Pod can run on any Node or when using `nodeSelector`. + +Example: +``` +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: nodeType + operator: In + values: + - dev + - test + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + preference: + matchExpressions: + - key: another-node-label-key + operator: In + values: + - another-node-label-value +``` + #### WebLogic domain management ##### `domainNamespaces` diff --git a/docs/charts/index.yaml b/docs/charts/index.yaml index 906db3fd745..d3d8c325c62 100644 --- a/docs/charts/index.yaml +++ b/docs/charts/index.yaml @@ -2,7 +2,15 @@ apiVersion: v1 entries: weblogic-operator: - apiVersion: v1 - created: "2020-08-11T15:14:12.218809-04:00" + created: "2020-09-11T12:55:24.710015-04:00" + description: Helm chart for configuring the WebLogic operator. + digest: 84b5989fe8f2392d2b3b0f721bdab1562566d7d885324beafd9fc9e658b13cd3 + name: weblogic-operator + urls: + - https://oracle.github.io/weblogic-kubernetes-operator/charts/weblogic-operator-3.0.2.tgz + version: 3.0.2 + - apiVersion: v1 + created: "2020-09-11T12:55:24.707566-04:00" description: Helm chart for configuring the WebLogic operator. 
digest: e7654ad3f2168f54b3a4b133bf8a86ea12bc474e5ee1d3ab14e1cf53012e9772 name: weblogic-operator @@ -10,7 +18,7 @@ entries: - https://oracle.github.io/weblogic-kubernetes-operator/charts/weblogic-operator-3.0.1.tgz version: 3.0.1 - apiVersion: v1 - created: "2020-08-11T15:14:12.215675-04:00" + created: "2020-09-11T12:55:24.705951-04:00" description: Helm chart for configuring the WebLogic operator. digest: 303288a48a6075d52538bb4e17dd46f8b1431158ee81bc878d9f1f264d30192e name: weblogic-operator @@ -18,7 +26,7 @@ entries: - https://oracle.github.io/weblogic-kubernetes-operator/charts/weblogic-operator-3.0.0.tgz version: 3.0.0 - apiVersion: v1 - created: "2020-08-11T15:14:12.214502-04:00" + created: "2020-09-11T12:55:24.703932-04:00" description: Helm chart for configuring the WebLogic operator. digest: 5f4cd8f4f3282b52b5e90a1169f26986e8272671845053606ade9c855fb04151 name: weblogic-operator @@ -26,7 +34,7 @@ entries: - https://oracle.github.io/weblogic-kubernetes-operator/charts/weblogic-operator-3.0.0-rc1.tgz version: 3.0.0-rc1 - apiVersion: v1 - created: "2020-08-11T15:14:12.212883-04:00" + created: "2020-09-11T12:55:24.702155-04:00" description: Helm chart for configuring the WebLogic operator. digest: d441888a8deae1b1339e7585e3b437dfd2533303e46e842d7378e16db665e234 name: weblogic-operator @@ -34,7 +42,7 @@ entries: - https://oracle.github.io/weblogic-kubernetes-operator/charts/weblogic-operator-2.6.0.tgz version: 2.6.0 - apiVersion: v1 - created: "2020-08-11T15:14:12.21132-04:00" + created: "2020-09-11T12:55:24.700461-04:00" description: Helm chart for configuring the WebLogic operator. 
digest: fe41421b7dc45dc8a3b2888d3a626a37f5d3c8e1fa292fb6699deedc5e1db33d name: weblogic-operator @@ -42,7 +50,7 @@ entries: - https://oracle.github.io/weblogic-kubernetes-operator/charts/weblogic-operator-2.5.0.tgz version: 2.5.0 - apiVersion: v1 - created: "2020-08-11T15:14:12.210065-04:00" + created: "2020-09-11T12:55:24.699148-04:00" description: Helm chart for configuring the WebLogic operator. digest: b36bd32083f67453a62d089a2c09ce38e6655d88ac8a7b38691230c55c40e672 name: weblogic-operator @@ -50,7 +58,7 @@ entries: - https://oracle.github.io/weblogic-kubernetes-operator/charts/weblogic-operator-2.4.0.tgz version: 2.4.0 - apiVersion: v1 - created: "2020-08-11T15:14:12.203115-04:00" + created: "2020-09-11T12:55:24.697544-04:00" description: Helm chart for configuring the WebLogic operator. digest: a3eafe4c2c6ff49384e56421201e59a3737d651af8d5b605b87a19eb1f6f1dc3 name: weblogic-operator @@ -58,7 +66,7 @@ entries: - https://oracle.github.io/weblogic-kubernetes-operator/charts/weblogic-operator-2.3.1.tgz version: 2.3.1 - apiVersion: v1 - created: "2020-08-11T15:14:12.191067-04:00" + created: "2020-09-11T12:55:24.690637-04:00" description: Helm chart for configuring the WebLogic operator. digest: cbc6caaa6eb28e3c7e906ede14b2ae511a0b35fc12a8e3ab629155b09993e8b2 name: weblogic-operator @@ -66,7 +74,7 @@ entries: - https://oracle.github.io/weblogic-kubernetes-operator/charts/weblogic-operator-2.3.0.tgz version: 2.3.0 - apiVersion: v1 - created: "2020-08-11T15:14:12.189715-04:00" + created: "2020-09-11T12:55:24.68857-04:00" description: Helm chart for configuring the WebLogic operator. 
digest: 23d5a1c554fa8211cc1e86b7ade09460917cb2069e68fb4bfdddafc8db44fdcd name: weblogic-operator @@ -74,7 +82,7 @@ entries: - https://oracle.github.io/weblogic-kubernetes-operator/charts/weblogic-operator-2.2.1.tgz version: 2.2.1 - apiVersion: v1 - created: "2020-08-11T15:14:12.188569-04:00" + created: "2020-09-11T12:55:24.687008-04:00" description: Helm chart for configuring the WebLogic operator. digest: bba303686cb55d84fe8c0d693a2436e7e686b028085b56e012f6381699a3911f name: weblogic-operator @@ -82,7 +90,7 @@ entries: - https://oracle.github.io/weblogic-kubernetes-operator/charts/weblogic-operator-2.2.0.tgz version: 2.2.0 - apiVersion: v1 - created: "2020-08-11T15:14:12.18727-04:00" + created: "2020-09-11T12:55:24.685435-04:00" description: Helm chart for configuring the WebLogic operator. digest: 391e23c0969ada5f0cd2a088ddc6f11f237f57521801ed3925db2149a8437a0d name: weblogic-operator @@ -90,11 +98,11 @@ entries: - https://oracle.github.io/weblogic-kubernetes-operator/charts/weblogic-operator-2.1.tgz version: "2.1" - apiVersion: v1 - created: "2020-08-11T15:14:12.185637-04:00" + created: "2020-09-11T12:55:24.683893-04:00" description: Helm chart for configuring the WebLogic operator. 
digest: 298acda78ab73db6b7ba6f2752311bfa40c65874e03fb196b70976192211c1a5 name: weblogic-operator urls: - https://oracle.github.io/weblogic-kubernetes-operator/charts/weblogic-operator-2.0.1.tgz version: 2.0.1 -generated: "2020-08-11T15:14:12.184184-04:00" +generated: "2020-09-11T12:55:24.68161-04:00" diff --git a/docs/charts/weblogic-operator-3.0.2.tgz b/docs/charts/weblogic-operator-3.0.2.tgz new file mode 100644 index 00000000000..0dabaf664ae Binary files /dev/null and b/docs/charts/weblogic-operator-3.0.2.tgz differ diff --git a/integration-tests/pom.xml b/integration-tests/pom.xml index 4750163f5b1..1b24fa56ee8 100644 --- a/integration-tests/pom.xml +++ b/integration-tests/pom.xml @@ -7,7 +7,7 @@ oracle.kubernetes operator-parent - 3.0.1 + 3.0.2 operator-integration-tests diff --git a/json-schema-maven-plugin/pom.xml b/json-schema-maven-plugin/pom.xml index 4a0245850ec..4ea9203a35f 100644 --- a/json-schema-maven-plugin/pom.xml +++ b/json-schema-maven-plugin/pom.xml @@ -7,7 +7,7 @@ operator-parent oracle.kubernetes - 3.0.1 + 3.0.2 jsonschema-maven-plugin diff --git a/json-schema/pom.xml b/json-schema/pom.xml index 3e650cd2313..802070e3ac9 100644 --- a/json-schema/pom.xml +++ b/json-schema/pom.xml @@ -7,7 +7,7 @@ operator-parent oracle.kubernetes - 3.0.1 + 3.0.2 json-schema diff --git a/kubernetes/charts/weblogic-operator/Chart.yaml b/kubernetes/charts/weblogic-operator/Chart.yaml index c6ceff4fd4f..f713bf4693a 100644 --- a/kubernetes/charts/weblogic-operator/Chart.yaml +++ b/kubernetes/charts/weblogic-operator/Chart.yaml @@ -3,5 +3,5 @@ name: weblogic-operator apiVersion: v1 -version: 3.0.1 +version: 3.0.2 description: Helm chart for configuring the WebLogic operator. 
diff --git a/kubernetes/charts/weblogic-operator/templates/_operator-dep.tpl b/kubernetes/charts/weblogic-operator/templates/_operator-dep.tpl index f737078f506..e2ff3b08ec6 100644 --- a/kubernetes/charts/weblogic-operator/templates/_operator-dep.tpl +++ b/kubernetes/charts/weblogic-operator/templates/_operator-dep.tpl @@ -22,6 +22,14 @@ spec: app: "weblogic-operator" spec: serviceAccountName: {{ .serviceAccount | quote }} + {{- with .nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} containers: - name: "weblogic-operator" image: {{ .image | quote }} diff --git a/kubernetes/charts/weblogic-operator/values.yaml b/kubernetes/charts/weblogic-operator/values.yaml index 5829c2ea093..979bfcbd29c 100644 --- a/kubernetes/charts/weblogic-operator/values.yaml +++ b/kubernetes/charts/weblogic-operator/values.yaml @@ -25,7 +25,7 @@ domainNamespaces: - "default" # image specifies the docker image containing the operator code. -image: "oracle/weblogic-kubernetes-operator:3.0.1" +image: "oracle/weblogic-kubernetes-operator:3.0.2" # imagePullPolicy specifies the image pull policy for the operator docker image. imagePullPolicy: "IfNotPresent" @@ -64,7 +64,14 @@ externalRestHttpsPort: 31001 # kubernetes/samples/scripts/rest/generate-external-rest-identity.sh #externalRestIdentitySecret: -# remoteDebugNodePortEnabled specifies whether or not the operator will start a Java remote debug server on the +# javaLoggingLevel specifies the Java logging level for the operator. +# Valid values are: "SEVERE", "WARNING", "INFO", "CONFIG", "FINE", "FINER", and "FINEST". +javaLoggingLevel: "INFO" + +# Values related to debugging the operator. +# Customers should not need to use the following properties + +# remoteDebugNodePortEnabled specifies whether or not the operator will provide a Java remote debug interface on the # provided port. 
If the 'suspendOnDebugStartup' property is specified, the operator will suspend execution # until a remote debugger has attached. # The 'internalDebugHttpPort' property controls the port number inside the Kubernetes diff --git a/kubernetes/pom.xml b/kubernetes/pom.xml index d3c65bb3620..99c49c41baf 100644 --- a/kubernetes/pom.xml +++ b/kubernetes/pom.xml @@ -9,7 +9,7 @@ oracle.kubernetes operator-parent - 3.0.1 + 3.0.2 installation-tests diff --git a/new-integration-tests/pom.xml b/new-integration-tests/pom.xml index 40d44e77bdf..4899d7978e1 100644 --- a/new-integration-tests/pom.xml +++ b/new-integration-tests/pom.xml @@ -7,7 +7,7 @@ oracle.kubernetes operator-parent - 3.0.1 + 3.0.2 new-integration-tests diff --git a/new-integration-tests/src/test/java/oracle/weblogic/kubernetes/TestConstants.java b/new-integration-tests/src/test/java/oracle/weblogic/kubernetes/TestConstants.java index 5ff493b5c24..0c040297b7d 100644 --- a/new-integration-tests/src/test/java/oracle/weblogic/kubernetes/TestConstants.java +++ b/new-integration-tests/src/test/java/oracle/weblogic/kubernetes/TestConstants.java @@ -13,70 +13,79 @@ public interface TestConstants { // domain constants - public static final String DOMAIN_VERSION = "v8"; + public static final String DOMAIN_VERSION = Optional.ofNullable(System.getenv("DOMAIN_VERSION")) + .orElse("v8"); public static final String DOMAIN_API_VERSION = "weblogic.oracle/" + DOMAIN_VERSION; public static final String ADMIN_SERVER_NAME_BASE = "admin-server"; public static final String MANAGED_SERVER_NAME_BASE = "managed-server"; public static final String WLS_DOMAIN_TYPE = "WLS"; public static final String WLS_DEFAULT_CHANNEL_NAME = "default"; - public static final String DEFAULT_WLS_IMAGE_TAGS = "12.2.1.3, 14.1.1.0"; + public static final String DEFAULT_WLS_IMAGE_TAGS = "12.2.1.4, 14.1.1.0-11"; // operator constants public static final String OPERATOR_RELEASE_NAME = "weblogic-operator"; public static final String OPERATOR_CHART_DIR = - 
"../kubernetes/charts/weblogic-operator"; + "../kubernetes/charts/weblogic-operator"; public static final String IMAGE_NAME_OPERATOR = - "oracle/weblogic-kubernetes-operator"; + "oracle/weblogic-kubernetes-operator"; public static final String OPERATOR_DOCKER_BUILD_SCRIPT = - "../buildDockerImage.sh"; + "../buildDockerImage.sh"; public static final String OPERATOR_SERVICE_NAME = "internal-weblogic-operator-svc"; public static final String REPO_DUMMY_VALUE = "dummy"; public static final String REPO_SECRET_NAME = "ocir-secret"; public static final String REPO_REGISTRY = Optional.ofNullable(System.getenv("REPO_REGISTRY")) - .orElse(REPO_DUMMY_VALUE); + .orElse(REPO_DUMMY_VALUE); public static final String REPO_DEFAULT = "phx.ocir.io/weblogick8s/"; public static final String KIND_REPO = System.getenv("KIND_REPO"); public static final String REPO_NAME = Optional.ofNullable(KIND_REPO) - .orElse(!REPO_REGISTRY.equals(REPO_DUMMY_VALUE) ? REPO_DEFAULT : ""); + .orElse(!REPO_REGISTRY.equals(REPO_DUMMY_VALUE) ? 
REPO_DEFAULT : ""); public static final String REPO_USERNAME = Optional.ofNullable(System.getenv("REPO_USERNAME")) - .orElse(REPO_DUMMY_VALUE); + .orElse(REPO_DUMMY_VALUE); public static final String REPO_PASSWORD = Optional.ofNullable(System.getenv("REPO_PASSWORD")) - .orElse(REPO_DUMMY_VALUE); + .orElse(REPO_DUMMY_VALUE); public static final String REPO_EMAIL = Optional.ofNullable(System.getenv("REPO_EMAIL")) - .orElse(REPO_DUMMY_VALUE); + .orElse(REPO_DUMMY_VALUE); + public static final String OPERATOR_GITHUB_CHART_REPO_URL = + "https://oracle.github.io/weblogic-kubernetes-operator/charts"; // OCR registry public static final String OCR_SECRET_NAME = "ocr-secret"; public static final String OCR_REGISTRY = "container-registry.oracle.com"; public static final String OCR_USERNAME = Optional.ofNullable(System.getenv("OCR_USERNAME")) - .orElse(REPO_DUMMY_VALUE); + .orElse(REPO_DUMMY_VALUE); public static final String OCR_PASSWORD = Optional.ofNullable(System.getenv("OCR_PASSWORD")) - .orElse(REPO_DUMMY_VALUE); + .orElse(REPO_DUMMY_VALUE); public static final String OCR_EMAIL = Optional.ofNullable(System.getenv("OCR_EMAIL")) - .orElse(REPO_DUMMY_VALUE); + .orElse(REPO_DUMMY_VALUE); // jenkins constants public static final String BUILD_ID = Optional.ofNullable(System.getenv("BUILD_ID")) - .orElse(""); + .orElse(""); public static final String BRANCH_NAME_FROM_JENKINS = Optional.ofNullable(System.getenv("BRANCH")) - .orElse(""); + .orElse(""); public static final String K8S_NODEPORT_HOST = Optional.ofNullable(System.getenv("K8S_NODEPORT_HOST")) - .orElse(assertDoesNotThrow(() -> InetAddress.getLocalHost().getHostName())); + .orElse(assertDoesNotThrow(() -> InetAddress.getLocalHost().getHostName())); public static final String GOOGLE_REPO_URL = "https://kubernetes-charts.storage.googleapis.com/"; public static final String RESULTS_ROOT = System.getenv().getOrDefault("RESULT_ROOT", - System.getProperty("java.io.tmpdir")) + "/ittestsresults"; + 
System.getProperty("java.io.tmpdir")) + "/ittestsresults"; public static final String LOGS_DIR = System.getenv().getOrDefault("RESULT_ROOT", - System.getProperty("java.io.tmpdir")) + "/diagnosticlogs"; + System.getProperty("java.io.tmpdir")) + "/diagnosticlogs"; public static final String PV_ROOT = System.getenv().getOrDefault("PV_ROOT", - System.getProperty("java.io.tmpdir") + "/ittestspvroot"); + System.getProperty("java.io.tmpdir") + "/ittestspvroot"); // NGINX constants public static final String NGINX_RELEASE_NAME = "nginx-release" + BUILD_ID; public static final String STABLE_REPO_NAME = "stable"; public static final String NGINX_CHART_NAME = "nginx-ingress"; + // Traefik constants + public static final String TRAEFIK_REPO_URL = "https://containous.github.io/traefik-helm-chart"; + public static final String TRAEFIK_REPO_NAME = "traefik"; + public static final String TRAEFIK_RELEASE_NAME = "traefik-release" + BUILD_ID; + public static final String TRAEFIK_CHART_NAME = "traefik"; + // Voyager constants public static final String APPSCODE_REPO_URL = "https://charts.appscode.com/stable/"; public static final String VOYAGER_RELEASE_NAME = "voyager-release" + BUILD_ID; @@ -84,6 +93,44 @@ public interface TestConstants { public static final String VOYAGER_CHART_NAME = "voyager"; public static final String VOYAGER_CHART_VERSION = "12.0.0"; + // Apache constants + public static final String APACHE_IMAGE_NAME = "phx.ocir.io/weblogick8s/oracle/apache"; + public static final String APACHE_IMAGE_VERSION = "12.2.1.3"; + public static final String APACHE_IMAGE = APACHE_IMAGE_NAME + ":" + APACHE_IMAGE_VERSION; + public static final String APACHE_RELEASE_NAME = "apache-release" + BUILD_ID; + public static final String APACHE_SAMPLE_CHART_DIR = "../kubernetes/samples/charts/apache-webtier"; + + // ELK Stack and WebLogic logging exporter constants + public static final String ELASTICSEARCH_NAME = "elasticsearch"; + public static final String ELK_STACK_VERSION = "7.8.1"; + 
public static final String ELASTICSEARCH_IMAGE = ELASTICSEARCH_NAME + ":" + ELK_STACK_VERSION; + public static final String ELASTICSEARCH_HOST = "elasticsearch.default.svc.cluster.local"; + public static final int ELASTICSEARCH_HTTP_PORT = 9200; + public static final int ELASTICSEARCH_HTTPS_PORT = 9300; + public static final String ELKSTACK_NAMESPACE = "default"; + public static final String LOGSTASH_INDEX_KEY = "logstash"; + public static final String WEBLOGIC_INDEX_KEY = "wls"; + public static final String KIBANA_INDEX_KEY = "kibana"; + public static final String KIBANA_NAME = "kibana"; + public static final String KIBANA_IMAGE = KIBANA_NAME + ":" + ELK_STACK_VERSION; + public static final String KIBANA_TYPE = "NodePort"; + public static final int KIBANA_PORT = 5601; + public static final String LOGSTASH_NAME = "logstash"; + public static final String LOGSTASH_IMAGE = LOGSTASH_NAME + ":" + ELK_STACK_VERSION; + public static final String JAVA_LOGGING_LEVEL_VALUE = "INFO"; + + public static final String WLS_LOGGING_EXPORTER_JAR_VERSION = "1.0.0"; + public static final String WLS_LOGGING_EXPORTER_JAR_REPOS = + "https://github.com/oracle/weblogic-logging-exporter/releases/download/v" + WLS_LOGGING_EXPORTER_JAR_VERSION; + public static final String WLS_LOGGING_EXPORTER_JAR_NAME = + "weblogic-logging-exporter-" + WLS_LOGGING_EXPORTER_JAR_VERSION + ".jar"; + public static final String SNAKE_YAML_JAR_VERSION = "1.23"; + public static final String SNAKE_YAML_JAR_REPOS = + "https://repo1.maven.org/maven2/org/yaml/snakeyaml/" + SNAKE_YAML_JAR_VERSION; + public static final String SNAKE_YAML_JAR_NAME = "snakeyaml-" + SNAKE_YAML_JAR_VERSION + ".jar"; + public static final String WLS_LOGGING_EXPORTER_YAML_FILE_NAME = "WebLogicLoggingExporter.yaml"; + public static final String COPY_WLS_LOGGING_EXPORTER_FILE_NAME = "copy-logging-files-cmds.txt"; + // MII image constants public static final String MII_BASIC_WDT_MODEL_FILE = "model-singleclusterdomain-sampleapp-wls.yaml"; public 
static final String MII_BASIC_IMAGE_NAME = REPO_NAME + "mii-basic-image"; @@ -99,7 +146,6 @@ public interface TestConstants { public static final String READ_STATE_COMMAND = "/weblogic-operator/scripts/readState.sh"; // WDT domain-in-image constants - public static final String WDT_BASIC_MODEL_FILE = "wdt-singlecluster-sampleapp-usingprop-wls.yaml"; public static final String WDT_BASIC_MODEL_PROPERTIES_FILE = "wdt-singleclusterdomain-sampleapp-wls.properties"; public static final String WDT_BASIC_IMAGE_NAME = REPO_NAME + "wdt-basic-image"; @@ -111,12 +157,15 @@ public interface TestConstants { //monitoring constants public static final String MONITORING_EXPORTER_VERSION = Optional.ofNullable(System.getenv( - "MONITORING_EXPORTER_VERSION")) - .orElse("1.1.2"); + "MONITORING_EXPORTER_VERSION")) + .orElse("1.2.0"); + public static final String MONITORING_EXPORTER_BRANCH = Optional.ofNullable(System.getenv( + "MONITORING_EXPORTER_BRANCH")) + .orElse("master"); public static final String PROMETHEUS_CHART_VERSION = Optional.ofNullable(System.getenv("PROMETHEUS_CHART_VERSION")) - .orElse("11.1.5"); + .orElse("11.1.5"); public static final String GRAFANA_CHART_VERSION = Optional.ofNullable(System.getenv("GRAFANA_CHART_VERSION")) - .orElse("5.0.20"); + .orElse("5.0.20"); // credentials public static final String ADMIN_USERNAME_DEFAULT = "weblogic"; public static final String ADMIN_PASSWORD_DEFAULT = "welcome1"; @@ -126,7 +175,7 @@ public interface TestConstants { // REST API public static final String PROJECT_ROOT = System.getProperty("user.dir"); public static final String GEN_EXTERNAL_REST_IDENTITY_FILE = - PROJECT_ROOT + "/../kubernetes/samples/scripts/rest/generate-external-rest-identity.sh"; + PROJECT_ROOT + "/../kubernetes/samples/scripts/rest/generate-external-rest-identity.sh"; public static final String DEFAULT_EXTERNAL_REST_IDENTITY_SECRET_NAME = "weblogic-operator-external-rest-identity"; // JRF constants @@ -141,4 +190,4 @@ public interface TestConstants { 
//MySQL database constants public static final String MYSQL_VERSION = "5.6"; -} +} \ No newline at end of file diff --git a/new-integration-tests/src/test/java/oracle/weblogic/kubernetes/actions/impl/primitive/Kubernetes.java b/new-integration-tests/src/test/java/oracle/weblogic/kubernetes/actions/impl/primitive/Kubernetes.java index cb60f4d7fb4..b519902bf3a 100644 --- a/new-integration-tests/src/test/java/oracle/weblogic/kubernetes/actions/impl/primitive/Kubernetes.java +++ b/new-integration-tests/src/test/java/oracle/weblogic/kubernetes/actions/impl/primitive/Kubernetes.java @@ -43,6 +43,7 @@ import io.kubernetes.client.openapi.models.V1ClusterRoleList; import io.kubernetes.client.openapi.models.V1ConfigMap; import io.kubernetes.client.openapi.models.V1ConfigMapList; +import io.kubernetes.client.openapi.models.V1Container; import io.kubernetes.client.openapi.models.V1ContainerStatus; import io.kubernetes.client.openapi.models.V1Deployment; import io.kubernetes.client.openapi.models.V1DeploymentList; @@ -75,6 +76,7 @@ import io.kubernetes.client.openapi.models.V1ServiceList; import io.kubernetes.client.openapi.models.V1ServicePort; import io.kubernetes.client.util.ClientBuilder; +import io.kubernetes.client.util.PatchUtils; import oracle.weblogic.domain.Domain; import oracle.weblogic.domain.DomainList; import oracle.weblogic.kubernetes.logging.LoggingFacade; @@ -143,8 +145,8 @@ public class Kubernetes { initializeGenericKubernetesApiClients(); // create standard, reusable retry/backoff policy withStandardRetryPolicy = with().pollDelay(2, SECONDS) - .and().with().pollInterval(10, SECONDS) - .atMost(5, MINUTES).await(); + .and().with().pollInterval(10, SECONDS) + .atMost(5, MINUTES).await(); } catch (IOException ioex) { throw new ExceptionInInitializerError(ioex); } @@ -156,134 +158,134 @@ public class Kubernetes { private static void initializeGenericKubernetesApiClients() { // Invocation parameters aren't changing so create them as statics configMapClient = - new 
GenericKubernetesApi<>( - V1ConfigMap.class, // the api type class - V1ConfigMapList.class, // the api list type class - "", // the api group - "v1", // the api version - "configmaps", // the resource plural - apiClient //the api client - ); + new GenericKubernetesApi<>( + V1ConfigMap.class, // the api type class + V1ConfigMapList.class, // the api list type class + "", // the api group + "v1", // the api version + "configmaps", // the resource plural + apiClient //the api client + ); crdClient = - new GenericKubernetesApi<>( - Domain.class, // the api type class - DomainList.class, // the api list type class - DOMAIN_GROUP, // the api group - DOMAIN_VERSION, // the api version - DOMAIN_PLURAL, // the resource plural - apiClient //the api client - ); + new GenericKubernetesApi<>( + Domain.class, // the api type class + DomainList.class, // the api list type class + DOMAIN_GROUP, // the api group + DOMAIN_VERSION, // the api version + DOMAIN_PLURAL, // the resource plural + apiClient //the api client + ); deploymentClient = - new GenericKubernetesApi<>( - V1Deployment.class, // the api type class - V1DeploymentList.class, // the api list type class - "", // the api group - "v1", // the api version - "deployments", // the resource plural - apiClient //the api client - ); + new GenericKubernetesApi<>( + V1Deployment.class, // the api type class + V1DeploymentList.class, // the api list type class + "", // the api group + "v1", // the api version + "deployments", // the resource plural + apiClient //the api client + ); jobClient = - new GenericKubernetesApi<>( - V1Job.class, // the api type class - V1JobList.class, // the api list type class - "", // the api group - "v1", // the api version - "jobs", // the resource plural - apiClient //the api client - ); + new GenericKubernetesApi<>( + V1Job.class, // the api type class + V1JobList.class, // the api list type class + "batch", // the api group + "v1", // the api version + "jobs", // the resource plural + apiClient 
//the api client + ); namespaceClient = - new GenericKubernetesApi<>( - V1Namespace.class, // the api type class - V1NamespaceList.class, // the api list type class - "", // the api group - "v1", // the api version - "namespaces", // the resource plural - apiClient //the api client - ); + new GenericKubernetesApi<>( + V1Namespace.class, // the api type class + V1NamespaceList.class, // the api list type class + "", // the api group + "v1", // the api version + "namespaces", // the resource plural + apiClient //the api client + ); podClient = - new GenericKubernetesApi<>( - V1Pod.class, // the api type class - V1PodList.class, // the api list type class - "", // the api group - "v1", // the api version - "pods", // the resource plural - apiClient //the api client - ); + new GenericKubernetesApi<>( + V1Pod.class, // the api type class + V1PodList.class, // the api list type class + "", // the api group + "v1", // the api version + "pods", // the resource plural + apiClient //the api client + ); pvClient = - new GenericKubernetesApi<>( - V1PersistentVolume.class, // the api type class - V1PersistentVolumeList.class, // the api list type class - "", // the api group - "v1", // the api version - "persistentvolumes", // the resource plural - apiClient //the api client - ); + new GenericKubernetesApi<>( + V1PersistentVolume.class, // the api type class + V1PersistentVolumeList.class, // the api list type class + "", // the api group + "v1", // the api version + "persistentvolumes", // the resource plural + apiClient //the api client + ); pvcClient = - new GenericKubernetesApi<>( - V1PersistentVolumeClaim.class, // the api type class - V1PersistentVolumeClaimList.class, // the api list type class - "", // the api group - "v1", // the api version - "persistentvolumeclaims", // the resource plural - apiClient //the api client - ); + new GenericKubernetesApi<>( + V1PersistentVolumeClaim.class, // the api type class + V1PersistentVolumeClaimList.class, // the api list type 
class + "", // the api group + "v1", // the api version + "persistentvolumeclaims", // the resource plural + apiClient //the api client + ); rsClient = - new GenericKubernetesApi<>( - V1ReplicaSet.class, // the api type class - V1ReplicaSetList.class, // the api list type class - "", // the api group - "v1", // the api version - "replicasets", // the resource plural - apiClient //the api client - ); + new GenericKubernetesApi<>( + V1ReplicaSet.class, // the api type class + V1ReplicaSetList.class, // the api list type class + "", // the api group + "v1", // the api version + "replicasets", // the resource plural + apiClient //the api client + ); roleBindingClient = - new GenericKubernetesApi<>( - V1ClusterRoleBinding.class, // the api type class - V1ClusterRoleBindingList.class, // the api list type class - "rbac.authorization.k8s.io", // the api group - "v1", // the api version - "clusterrolebindings", // the resource plural - apiClient //the api client - ); + new GenericKubernetesApi<>( + V1ClusterRoleBinding.class, // the api type class + V1ClusterRoleBindingList.class, // the api list type class + "rbac.authorization.k8s.io", // the api group + "v1", // the api version + "clusterrolebindings", // the resource plural + apiClient //the api client + ); secretClient = - new GenericKubernetesApi<>( - V1Secret.class, // the api type class - V1SecretList.class, // the api list type class - "", // the api group - "v1", // the api version - "secrets", // the resource plural - apiClient //the api client - ); + new GenericKubernetesApi<>( + V1Secret.class, // the api type class + V1SecretList.class, // the api list type class + "", // the api group + "v1", // the api version + "secrets", // the resource plural + apiClient //the api client + ); serviceClient = - new GenericKubernetesApi<>( - V1Service.class, // the api type class - V1ServiceList.class, // the api list type class - "", // the api group - "v1", // the api version - "services", // the resource plural - 
apiClient //the api client - ); + new GenericKubernetesApi<>( + V1Service.class, // the api type class + V1ServiceList.class, // the api list type class + "", // the api group + "v1", // the api version + "services", // the resource plural + apiClient //the api client + ); serviceAccountClient = - new GenericKubernetesApi<>( - V1ServiceAccount.class, // the api type class - V1ServiceAccountList.class, // the api list type class - "", // the api group - "v1", // the api version - "serviceaccounts", // the resource plural - apiClient //the api client - ); + new GenericKubernetesApi<>( + V1ServiceAccount.class, // the api type class + V1ServiceAccountList.class, // the api list type class + "", // the api group + "v1", // the api version + "serviceaccounts", // the resource plural + apiClient //the api client + ); deleteOptions = new DeleteOptions(); deleteOptions.setGracePeriodSeconds(0L); deleteOptions.setPropagationPolicy(FOREGROUND); @@ -304,11 +306,11 @@ public static boolean createDeployment(V1Deployment deployment) throws ApiExcept try { AppsV1Api apiInstance = new AppsV1Api(apiClient); V1Deployment createdDeployment = apiInstance.createNamespacedDeployment( - namespace, // String | namespace in which to create job - deployment, // V1Deployment | body of the V1Deployment containing deployment data - PRETTY, // String | pretty print output. - null, // String | dry run or permanent change - null // String | field manager who is making the change + namespace, // String | namespace in which to create job + deployment, // V1Deployment | body of the V1Deployment containing deployment data + PRETTY, // String | pretty print output. 
+ null, // String | dry run or permanent change + null // String | field manager who is making the change ); if (createdDeployment != null) { status = true; @@ -332,17 +334,18 @@ public static V1DeploymentList listDeployments(String namespace) throws ApiExcep try { AppsV1Api apiInstance = new AppsV1Api(apiClient); deployments = apiInstance.listNamespacedDeployment( - namespace, // String | namespace. - PRETTY, // String | If 'true', then the output is pretty printed. - ALLOW_WATCH_BOOKMARKS, // Boolean | allowWatchBookmarks requests watch events with type "BOOKMARK". - null, // String | The continue option should be set when retrieving more results from the server. - null, // String | A selector to restrict the list of returned objects by their fields. - null, // String | A selector to restrict the list of returned objects by their labels. - null, // Integer | limit is a maximum number of responses to return for a list call. - RESOURCE_VERSION, // String | Shows changes that occur after that particular version of a resource. - TIMEOUT_SECONDS, // Integer | Timeout for the list call. - Boolean.FALSE // Boolean | Watch for changes to the described resources. + namespace, // String | namespace. + PRETTY, // String | If 'true', then the output is pretty printed. + ALLOW_WATCH_BOOKMARKS, // Boolean | allowWatchBookmarks requests watch events with type "BOOKMARK". + null, // String | The continue option should be set when retrieving more results from the server. + null, // String | A selector to restrict the list of returned objects by their fields. + null, // String | A selector to restrict the list of returned objects by their labels. + null, // Integer | limit is a maximum number of responses to return for a list call. + RESOURCE_VERSION, // String | Shows changes that occur after that particular version of a resource. + TIMEOUT_SECONDS, // Integer | Timeout for the list call. + Boolean.FALSE // Boolean | Watch for changes to the described resources. 
); + } catch (ApiException apex) { getLogger().warning(apex.getResponseBody()); throw apex; @@ -362,14 +365,14 @@ public static boolean deleteDeployment(String namespace, String name) throws Api try { AppsV1Api apiInstance = new AppsV1Api(apiClient); apiInstance.deleteNamespacedDeployment( - name, // String | deployment object name. - namespace, // String | namespace in which the deployment exists. - PRETTY, // String | If 'true', then the output is pretty printed. - null, // String | When present, indicates that modifications should not be persisted. - GRACE_PERIOD, // Integer | The duration in seconds before the object should be deleted. - null, // Boolean | Deprecated: use the PropagationPolicy. - FOREGROUND, // String | Whether and how garbage collection will be performed. - null // V1DeleteOptions. + name, // String | deployment object name. + namespace, // String | namespace in which the deployment exists. + PRETTY, // String | If 'true', then the output is pretty printed. + null, // String | When present, indicates that modifications should not be persisted. + GRACE_PERIOD, // Integer | The duration in seconds before the object should be deleted. + null, // Boolean | Deprecated: use the PropagationPolicy. + FOREGROUND, // String | Whether and how garbage collection will be performed. + null // V1DeleteOptions. 
); } catch (ApiException apex) { getLogger().warning(apex.getResponseBody()); @@ -405,16 +408,16 @@ public static String getPodLog(String name, String namespace, String container) String log = null; try { log = coreV1Api.readNamespacedPodLog( - name, // name of the Pod - namespace, // name of the Namespace - container, // container for which to stream logs - null, // Boolean Follow the log stream of the pod - null, // number of bytes to read from the server before terminating the log output - PRETTY, // pretty print output - null, // Boolean, Return previous terminated container logs - null, // relative time (seconds) before the current time from which to show logs - null, // number of lines from the end of the logs to show - null // Boolean, add timestamp at the beginning of every line of log output + name, // name of the Pod + namespace, // name of the Namespace + container, // container for which to stream logs + null, // Boolean Follow the log stream of the pod + null, // number of bytes to read from the server before terminating the log output + PRETTY, // pretty print output + null, // Boolean, Return previous terminated container logs + null, // relative time (seconds) before the current time from which to show logs + null, // number of lines from the end of the logs to show + null // Boolean, add timestamp at the beginning of every line of log output ); } catch (ApiException apex) { getLogger().severe(apex.getResponseBody()); @@ -455,14 +458,14 @@ public static boolean deletePod(String name, String namespace) { if (!response.isSuccess()) { getLogger().warning("Failed to delete pod '" + name + "' from namespace: " - + namespace + " with HTTP status code: " + response.getHttpStatusCode()); + + namespace + " with HTTP status code: " + response.getHttpStatusCode()); return false; } if (response.getObject() != null) { getLogger().info( - "Received after-deletion status of the requested object, will be deleting " - + "pod in background!"); + "Received 
after-deletion status of the requested object, will be deleting " + + "pod in background!"); } return true; @@ -480,7 +483,7 @@ public static boolean deletePod(String name, String namespace) { public static V1Pod getPod(String namespace, String labelSelector, String podName) throws ApiException { V1PodList pods = listPods(namespace, labelSelector); for (var pod : pods.getItems()) { - if (podName.equals(pod.getMetadata().getName())) { + if (pod.getMetadata().getName().contains(podName)) { return pod; } } @@ -511,7 +514,7 @@ public static String getPodIP(String namespace, String labelSelectors, String po * @throws ApiException if Kubernetes client API call fail */ public static DateTime getPodCreationTimestamp(String namespace, String labelSelector, String podName) - throws ApiException { + throws ApiException { V1Pod pod = getPod(namespace, labelSelector, podName); if (pod != null && pod.getMetadata() != null) { @@ -532,8 +535,8 @@ public static DateTime getPodCreationTimestamp(String namespace, String labelSel * @throws ApiException if Kubernetes client API call fails */ public static int getContainerRestartCount( - String namespace, String labelSelector, String podName, String containerName) - throws ApiException { + String namespace, String labelSelector, String podName, String containerName) + throws ApiException { V1Pod pod = getPod(namespace, labelSelector, podName); if (pod != null && pod.getStatus() != null) { @@ -548,15 +551,45 @@ public static int getContainerRestartCount( } } getLogger().severe("Container {0} status doesn't exist or pod's container statuses is empty in namespace {1}", - containerName, namespace); + containerName, namespace); } } else { getLogger().severe("Pod {0} doesn't exist or pod status is null in namespace {1}", - podName, namespace); + podName, namespace); } return 0; } + /** + * Get the container's image in the pod. 
+ * @param namespace name of the pod's namespace + * @param labelSelector in the format "weblogic.operatorName in (%s)" + * @param podName name of the pod + * @param containerName name of the container, null if there is only one container + * @return image used for the container + * @throws ApiException if Kubernetes client API call fails + */ + public static String getContainerImage(String namespace, String podName, + String labelSelector, String containerName) throws ApiException { + V1Pod pod = getPod(namespace, labelSelector, podName); + if (pod != null) { + List containerList = pod.getSpec().getContainers(); + if (containerName == null && containerList.size() >= 1) { + return containerList.get(0).getImage(); + } else { + for (V1Container container : containerList) { + if (containerName.equals(container.getName())) { + return container.getImage(); + } + } + getLogger().info("Container {0} doesn't exist in pod {1} namespace {2}", + containerName, podName, namespace); + } + } else { + getLogger().severe("Pod " + podName + " doesn't exist in namespace " + namespace); + } + return null; + } /** * Get the weblogic.domainRestartVersion label from a given pod. @@ -568,7 +601,7 @@ public static int getContainerRestartCount( * @throws ApiException when there is error in querying the cluster */ public static String getPodRestartVersion(String namespace, String labelSelector, String podName) - throws ApiException { + throws ApiException { V1Pod pod = getPod(namespace, labelSelector, podName); if (pod != null) { // return the value of the weblogic.domainRestartVersion label @@ -591,7 +624,7 @@ public static V1PodList listPods(String namespace, String labelSelectors) throws V1PodList v1PodList = null; try { v1PodList - = coreV1Api.listNamespacedPod( + = coreV1Api.listNamespacedPod( namespace, // namespace in which to look for the pods. Boolean.FALSE.toString(), // pretty print output. Boolean.FALSE, // allowWatchBookmarks requests watch events with type "BOOKMARK". 
@@ -602,7 +635,7 @@ public static V1PodList listPods(String namespace, String labelSelectors) throws null, // shows changes that occur after that particular version of a resource. null, // Timeout for the list/watch call. Boolean.FALSE // Watch for changes to the described resources. - ); + ); } catch (ApiException apex) { getLogger().severe(apex.getResponseBody()); throw apex; @@ -619,7 +652,7 @@ public static V1PodList listPods(String namespace, String labelSelectors) throws * @throws ApiException when pod interaction fails */ public static void copyDirectoryFromPod(V1Pod pod, String srcPath, Path destination) - throws IOException, ApiException { + throws IOException, ApiException { Copy copy = new Copy(); copy.copyDirectoryFromPod(pod, srcPath, destination); } @@ -635,8 +668,8 @@ public static void copyDirectoryFromPod(V1Pod pod, String srcPath, Path destinat * @throws ApiException when pod interaction fails */ public static void copyFileToPod( - String namespace, String pod, String container, Path srcPath, Path destPath) - throws IOException, ApiException { + String namespace, String pod, String container, Path srcPath, Path destPath) + throws IOException, ApiException { Copy copy = new Copy(apiClient); copy.copyFileToPod(namespace, pod, container, srcPath, destPath); } @@ -655,10 +688,10 @@ public static boolean createNamespace(String name) throws ApiException { try { coreV1Api.createNamespace( - namespace, // name of the Namespace - PRETTY, // pretty print output - null, // indicates that modifications should not be persisted - null // name associated with the actor or entity that is making these changes + namespace, // name of the Namespace + PRETTY, // pretty print output + null, // indicates that modifications should not be persisted + null // name associated with the actor or entity that is making these changes ); } catch (ApiException apex) { getLogger().severe(apex.getResponseBody()); @@ -678,16 +711,16 @@ public static boolean createNamespace(String 
name) throws ApiException { public static boolean createNamespace(V1Namespace namespace) throws ApiException { if (namespace == null) { throw new IllegalArgumentException( - "Parameter 'namespace' cannot be null when calling createNamespace()"); + "Parameter 'namespace' cannot be null when calling createNamespace()"); } V1Namespace ns = null; try { ns = coreV1Api.createNamespace( - namespace, // V1Namespace configuration data object - PRETTY, // pretty print output - null, // indicates that modifications should not be persisted - null // name associated with the actor or entity that is making these changes + namespace, // V1Namespace configuration data object + PRETTY, // pretty print output + null, // indicates that modifications should not be persisted + null // name associated with the actor or entity that is making these changes ); } catch (ApiException apex) { getLogger().severe(apex.getResponseBody()); @@ -707,11 +740,11 @@ public static void replaceNamespace(V1Namespace ns) throws ApiException { try { coreV1Api.replaceNamespace( - ns.getMetadata().getName(), // name of the namespace - ns, // V1Namespace object body - PRETTY, // pretty print the output - null, // dry run or changes need to be permanent - null // field manager + ns.getMetadata().getName(), // name of the namespace + ns, // V1Namespace object body + PRETTY, // pretty print the output + null, // dry run or changes need to be permanent + null // field manager ); } catch (ApiException ex) { getLogger().severe(ex.getResponseBody()); @@ -729,15 +762,15 @@ public static List listNamespaces() throws ApiException { V1NamespaceList namespaceList; try { namespaceList = coreV1Api.listNamespace( - PRETTY, // pretty print output - ALLOW_WATCH_BOOKMARKS, // allowWatchBookmarks requests watch events with type "BOOKMARK" - null, // set when retrieving more results from the server - null, // selector to restrict the list of returned objects by their fields - null, // selector to restrict the list of returned 
objects by their labels - null, // maximum number of responses to return for a list call - RESOURCE_VERSION, // shows changes that occur after that particular version of a resource - TIMEOUT_SECONDS, // Timeout for the list/watch call - false // Watch for changes to the described resources + PRETTY, // pretty print output + ALLOW_WATCH_BOOKMARKS, // allowWatchBookmarks requests watch events with type "BOOKMARK" + null, // set when retrieving more results from the server + null, // selector to restrict the list of returned objects by their fields + null, // selector to restrict the list of returned objects by their labels + null, // maximum number of responses to return for a list call + RESOURCE_VERSION, // shows changes that occur after that particular version of a resource + TIMEOUT_SECONDS, // Timeout for the list/watch call + false // Watch for changes to the described resources ); } catch (ApiException apex) { getLogger().severe(apex.getResponseBody()); @@ -760,15 +793,15 @@ public static V1NamespaceList listNamespacesAsObjects() throws ApiException { V1NamespaceList namespaceList; try { namespaceList = coreV1Api.listNamespace( - PRETTY, // pretty print output - ALLOW_WATCH_BOOKMARKS, // allowWatchBookmarks requests watch events with type "BOOKMARK" - null, // set when retrieving more results from the server - null, // selector to restrict the list of returned objects by their fields - null, // selector to restrict the list of returned objects by their labels - null, // maximum number of responses to return for a list call - RESOURCE_VERSION, // shows changes that occur after that particular version of a resource - TIMEOUT_SECONDS, // Timeout for the list/watch call - false // Watch for changes to the described resources + PRETTY, // pretty print output + ALLOW_WATCH_BOOKMARKS, // allowWatchBookmarks requests watch events with type "BOOKMARK" + null, // set when retrieving more results from the server + null, // selector to restrict the list of returned 
objects by their fields + null, // selector to restrict the list of returned objects by their labels + null, // maximum number of responses to return for a list call + RESOURCE_VERSION, // shows changes that occur after that particular version of a resource + TIMEOUT_SECONDS, // Timeout for the list/watch call + false // Watch for changes to the described resources ); } catch (ApiException apex) { getLogger().severe(apex.getResponseBody()); @@ -797,21 +830,21 @@ public static boolean deleteNamespace(String name) { return false; } else { getLogger().warning("Failed to delete namespace: " - + name + " with HTTP status code: " + response.getHttpStatusCode()); + + name + " with HTTP status code: " + response.getHttpStatusCode()); return false; } } withStandardRetryPolicy .conditionEvaluationListener( - condition -> getLogger().info("Waiting for namespace {0} to be deleted " - + "(elapsed time {1}ms, remaining time {2}ms)", - name, - condition.getElapsedTimeInMS(), - condition.getRemainingTimeInMS())) + condition -> getLogger().info("Waiting for namespace {0} to be deleted " + + "(elapsed time {1}ms, remaining time {2}ms)", + name, + condition.getElapsedTimeInMS(), + condition.getRemainingTimeInMS())) .until(assertDoesNotThrow(() -> namespaceDeleted(name), - String.format("namespaceExists failed with ApiException for namespace %s", - name))); + String.format("namespaceExists failed with ApiException for namespace %s", + name))); return true; } @@ -842,16 +875,16 @@ public static List listNamespacedEvents(String namespace) throws ApiExc List events = null; try { V1EventList list = coreV1Api.listNamespacedEvent( - namespace, // String | namespace. - PRETTY, // String | If 'true', then the output is pretty printed. - ALLOW_WATCH_BOOKMARKS, // Boolean | allowWatchBookmarks requests watch events with type "BOOKMARK". - null, // String | The continue option should be set when retrieving more results from the server. 
- null, // String | A selector to restrict the list of returned objects by their fields. - null, // String | A selector to restrict the list of returned objects by their labels. - null, // Integer | limit is a maximum number of responses to return for a list call. - RESOURCE_VERSION, // String | Shows changes that occur after that particular version of a resource. - TIMEOUT_SECONDS, // Integer | Timeout for the list call. - Boolean.FALSE // Boolean | Watch for changes to the described resources. + namespace, // String | namespace. + PRETTY, // String | If 'true', then the output is pretty printed. + ALLOW_WATCH_BOOKMARKS, // Boolean | allowWatchBookmarks requests watch events with type "BOOKMARK". + null, // String | The continue option should be set when retrieving more results from the server. + null, // String | A selector to restrict the list of returned objects by their fields. + null, // String | A selector to restrict the list of returned objects by their labels. + null, // Integer | limit is a maximum number of responses to return for a list call. + RESOURCE_VERSION, // String | Shows changes that occur after that particular version of a resource. + TIMEOUT_SECONDS, // Integer | Timeout for the list call. + Boolean.FALSE // Boolean | Watch for changes to the described resources. 
); events = list.getItems(); events.sort(Comparator.comparing(e -> e.getMetadata().getCreationTimestamp())); @@ -874,17 +907,17 @@ public static List listNamespacedEvents(String namespace) throws ApiExc public static boolean createDomainCustomResource(Domain domain) throws ApiException { if (domain == null) { throw new IllegalArgumentException( - "Parameter 'domain' cannot be null when calling createDomainCustomResource()"); + "Parameter 'domain' cannot be null when calling createDomainCustomResource()"); } if (domain.metadata() == null) { throw new IllegalArgumentException( - "'metadata' field of the parameter 'domain' cannot be null when calling createDomainCustomResource()"); + "'metadata' field of the parameter 'domain' cannot be null when calling createDomainCustomResource()"); } if (domain.metadata().getNamespace() == null) { throw new IllegalArgumentException( - "'namespace' field in the metadata cannot be null when calling createDomainCustomResource()"); + "'namespace' field in the metadata cannot be null when calling createDomainCustomResource()"); } String namespace = domain.metadata().getNamespace(); @@ -894,14 +927,14 @@ public static boolean createDomainCustomResource(Domain domain) throws ApiExcept Object response; try { response = customObjectsApi.createNamespacedCustomObject( - DOMAIN_GROUP, // custom resource's group name - DOMAIN_VERSION, // //custom resource's version - namespace, // custom resource's namespace - DOMAIN_PLURAL, // custom resource's plural name - json, // JSON schema of the Resource to create - null, // pretty print output - null, // dry run - null // field manager + DOMAIN_GROUP, // custom resource's group name + DOMAIN_VERSION, // //custom resource's version + namespace, // custom resource's namespace + DOMAIN_PLURAL, // custom resource's plural name + json, // JSON schema of the Resource to create + null, // pretty print output + null, // dry run + null // field manager ); } catch (ApiException apex) { 
getLogger().severe(apex.getResponseBody()); @@ -935,15 +968,15 @@ public static boolean deleteDomainCustomResource(String domainUid, String namesp if (!response.isSuccess()) { getLogger().warning( - "Failed to delete Domain Custom Resource '" + domainUid + "' from namespace: " - + namespace + " with HTTP status code: " + response.getHttpStatusCode()); + "Failed to delete Domain Custom Resource '" + domainUid + "' from namespace: " + + namespace + " with HTTP status code: " + response.getHttpStatusCode()); return false; } if (response.getObject() != null) { getLogger().info( - "Received after-deletion status of the requested object, will be deleting " - + "domain custom resource in background!"); + "Received after-deletion status of the requested object, will be deleting " + + "domain custom resource in background!"); } return true; @@ -958,15 +991,15 @@ public static boolean deleteDomainCustomResource(String domainUid, String namesp * @throws ApiException if Kubernetes request fails */ public static Domain getDomainCustomResource(String domainUid, String namespace) - throws ApiException { + throws ApiException { Object domain; try { domain = customObjectsApi.getNamespacedCustomObject( - DOMAIN_GROUP, // custom resource's group name - DOMAIN_VERSION, // //custom resource's version - namespace, // custom resource's namespace - DOMAIN_PLURAL, // custom resource's plural name - domainUid // custom object's name + DOMAIN_GROUP, // custom resource's group name + DOMAIN_VERSION, // //custom resource's version + namespace, // custom resource's namespace + DOMAIN_PLURAL, // custom resource's plural name + domainUid // custom object's name ); } catch (ApiException apex) { getLogger().severe(apex.getResponseBody()); @@ -996,12 +1029,12 @@ public static Domain getDomainCustomResource(String domainUid, String namespace) * @return true if patch is successful otherwise false */ public static boolean patchCustomResourceDomainJsonPatch(String domainUid, String namespace, - String 
patchString) { + String patchString) { return patchDomainCustomResource( - domainUid, // name of custom resource domain - namespace, // name of namespace - new V1Patch(patchString), // patch data - V1Patch.PATCH_FORMAT_JSON_PATCH // "application/json-patch+json" patch format + domainUid, // name of custom resource domain + namespace, // name of namespace + new V1Patch(patchString), // patch data + V1Patch.PATCH_FORMAT_JSON_PATCH // "application/json-patch+json" patch format ); } @@ -1020,12 +1053,12 @@ public static boolean patchCustomResourceDomainJsonPatch(String domainUid, Strin * @return true if patch is successful otherwise false */ public static boolean patchCustomResourceDomainJsonMergePatch(String domainUid, String namespace, - String patchString) { + String patchString) { return patchDomainCustomResource( - domainUid, // name of custom resource domain - namespace, // name of namespace - new V1Patch(patchString), // patch data - V1Patch.PATCH_FORMAT_JSON_MERGE_PATCH // "application/merge-patch+json" patch format + domainUid, // name of custom resource domain + namespace, // name of namespace + new V1Patch(patchString), // patch data + V1Patch.PATCH_FORMAT_JSON_MERGE_PATCH // "application/merge-patch+json" patch format ); } @@ -1040,26 +1073,63 @@ public static boolean patchCustomResourceDomainJsonMergePatch(String domainUid, * @return true if successful, false otherwise */ public static boolean patchDomainCustomResource(String domainUid, String namespace, - V1Patch patch, String patchFormat) { + V1Patch patch, String patchFormat) { // GenericKubernetesApi uses CustomObjectsApi calls KubernetesApiResponse response = crdClient.patch( - namespace, // name of namespace - domainUid, // name of custom resource domain - patchFormat, // "application/json-patch+json" or "application/merge-patch+json" - patch // patch data + namespace, // name of namespace + domainUid, // name of custom resource domain + patchFormat, // "application/json-patch+json" or 
"application/merge-patch+json" + patch // patch data ); if (!response.isSuccess()) { getLogger().warning( - "Failed to patch " + domainUid + " in namespace " + namespace + " using patch format: " - + patchFormat); + "Failed to patch " + domainUid + " in namespace " + namespace + " using patch format: " + + patchFormat); return false; } return true; } + /** + * Patch the Deployment. + * + * @param deploymentName name of the deployment + * @param namespace name of namespace + * @param patch patch data in format matching the specified media type + * @param patchFormat one of the following types used to identify patch document: + * "application/json-patch+json", "application/merge-patch+json", + * @return true if successful, false otherwise + */ + public static boolean patchDeployment(String deploymentName, String namespace, + V1Patch patch, String patchFormat) { + + AppsV1Api apiInstance = new AppsV1Api(apiClient); + try { + PatchUtils.patch( + V1Deployment.class, + () -> + apiInstance.patchNamespacedDeploymentCall( + deploymentName, + namespace, + patch, + null, + null, + null, // field-manager is optional + null, + null), + patchFormat, + apiClient); + } catch (ApiException apiex) { + getLogger().warning("Exception while patching the deployment {0} in namespace {1} : {2} ", + deploymentName, namespace, apiex); + return false; + } + return true; + } + /** * Converts the response to appropriate type. 
* @@ -1101,17 +1171,17 @@ public static DomainList listDomains(String namespace) { public static boolean createConfigMap(V1ConfigMap configMap) throws ApiException { if (configMap == null) { throw new IllegalArgumentException( - "Parameter 'configMap' cannot be null when calling createConfigMap()"); + "Parameter 'configMap' cannot be null when calling createConfigMap()"); } if (configMap.getMetadata() == null) { throw new IllegalArgumentException( - "'metadata' field of the parameter 'configMap' cannot be null when calling createConfigMap()"); + "'metadata' field of the parameter 'configMap' cannot be null when calling createConfigMap()"); } if (configMap.getMetadata().getNamespace() == null) { throw new IllegalArgumentException( - "'namespace' field in the metadata cannot be null when calling createConfigMap()"); + "'namespace' field in the metadata cannot be null when calling createConfigMap()"); } String namespace = configMap.getMetadata().getNamespace(); @@ -1119,11 +1189,11 @@ public static boolean createConfigMap(V1ConfigMap configMap) throws ApiException V1ConfigMap cm; try { cm = coreV1Api.createNamespacedConfigMap( - namespace, // config map's namespace - configMap, // config map configuration data - PRETTY, // pretty print output - null, // indicates that modifications should not be persisted - null // name associated with the actor or entity that is making these changes + namespace, // config map's namespace + configMap, // config map configuration data + PRETTY, // pretty print output + null, // indicates that modifications should not be persisted + null // name associated with the actor or entity that is making these changes ); } catch (ApiException apex) { getLogger().severe(apex.getResponseBody()); @@ -1191,16 +1261,16 @@ public static V1ConfigMapList listConfigMaps(String namespace) throws ApiExcepti V1ConfigMapList configMapList; try { configMapList = coreV1Api.listNamespacedConfigMap( - namespace, // config map's namespace - PRETTY, // pretty 
print output - ALLOW_WATCH_BOOKMARKS, // allowWatchBookmarks requests watch events with type "BOOKMARK" - null, // set when retrieving more results from the server - null, // selector to restrict the list of returned objects by their fields - null, // selector to restrict the list of returned objects by their labels - null, // maximum number of responses to return for a list call - RESOURCE_VERSION, // shows changes that occur after that particular version of a resource - TIMEOUT_SECONDS, // Timeout for the list/watch call - false // Watch for changes to the described resources + namespace, // config map's namespace + PRETTY, // pretty print output + ALLOW_WATCH_BOOKMARKS, // allowWatchBookmarks requests watch events with type "BOOKMARK" + null, // set when retrieving more results from the server + null, // selector to restrict the list of returned objects by their fields + null, // selector to restrict the list of returned objects by their labels + null, // maximum number of responses to return for a list call + RESOURCE_VERSION, // shows changes that occur after that particular version of a resource + TIMEOUT_SECONDS, // Timeout for the list/watch call + false // Watch for changes to the described resources ); } catch (ApiException apex) { getLogger().severe(apex.getResponseBody()); @@ -1222,14 +1292,14 @@ public static boolean deleteConfigMap(String name, String namespace) { KubernetesApiResponse response = configMapClient.delete(namespace, name, deleteOptions); if (!response.isSuccess()) { getLogger().warning("Failed to delete config map '" + name + "' from namespace: " - + namespace + " with HTTP status code: " + response.getHttpStatusCode()); + + namespace + " with HTTP status code: " + response.getHttpStatusCode()); return false; } if (response.getObject() != null) { getLogger().info( - "Received after-deletion status of the requested object, will be deleting " - + "config map in background!"); + "Received after-deletion status of the requested object, will 
be deleting " + + "config map in background!"); } return true; @@ -1246,17 +1316,17 @@ public static boolean deleteConfigMap(String name, String namespace) { public static boolean createSecret(V1Secret secret) throws ApiException { if (secret == null) { throw new IllegalArgumentException( - "Parameter 'secret' cannot be null when calling createSecret()"); + "Parameter 'secret' cannot be null when calling createSecret()"); } if (secret.getMetadata() == null) { throw new IllegalArgumentException( - "'metadata' field of the parameter 'secret' cannot be null when calling createSecret()"); + "'metadata' field of the parameter 'secret' cannot be null when calling createSecret()"); } if (secret.getMetadata().getNamespace() == null) { throw new IllegalArgumentException( - "'namespace' field in the metadata cannot be null when calling createSecret()"); + "'namespace' field in the metadata cannot be null when calling createSecret()"); } String namespace = secret.getMetadata().getNamespace(); @@ -1264,11 +1334,11 @@ public static boolean createSecret(V1Secret secret) throws ApiException { V1Secret v1Secret; try { v1Secret = coreV1Api.createNamespacedSecret( - namespace, // name of the Namespace - secret, // secret configuration data - PRETTY, // pretty print output - null, // indicates that modifications should not be persisted - null // fieldManager is a name associated with the actor + namespace, // name of the Namespace + secret, // secret configuration data + PRETTY, // pretty print output + null, // indicates that modifications should not be persisted + null // fieldManager is a name associated with the actor ); } catch (ApiException apex) { getLogger().severe(apex.getResponseBody()); @@ -1291,14 +1361,14 @@ public static boolean deleteSecret(String name, String namespace) { if (!response.isSuccess()) { getLogger().warning("Failed to delete secret '" + name + "' from namespace: " - + namespace + " with HTTP status code: " + response.getHttpStatusCode()); + + namespace + 
" with HTTP status code: " + response.getHttpStatusCode()); return false; } if (response.getObject() != null) { getLogger().info( - "Received after-deletion status of the requested object, will be deleting " - + "secret in background!"); + "Received after-deletion status of the requested object, will be deleting " + + "secret in background!"); } return true; @@ -1330,16 +1400,16 @@ public static V1SecretList listSecrets(String namespace) { public static boolean createPv(V1PersistentVolume persistentVolume) throws ApiException { if (persistentVolume == null) { throw new IllegalArgumentException( - "Parameter 'persistentVolume' cannot be null when calling createPv()"); + "Parameter 'persistentVolume' cannot be null when calling createPv()"); } V1PersistentVolume pv; try { pv = coreV1Api.createPersistentVolume( - persistentVolume, // persistent volume configuration data - PRETTY, // pretty print output - null, // indicates that modifications should not be persisted - null // fieldManager is a name associated with the actor + persistentVolume, // persistent volume configuration data + PRETTY, // pretty print output + null, // indicates that modifications should not be persisted + null // fieldManager is a name associated with the actor ); } catch (ApiException apex) { getLogger().severe(apex.getResponseBody()); @@ -1353,24 +1423,24 @@ public static boolean createPv(V1PersistentVolume persistentVolume) throws ApiEx * Create a Kubernetes Persistent Volume Claim. 
* * @param persistentVolumeClaim V1PersistentVolumeClaim object containing Kubernetes persistent volume claim - configuration data + configuration data * @return true if successful * @throws ApiException if Kubernetes client API call fails */ public static boolean createPvc(V1PersistentVolumeClaim persistentVolumeClaim) throws ApiException { if (persistentVolumeClaim == null) { throw new IllegalArgumentException( - "Parameter 'persistentVolume' cannot be null when calling createPvc()"); + "Parameter 'persistentVolume' cannot be null when calling createPvc()"); } if (persistentVolumeClaim.getMetadata() == null) { throw new IllegalArgumentException( - "'metadata' field of the parameter 'persistentVolumeClaim' cannot be null when calling createPvc()"); + "'metadata' field of the parameter 'persistentVolumeClaim' cannot be null when calling createPvc()"); } if (persistentVolumeClaim.getMetadata().getNamespace() == null) { throw new IllegalArgumentException( - "'namespace' field in the metadata cannot be null when calling createPvc()"); + "'namespace' field in the metadata cannot be null when calling createPvc()"); } String namespace = persistentVolumeClaim.getMetadata().getNamespace(); @@ -1378,11 +1448,11 @@ public static boolean createPvc(V1PersistentVolumeClaim persistentVolumeClaim) t V1PersistentVolumeClaim pvc; try { pvc = coreV1Api.createNamespacedPersistentVolumeClaim( - namespace, // name of the Namespace - persistentVolumeClaim, // persistent volume claim configuration data - PRETTY, // pretty print output - null, // indicates that modifications should not be persisted - null // fieldManager is a name associated with the actor + namespace, // name of the Namespace + persistentVolumeClaim, // persistent volume claim configuration data + PRETTY, // pretty print output + null, // indicates that modifications should not be persisted + null // fieldManager is a name associated with the actor ); } catch (ApiException apex) { 
getLogger().severe(apex.getResponseBody()); @@ -1404,14 +1474,14 @@ public static boolean deletePv(String name) { if (!response.isSuccess()) { getLogger().warning("Failed to delete persistent volume '" + name + "' " - + "with HTTP status code: " + response.getHttpStatusCode()); + + "with HTTP status code: " + response.getHttpStatusCode()); return false; } if (response.getObject() != null) { getLogger().info( - "Received after-deletion status of the requested object, will be deleting " - + "persistent volume in background!"); + "Received after-deletion status of the requested object, will be deleting " + + "persistent volume in background!"); } return true; @@ -1430,15 +1500,15 @@ public static boolean deletePvc(String name, String namespace) { if (!response.isSuccess()) { getLogger().warning( - "Failed to delete persistent volume claim '" + name + "' from namespace: " - + namespace + " with HTTP status code: " + response.getHttpStatusCode()); + "Failed to delete persistent volume claim '" + name + "' from namespace: " + + namespace + " with HTTP status code: " + response.getHttpStatusCode()); return false; } if (response.getObject() != null) { getLogger().info( - "Received after-deletion status of the requested object, will be deleting " - + "persistent volume claim in background!"); + "Received after-deletion status of the requested object, will be deleting " + + "persistent volume claim in background!"); } return true; @@ -1454,7 +1524,7 @@ public static V1PersistentVolumeList listPersistentVolumes() { return list.getObject(); } else { getLogger().warning("Failed to list Persistent Volumes," - + " status code {0}", list.getHttpStatusCode()); + + " status code {0}", list.getHttpStatusCode()); return null; } } @@ -1469,15 +1539,15 @@ public static V1PersistentVolumeList listPersistentVolumes(String labels) throws V1PersistentVolumeList listPersistentVolume; try { listPersistentVolume = coreV1Api.listPersistentVolume( - PRETTY, // pretty print output - 
ALLOW_WATCH_BOOKMARKS, // allowWatchBookmarks requests watch events with type "BOOKMARK" - null, // set when retrieving more results from the server - null, // selector to restrict the list of returned objects by their fields - labels, // selector to restrict the list of returned objects by their labels - null, // maximum number of responses to return for a list call - RESOURCE_VERSION, // shows changes that occur after that particular version of a resource - TIMEOUT_SECONDS, // Timeout for the list/watch call - false // Watch for changes to the described resources + PRETTY, // pretty print output + ALLOW_WATCH_BOOKMARKS, // allowWatchBookmarks requests watch events with type "BOOKMARK" + null, // set when retrieving more results from the server + null, // selector to restrict the list of returned objects by their fields + labels, // selector to restrict the list of returned objects by their labels + null, // maximum number of responses to return for a list call + RESOURCE_VERSION, // shows changes that occur after that particular version of a resource + TIMEOUT_SECONDS, // Timeout for the list/watch call + false // Watch for changes to the described resources ); } catch (ApiException apex) { getLogger().severe(apex.getResponseBody()); @@ -1497,7 +1567,7 @@ public static V1PersistentVolumeClaimList listPersistentVolumeClaims(String name return list.getObject(); } else { getLogger().warning("Failed to list Persistent Volumes claims," - + " status code {0}", list.getHttpStatusCode()); + + " status code {0}", list.getHttpStatusCode()); return null; } } @@ -1511,31 +1581,31 @@ public static V1PersistentVolumeClaimList listPersistentVolumeClaims(String name * @throws ApiException if Kubernetes client API call fails */ public static V1ServiceAccount createServiceAccount(V1ServiceAccount serviceAccount) - throws ApiException { + throws ApiException { if (serviceAccount == null) { throw new IllegalArgumentException( - "Parameter 'serviceAccount' cannot be null when calling 
createServiceAccount()"); + "Parameter 'serviceAccount' cannot be null when calling createServiceAccount()"); } if (serviceAccount.getMetadata() == null) { throw new IllegalArgumentException( - "'metadata' field of the parameter 'serviceAccount' cannot be null when calling createServiceAccount()"); + "'metadata' field of the parameter 'serviceAccount' cannot be null when calling createServiceAccount()"); } if (serviceAccount.getMetadata().getNamespace() == null) { throw new IllegalArgumentException( - "'namespace' field in the metadata cannot be null when calling createServiceAccount()"); + "'namespace' field in the metadata cannot be null when calling createServiceAccount()"); } String namespace = serviceAccount.getMetadata().getNamespace(); try { serviceAccount = coreV1Api.createNamespacedServiceAccount( - namespace, // name of the Namespace - serviceAccount, // service account configuration data - PRETTY, // pretty print output - null, // indicates that modifications should not be persisted - null // fieldManager is a name associated with the actor + namespace, // name of the Namespace + serviceAccount, // service account configuration data + PRETTY, // pretty print output + null, // indicates that modifications should not be persisted + null // fieldManager is a name associated with the actor ); } catch (ApiException apex) { getLogger().severe(apex.getResponseBody()); @@ -1558,17 +1628,17 @@ public static boolean deleteServiceAccount(String name, String namespace) { if (!response.isSuccess()) { getLogger().warning("Failed to delete Service Account '" + name + "' from namespace: " - + namespace + " with HTTP status code: " + response.getHttpStatusCode()); + + namespace + " with HTTP status code: " + response.getHttpStatusCode()); return false; } if (response.getObject() != null) { getLogger().info( - "Received after-deletion status of the requested object, will be deleting " - + "service account in background!"); + "Received after-deletion status of the 
requested object, will be deleting " + + "service account in background!"); V1ServiceAccount serviceAccount = (V1ServiceAccount) response.getObject(); getLogger().info( - "Deleting Service Account " + serviceAccount.getMetadata().getName() + " in background."); + "Deleting Service Account " + serviceAccount.getMetadata().getName() + " in background."); } return true; @@ -1601,17 +1671,17 @@ public static V1ServiceAccountList listServiceAccounts(String namespace) { public static boolean createService(V1Service service) throws ApiException { if (service == null) { throw new IllegalArgumentException( - "Parameter 'service' cannot be null when calling createService()"); + "Parameter 'service' cannot be null when calling createService()"); } if (service.getMetadata() == null) { throw new IllegalArgumentException( - "'metadata' field of the parameter 'service' cannot be null when calling createService()"); + "'metadata' field of the parameter 'service' cannot be null when calling createService()"); } if (service.getMetadata().getNamespace() == null) { throw new IllegalArgumentException( - "'namespace' field in the metadata cannot be null when calling createService()"); + "'namespace' field in the metadata cannot be null when calling createService()"); } String namespace = service.getMetadata().getNamespace(); @@ -1619,11 +1689,11 @@ public static boolean createService(V1Service service) throws ApiException { V1Service svc; try { svc = coreV1Api.createNamespacedService( - namespace, // name of the Namespace - service, // service configuration data - PRETTY, // pretty print output - null, // indicates that modifications should not be persisted - null // fieldManager is a name associated with the actor + namespace, // name of the Namespace + service, // service configuration data + PRETTY, // pretty print output + null, // indicates that modifications should not be persisted + null // fieldManager is a name associated with the actor ); } catch (ApiException apex) { 
getLogger().severe(apex.getResponseBody()); @@ -1646,14 +1716,14 @@ public static boolean deleteService(String name, String namespace) { if (!response.isSuccess()) { getLogger().warning("Failed to delete Service '" + name + "' from namespace: " - + namespace + " with HTTP status code: " + response.getHttpStatusCode()); + + namespace + " with HTTP status code: " + response.getHttpStatusCode()); return false; } if (response.getObject() != null) { getLogger().info( - "Received after-deletion status of the requested object, will be deleting " - + "service in background!"); + "Received after-deletion status of the requested object, will be deleting " + + "service in background!"); } return true; @@ -1691,7 +1761,7 @@ public static int getServiceNodePort(String namespace, String serviceName, Strin if (service != null) { V1ServicePort port = service.getSpec().getPorts().stream().filter( v1ServicePort -> v1ServicePort.getName().equalsIgnoreCase(channelName)) - .findAny().orElse(null); + .findAny().orElse(null); if (port != null) { return port.getNodePort(); } @@ -1699,6 +1769,23 @@ public static int getServiceNodePort(String namespace, String serviceName, Strin return -1; } + /** + * Get port of a namespaced service. + * + * @param namespace name of the namespace in which to get the service + * @param serviceName name of the service + * @return node port if service found otherwise -1 + */ + public static Integer getServiceNodePort(String namespace, String serviceName) { + List services = listServices(namespace).getItems(); + for (V1Service service : services) { + if (service.getMetadata().getName().startsWith(serviceName)) { + return service.getSpec().getPorts().get(0).getNodePort(); + } + } + return -1; + } + /** * Get port of a namespaced service given the channel name. 
* @@ -1712,7 +1799,7 @@ public static int getServicePort(String namespace, String serviceName, String ch if (service != null) { V1ServicePort port = service.getSpec().getPorts().stream().filter( v1ServicePort -> v1ServicePort.getName().equalsIgnoreCase(channelName)) - .findAny().orElse(null); + .findAny().orElse(null); if (port != null) { return port.getPort(); } @@ -1733,7 +1820,7 @@ public static V1ServiceList listServices(String namespace) { return list.getObject(); } else { getLogger().warning("Failed to list services in namespace {0}, status code {1}", - namespace, list.getHttpStatusCode()); + namespace, list.getHttpStatusCode()); return null; } } @@ -1751,11 +1838,11 @@ public static String createNamespacedJob(V1Job jobBody) throws ApiException { try { BatchV1Api apiInstance = new BatchV1Api(apiClient); V1Job createdJob = apiInstance.createNamespacedJob( - namespace, // String | namespace in which to create job - jobBody, // V1Job | body of the V1Job containing job data - PRETTY, // String | pretty print output. - null, // String | dry run or permanent change - null // String | field manager who is making the change + namespace, // String | namespace in which to create job + jobBody, // V1Job | body of the V1Job containing job data + PRETTY, // String | pretty print output. + null, // String | dry run or permanent change + null // String | field manager who is making the change ); if (createdJob != null) { name = createdJob.getMetadata().getName(); @@ -1775,23 +1862,22 @@ public static String createNamespacedJob(V1Job jobBody) throws ApiException { * @return true if delete was successful * @throws ApiException when deletion of job fails */ - public static boolean deleteJob(String namespace, String name) throws ApiException { - try { - BatchV1Api apiInstance = new BatchV1Api(apiClient); - apiInstance.deleteNamespacedJob( - name, // String | name of the job. - namespace, // String | name of the namespace. - PRETTY, // String | pretty print output. 
- null, // String | When present, indicates that modifications should not be persisted. - GRACE_PERIOD, // Integer | The duration in seconds before the object should be deleted. - null, // Boolean | Deprecated: use the PropagationPolicy. - FOREGROUND, // String | Whether and how garbage collection will be performed. - null // V1DeleteOptions. - ); - } catch (ApiException apex) { - getLogger().warning(apex.getResponseBody()); - throw apex; + public static boolean deleteJob(String namespace, String name) { + + KubernetesApiResponse response = jobClient.delete(namespace, name); + + if (!response.isSuccess()) { + getLogger().warning("Failed to delete job '" + name + "' from namespace: " + + namespace + " with HTTP status code: " + response.getHttpStatusCode()); + return false; } + + if (response.getObject() != null) { + getLogger().info( + "Received after-deletion status of the requested object, will be deleting " + + "job in background!"); + } + return true; } @@ -1807,16 +1893,16 @@ public static V1JobList listJobs(String namespace) throws ApiException { try { BatchV1Api apiInstance = new BatchV1Api(apiClient); list = apiInstance.listNamespacedJob( - namespace, // String | name of the namespace. - PRETTY, // String | pretty print output. - ALLOW_WATCH_BOOKMARKS, // Boolean | allowWatchBookmarks requests watch events with type "BOOKMARK". - null, // String | The continue option should be set when retrieving more results from the server. - null, // String | A selector to restrict the list of returned objects by their fields. - null, // String | A selector to restrict the list of returned objects by their labels. - null, // Integer | limit is a maximum number of responses to return for a list call. - RESOURCE_VERSION, // String | Shows changes that occur after that particular version of a resource. - TIMEOUT_SECONDS, // Integer | Timeout for the list/watch call. 
- Boolean.FALSE // Boolean | Watch for changes to the described resources + namespace, // String | name of the namespace. + PRETTY, // String | pretty print output. + ALLOW_WATCH_BOOKMARKS, // Boolean | allowWatchBookmarks requests watch events with type "BOOKMARK". + null, // String | The continue option should be set when retrieving more results from the server. + null, // String | A selector to restrict the list of returned objects by their fields. + null, // String | A selector to restrict the list of returned objects by their labels. + null, // Integer | limit is a maximum number of responses to return for a list call. + RESOURCE_VERSION, // String | Shows changes that occur after that particular version of a resource. + TIMEOUT_SECONDS, // Integer | Timeout for the list/watch call. + Boolean.FALSE // Boolean | Watch for changes to the described resources ); } catch (ApiException apex) { getLogger().warning(apex.getResponseBody()); @@ -1860,14 +1946,14 @@ public static boolean deleteReplicaSet(String namespace, String name) throws Api try { AppsV1Api apiInstance = new AppsV1Api(apiClient); apiInstance.deleteNamespacedReplicaSet( - name, // String | name of the replica set. - namespace, // String | name of the namespace. - PRETTY, // String | pretty print output. - null, // String | When present, indicates that modifications should not be persisted. - GRACE_PERIOD, // Integer | The duration in seconds before the object should be deleted. - null, // Boolean | Deprecated: use the PropagationPolicy. - FOREGROUND, // String | Whether and how garbage collection will be performed. - null // V1DeleteOptions. + name, // String | name of the replica set. + namespace, // String | name of the namespace. + PRETTY, // String | pretty print output. + null, // String | When present, indicates that modifications should not be persisted. + GRACE_PERIOD, // Integer | The duration in seconds before the object should be deleted. 
+ null, // Boolean | Deprecated: use the PropagationPolicy. + FOREGROUND, // String | Whether and how garbage collection will be performed. + null // V1DeleteOptions. ); } catch (ApiException apex) { getLogger().warning(apex.getResponseBody()); @@ -1887,16 +1973,16 @@ public static V1ReplicaSetList listReplicaSets(String namespace) throws ApiExcep try { AppsV1Api apiInstance = new AppsV1Api(apiClient); V1ReplicaSetList list = apiInstance.listNamespacedReplicaSet( - namespace, // String | namespace. - PRETTY, // String | If 'true', then the output is pretty printed. - ALLOW_WATCH_BOOKMARKS, // Boolean | allowWatchBookmarks requests watch events with type "BOOKMARK". - null, // String | The continue option should be set when retrieving more results from the server. - null, // String | A selector to restrict the list of returned objects by their fields. - null, // String | A selector to restrict the list of returned objects by their labels. - null, // Integer | limit is a maximum number of responses to return for a list call. - RESOURCE_VERSION, // String | Shows changes that occur after that particular version of a resource. - TIMEOUT_SECONDS, // Integer | Timeout for the list call. - Boolean.FALSE // Boolean | Watch for changes to the described resources. + namespace, // String | namespace. + PRETTY, // String | If 'true', then the output is pretty printed. + ALLOW_WATCH_BOOKMARKS, // Boolean | allowWatchBookmarks requests watch events with type "BOOKMARK". + null, // String | The continue option should be set when retrieving more results from the server. + null, // String | A selector to restrict the list of returned objects by their fields. + null, // String | A selector to restrict the list of returned objects by their labels. + null, // Integer | limit is a maximum number of responses to return for a list call. + RESOURCE_VERSION, // String | Shows changes that occur after that particular version of a resource. 
+ TIMEOUT_SECONDS, // Integer | Timeout for the list call. + Boolean.FALSE // Boolean | Watch for changes to the described resources. ); return list; } catch (ApiException apex) { @@ -1916,10 +2002,10 @@ public static V1ReplicaSetList listReplicaSets(String namespace) throws ApiExcep public static boolean createClusterRole(V1ClusterRole clusterRole) throws ApiException { try { V1ClusterRole cr = rbacAuthApi.createClusterRole( - clusterRole, // cluster role configuration data - PRETTY, // pretty print output - null, // indicates that modifications should not be persisted - null // fieldManager is a name associated with the actor + clusterRole, // cluster role configuration data + PRETTY, // pretty print output + null, // indicates that modifications should not be persisted + null // fieldManager is a name associated with the actor ); } catch (ApiException apex) { getLogger().severe(apex.getResponseBody()); @@ -1937,13 +2023,13 @@ public static boolean createClusterRole(V1ClusterRole clusterRole) throws ApiExc * @throws ApiException if Kubernetes client API call fails */ public static boolean createClusterRoleBinding(V1ClusterRoleBinding clusterRoleBinding) - throws ApiException { + throws ApiException { try { V1ClusterRoleBinding crb = rbacAuthApi.createClusterRoleBinding( - clusterRoleBinding, // role binding configuration data - PRETTY, // pretty print output - null, // indicates that modifications should not be persisted - null // fieldManager is a name associated with the actor + clusterRoleBinding, // role binding configuration data + PRETTY, // pretty print output + null, // indicates that modifications should not be persisted + null // fieldManager is a name associated with the actor ); } catch (ApiException apex) { getLogger().severe(apex.getResponseBody()); @@ -1964,11 +2050,11 @@ public static boolean createClusterRoleBinding(V1ClusterRoleBinding clusterRoleB public static boolean createNamespacedRoleBinding(String namespace, V1RoleBinding roleBinding) 
throws ApiException { try { V1RoleBinding crb = rbacAuthApi.createNamespacedRoleBinding( - namespace, // namespace where this role binding is created - roleBinding, // role binding configuration data - PRETTY, // pretty print output - null, // indicates that modifications should not be persisted - null // fieldManager is a name associated with the actor + namespace, // namespace where this role binding is created + roleBinding, // role binding configuration data + PRETTY, // pretty print output + null, // indicates that modifications should not be persisted + null // fieldManager is a name associated with the actor ); } catch (ApiException apex) { getLogger().severe(apex.getResponseBody()); @@ -1989,15 +2075,15 @@ public static boolean deleteClusterRoleBinding(String name) { if (!response.isSuccess()) { getLogger().warning( - "Failed to delete Cluster Role Binding '" + name + " with HTTP status code: " + response - .getHttpStatusCode()); + "Failed to delete Cluster Role Binding '" + name + " with HTTP status code: " + response + .getHttpStatusCode()); return false; } if (response.getObject() != null) { getLogger().info( - "Received after-deletion status of the requested object, will be deleting " - + "Cluster Role Binding " + name + " in background!"); + "Received after-deletion status of the requested object, will be deleting " + + "Cluster Role Binding " + name + " in background!"); } return true; @@ -2014,15 +2100,15 @@ public static V1RoleBindingList listRoleBindingForAllNamespaces(String labelSele V1RoleBindingList roleBindings; try { roleBindings = rbacAuthApi.listRoleBindingForAllNamespaces( - ALLOW_WATCH_BOOKMARKS, // Boolean | allowWatchBookmarks requests watch events with type "BOOKMARK". - null, // String | The continue option should be set when retrieving more results from the server. - null, // String | A selector to restrict the list of returned objects by their fields. 
- labelSelector, // String | A selector to restrict the list of returned objects by their labels. - null, // Integer | limit is a maximum number of responses to return for a list call. - PRETTY, // String | If true, then the output is pretty printed. - RESOURCE_VERSION, // String | Shows changes that occur after that particular version of a resource. - TIMEOUT_SECONDS, // Integer | Timeout for the list/watch call. - Boolean.FALSE // Boolean | Watch for changes to the described resources + ALLOW_WATCH_BOOKMARKS, // Boolean | allowWatchBookmarks requests watch events with type "BOOKMARK". + null, // String | The continue option should be set when retrieving more results from the server. + null, // String | A selector to restrict the list of returned objects by their fields. + labelSelector, // String | A selector to restrict the list of returned objects by their labels. + null, // Integer | limit is a maximum number of responses to return for a list call. + PRETTY, // String | If true, then the output is pretty printed. + RESOURCE_VERSION, // String | Shows changes that occur after that particular version of a resource. + TIMEOUT_SECONDS, // Integer | Timeout for the list/watch call. + Boolean.FALSE // Boolean | Watch for changes to the described resources ); } catch (ApiException apex) { getLogger().warning(apex.getResponseBody()); @@ -2042,15 +2128,15 @@ public static V1ClusterRoleBindingList listClusterRoleBindings(String labelSelec V1ClusterRoleBindingList clusterRoleBindingList; try { clusterRoleBindingList = rbacAuthApi.listClusterRoleBinding( - PRETTY, // String | If true, then the output is pretty printed. - ALLOW_WATCH_BOOKMARKS, // Boolean | allowWatchBookmarks requests watch events with type "BOOKMARK". - null, // String | The continue option should be set when retrieving more results from the server. - null, // String | A selector to restrict the list of returned objects by their fields. 
- labelSelector, // String | A selector to restrict the list of returned objects by their labels. - null, // Integer | limit is a maximum number of responses to return for a list call. - RESOURCE_VERSION, // String | Shows changes that occur after that particular version of a resource. - TIMEOUT_SECONDS, // Integer | Timeout for the list/watch call. - Boolean.FALSE // Boolean | Watch for changes to the described resources + PRETTY, // String | If true, then the output is pretty printed. + ALLOW_WATCH_BOOKMARKS, // Boolean | allowWatchBookmarks requests watch events with type "BOOKMARK". + null, // String | The continue option should be set when retrieving more results from the server. + null, // String | A selector to restrict the list of returned objects by their fields. + labelSelector, // String | A selector to restrict the list of returned objects by their labels. + null, // Integer | limit is a maximum number of responses to return for a list call. + RESOURCE_VERSION, // String | Shows changes that occur after that particular version of a resource. + TIMEOUT_SECONDS, // Integer | Timeout for the list/watch call. + Boolean.FALSE // Boolean | Watch for changes to the described resources ); } catch (ApiException apex) { getLogger().warning(apex.getResponseBody()); @@ -2068,17 +2154,17 @@ public static V1ClusterRoleBindingList listClusterRoleBindings(String labelSelec * @throws ApiException when delete rolebinding fails */ public static boolean deleteNamespacedRoleBinding(String namespace, String name) - throws ApiException { + throws ApiException { try { rbacAuthApi.deleteNamespacedRoleBinding( - name, // String | name of the job. - namespace, // String | name of the namespace. - PRETTY, // String | pretty print output. - null, // String | When present, indicates that modifications should not be persisted. - GRACE_PERIOD, // Integer | The duration in seconds before the object should be deleted. - null, // Boolean | Deprecated: use the PropagationPolicy. 
- FOREGROUND, // String | Whether and how garbage collection will be performed. - null // V1DeleteOptions. + name, // String | name of the job. + namespace, // String | name of the namespace. + PRETTY, // String | pretty print output. + null, // String | When present, indicates that modifications should not be persisted. + GRACE_PERIOD, // Integer | The duration in seconds before the object should be deleted. + null, // Boolean | Deprecated: use the PropagationPolicy. + FOREGROUND, // String | Whether and how garbage collection will be performed. + null // V1DeleteOptions. ); } catch (ApiException apex) { getLogger().warning(apex.getResponseBody()); @@ -2095,20 +2181,20 @@ public static boolean deleteNamespacedRoleBinding(String namespace, String name) * @throws ApiException when listing fails */ public static V1RoleBindingList listNamespacedRoleBinding(String namespace) - throws ApiException { + throws ApiException { V1RoleBindingList roleBindings; try { roleBindings = rbacAuthApi.listNamespacedRoleBinding( - namespace, // String | namespace. - PRETTY, // String | If 'true', then the output is pretty printed. - ALLOW_WATCH_BOOKMARKS, // Boolean | allowWatchBookmarks requests watch events with type "BOOKMARK". - null, // String | The continue option should be set when retrieving more results from the server. - null, // String | A selector to restrict the list of returned objects by their fields. - null, // String | A selector to restrict the list of returned objects by their labels. - null, // Integer | limit is a maximum number of responses to return for a list call. - RESOURCE_VERSION, // String | Shows changes that occur after that particular version of a resource. - TIMEOUT_SECONDS, // Integer | Timeout for the list call. - Boolean.FALSE // Boolean | Watch for changes to the described resources. + namespace, // String | namespace. + PRETTY, // String | If 'true', then the output is pretty printed. 
+ ALLOW_WATCH_BOOKMARKS, // Boolean | allowWatchBookmarks requests watch events with type "BOOKMARK". + null, // String | The continue option should be set when retrieving more results from the server. + null, // String | A selector to restrict the list of returned objects by their fields. + null, // String | A selector to restrict the list of returned objects by their labels. + null, // Integer | limit is a maximum number of responses to return for a list call. + RESOURCE_VERSION, // String | Shows changes that occur after that particular version of a resource. + TIMEOUT_SECONDS, // Integer | Timeout for the list call. + Boolean.FALSE // Boolean | Watch for changes to the described resources. ); } catch (ApiException apex) { getLogger().warning(apex.getResponseBody()); @@ -2128,13 +2214,13 @@ public static V1RoleBindingList listNamespacedRoleBinding(String namespace) public static boolean deleteClusterRole(String name) throws ApiException { try { rbacAuthApi.deleteClusterRole( - name, // String | name of the role. - PRETTY, // String | pretty print output. - null, // String | When present, indicates that modifications should not be persisted. - GRACE_PERIOD, // Integer | The duration in seconds before the object should be deleted. - null, // Boolean | Deprecated: use the PropagationPolicy. - FOREGROUND, // String | Whether and how garbage collection will be performed. - null // V1DeleteOptions. + name, // String | name of the role. + PRETTY, // String | pretty print output. + null, // String | When present, indicates that modifications should not be persisted. + GRACE_PERIOD, // Integer | The duration in seconds before the object should be deleted. + null, // Boolean | Deprecated: use the PropagationPolicy. + FOREGROUND, // String | Whether and how garbage collection will be performed. + null // V1DeleteOptions. 
); } catch (ApiException apex) { getLogger().warning(apex.getResponseBody()); @@ -2155,15 +2241,15 @@ public static V1ClusterRoleList listClusterRoles(String labelSelector) throws Ap V1ClusterRoleList roles; try { roles = rbacAuthApi.listClusterRole( - PRETTY, // String | If 'true', then the output is pretty printed. - ALLOW_WATCH_BOOKMARKS, // Boolean | allowWatchBookmarks requests watch events with type "BOOKMARK". - null, // String | The continue option should be set when retrieving more results from the server. - null, // String | A selector to restrict the list of returned objects by their fields. - labelSelector, // String | A selector to restrict the list of returned objects by their labels. - null, // Integer | limit is a maximum number of responses to return for a list call. - RESOURCE_VERSION, // String | Shows changes that occur after that particular version of a resource. - TIMEOUT_SECONDS, // Integer | Timeout for the list call. - Boolean.FALSE // Boolean | Watch for changes to the described resources. + PRETTY, // String | If 'true', then the output is pretty printed. + ALLOW_WATCH_BOOKMARKS, // Boolean | allowWatchBookmarks requests watch events with type "BOOKMARK". + null, // String | The continue option should be set when retrieving more results from the server. + null, // String | A selector to restrict the list of returned objects by their fields. + labelSelector, // String | A selector to restrict the list of returned objects by their labels. + null, // Integer | limit is a maximum number of responses to return for a list call. + RESOURCE_VERSION, // String | Shows changes that occur after that particular version of a resource. + TIMEOUT_SECONDS, // Integer | Timeout for the list call. + Boolean.FALSE // Boolean | Watch for changes to the described resources. 
); } catch (ApiException apex) { getLogger().warning(apex.getResponseBody()); @@ -2183,14 +2269,14 @@ public static V1ClusterRoleList listClusterRoles(String labelSelector) throws Ap public static boolean deleteNamespacedRole(String namespace, String name) throws ApiException { try { rbacAuthApi.deleteNamespacedRole( - name, // String | name of the job. - namespace, // String | name of the namespace. - PRETTY, // String | pretty print output. - null, // String | When present, indicates that modifications should not be persisted. - GRACE_PERIOD, // Integer | The duration in seconds before the object should be deleted. - null, // Boolean | Deprecated: use the PropagationPolicy. - FOREGROUND, // String | Whether and how garbage collection will be performed. - null // V1DeleteOptions. + name, // String | name of the job. + namespace, // String | name of the namespace. + PRETTY, // String | pretty print output. + null, // String | When present, indicates that modifications should not be persisted. + GRACE_PERIOD, // Integer | The duration in seconds before the object should be deleted. + null, // Boolean | Deprecated: use the PropagationPolicy. + FOREGROUND, // String | Whether and how garbage collection will be performed. + null // V1DeleteOptions. ); } catch (ApiException apex) { getLogger().warning(apex.getResponseBody()); @@ -2210,16 +2296,16 @@ public static V1RoleList listNamespacedRoles(String namespace) throws ApiExcepti V1RoleList roles; try { roles = rbacAuthApi.listNamespacedRole( - namespace, // String | namespace. - PRETTY, // String | If 'true', then the output is pretty printed. - ALLOW_WATCH_BOOKMARKS, // Boolean | allowWatchBookmarks requests watch events with type "BOOKMARK". - null, // String | The continue option should be set when retrieving more results from the server. - null, // String | A selector to restrict the list of returned objects by their fields. - null, // String | A selector to restrict the list of returned objects by their labels. 
- null, // Integer | limit is a maximum number of responses to return for a list call. - RESOURCE_VERSION, // String | Shows changes that occur after that particular version of a resource. - TIMEOUT_SECONDS, // Integer | Timeout for the list call. - Boolean.FALSE // Boolean | Watch for changes to the described resources. + namespace, // String | namespace. + PRETTY, // String | If 'true', then the output is pretty printed. + ALLOW_WATCH_BOOKMARKS, // Boolean | allowWatchBookmarks requests watch events with type "BOOKMARK". + null, // String | The continue option should be set when retrieving more results from the server. + null, // String | A selector to restrict the list of returned objects by their fields. + null, // String | A selector to restrict the list of returned objects by their labels. + null, // Integer | limit is a maximum number of responses to return for a list call. + RESOURCE_VERSION, // String | Shows changes that occur after that particular version of a resource. + TIMEOUT_SECONDS, // Integer | Timeout for the list call. + Boolean.FALSE // Boolean | Watch for changes to the described resources. ); } catch (ApiException apex) { getLogger().warning(apex.getResponseBody()); @@ -2240,16 +2326,16 @@ public static NetworkingV1beta1IngressList listNamespacedIngresses(String namesp try { NetworkingV1beta1Api apiInstance = new NetworkingV1beta1Api(apiClient); ingressList = apiInstance.listNamespacedIngress( - namespace, // namespace - PRETTY, // String | If 'true', then the output is pretty printed. - ALLOW_WATCH_BOOKMARKS, // Boolean | allowWatchBookmarks requests watch events with type "BOOKMARK". - null, // String | The continue option should be set when retrieving more results from the server. - null, // String | A selector to restrict the list of returned objects by their fields. - null, // String | A selector to restrict the list of returned objects by their labels. 
- null, // Integer | limit is a maximum number of responses to return for a list call. - RESOURCE_VERSION, // String | Shows changes that occur after that particular version of a resource. - TIMEOUT_SECONDS, // Integer | Timeout for the list/watch call. - ALLOW_WATCH_BOOKMARKS // Boolean | Watch for changes to the described resources. + namespace, // namespace + PRETTY, // String | If 'true', then the output is pretty printed. + ALLOW_WATCH_BOOKMARKS, // Boolean | allowWatchBookmarks requests watch events with type "BOOKMARK". + null, // String | The continue option should be set when retrieving more results from the server. + null, // String | A selector to restrict the list of returned objects by their fields. + null, // String | A selector to restrict the list of returned objects by their labels. + null, // Integer | limit is a maximum number of responses to return for a list call. + RESOURCE_VERSION, // String | Shows changes that occur after that particular version of a resource. + TIMEOUT_SECONDS, // Integer | Timeout for the list/watch call. + ALLOW_WATCH_BOOKMARKS // Boolean | Watch for changes to the described resources. ); } catch (ApiException apex) { getLogger().warning(apex.getResponseBody()); @@ -2258,6 +2344,35 @@ public static NetworkingV1beta1IngressList listNamespacedIngresses(String namesp return ingressList; } + /** + * Delete an ingress in the specified namespace. + * + * @param name ingress name to be deleted + * @param namespace namespace in which the specified ingress exists + * @return true if deleting ingress succeed, false otherwise + * @throws ApiException if Kubernetes API client call fails + */ + public static boolean deleteIngress(String name, String namespace) throws ApiException { + try { + NetworkingV1beta1Api apiInstance = new NetworkingV1beta1Api(apiClient); + apiInstance.deleteNamespacedIngress( + name, // ingress name + namespace, // namespace + PRETTY, // String | If 'true', then the output is pretty printed. 
+ null, // String | dry run or permanent change + GRACE_PERIOD, // Integer | The duration in seconds before the object should be deleted. + null, // Boolean | Deprecated: use the PropagationPolicy. + BACKGROUND, // String | Whether and how garbage collection will be performed. + null // V1DeleteOptions. + ); + } catch (ApiException apex) { + getLogger().warning(apex.getResponseBody()); + throw apex; + } + + return true; + } + /** * Get Ingress in the given namespace by name. * @@ -2267,10 +2382,10 @@ public static NetworkingV1beta1IngressList listNamespacedIngresses(String namesp * @throws ApiException when get fails */ public static NetworkingV1beta1Ingress getNamespacedIngress(String namespace, String name) - throws ApiException { + throws ApiException { try { for (NetworkingV1beta1Ingress item - : listNamespacedIngresses(namespace).getItems()) { + : listNamespacedIngresses(namespace).getItems()) { if (name.equals(item.getMetadata().getName())) { return item; } @@ -2298,52 +2413,55 @@ public static NetworkingV1beta1Ingress getNamespacedIngress(String namespace, St * @throws InterruptedException if any thread has interrupted the current thread */ public static ExecResult exec(V1Pod pod, String containerName, boolean redirectToStdout, - String... command) - throws IOException, ApiException, InterruptedException { + String... command) + throws IOException, ApiException, InterruptedException { // Execute command using Kubernetes API KubernetesExec kubernetesExec = createKubernetesExec(pod, containerName); final Process proc = kubernetesExec.exec(command); + // If redirect enabled, copy stdout and stderr to corresponding Outputstream final CopyingOutputStream copyOut = - redirectToStdout ? new CopyingOutputStream(System.out) : new CopyingOutputStream(null); + redirectToStdout ? new CopyingOutputStream(System.out) : new CopyingOutputStream(null); + final CopyingOutputStream copyErr = + redirectToStdout ? 
new CopyingOutputStream(System.err) : new CopyingOutputStream(null); // Start a thread to begin reading the output stream of the command - Thread out = null; try { - out = - new Thread( - () -> { - try { - ByteStreams.copy(proc.getInputStream(), copyOut); - } catch (IOException ex) { - // "Pipe broken" is expected when process is finished so don't log - if (ex.getMessage() != null && !ex.getMessage().contains("Pipe broken")) { - getLogger().warning("Exception reading from input stream.", ex); - } - } - }); + Thread out = createStreamReader(proc.getInputStream(), copyOut, + "Exception reading from stdout input stream."); out.start(); + // Start a thread to begin reading the error stream of the command + Thread err = createStreamReader(proc.getErrorStream(), copyErr, + "Exception reading from stderr input stream."); + err.start(); + // wait for the process, which represents the executing command, to terminate proc.waitFor(); - // wait for reading thread to finish any remaining output + // wait for stdout reading thread to finish any remaining output out.join(); + // wait for stderr reading thread to finish any remaining output + err.join(); + // Read data from process's stdout String stdout = readExecCmdData(copyOut.getInputStream()); // Read from process's stderr, if data available - String stderr = null; - try { - stderr = (proc.getErrorStream().available() != 0) ? 
readExecCmdData(proc.getErrorStream()) : null; - } catch (IllegalStateException e) { - // IllegalStateException thrown when stream is already closed, ignore since there is - // nothing to read + String stderr = readExecCmdData(copyErr.getInputStream());; + + ExecResult result = new ExecResult(proc.exitValue(), stdout, stderr); + getLogger().fine("result from exec command: " + result); + + if (result.exitValue() != 0) { + getLogger().info("result.exitValue={0}", result.exitValue()); + getLogger().info("result.stdout={0}", result.stdout()); + getLogger().info("result.stderr={0}", result.stderr()); } - return new ExecResult(proc.exitValue(), stdout, stderr); + return result; } finally { if (proc != null) { proc.destroy(); @@ -2351,6 +2469,22 @@ public static ExecResult exec(V1Pod pod, String containerName, boolean redirectT } } + private static Thread createStreamReader(InputStream inputStream, CopyingOutputStream copyOut, + String s) { + return + new Thread( + () -> { + try { + ByteStreams.copy(inputStream, copyOut); + } catch (IOException ex) { + // "Pipe broken" is expected when process is finished so don't log + if (ex.getMessage() != null && !ex.getMessage().contains("Pipe broken")) { + getLogger().warning(s, ex); + } + } + }); + } + /** * Create an object which can execute commands in a Kubernetes container. 
* @@ -2361,10 +2495,10 @@ public static ExecResult exec(V1Pod pod, String containerName, boolean redirectT */ public static KubernetesExec createKubernetesExec(V1Pod pod, String containerName) { return new KubernetesExec() - .apiClient(apiClient) // the Kubernetes api client to dispatch the "exec" command - .pod(pod) // The pod where the command is to be run - .containerName(containerName) // the container in which the command is to be run - .passStdinAsStream(); // pass a stdin stream into the container + .apiClient(apiClient) // the Kubernetes api client to dispatch the "exec" command + .pod(pod) // The pod where the command is to be run + .containerName(containerName) // the container in which the command is to be run + .passStdinAsStream(); // pass a stdin stream into the container } /** @@ -2376,16 +2510,16 @@ public static KubernetesExec createKubernetesExec(V1Pod pod, String containerNam * @throws ApiException if Kubernetes client API call fails */ public static NetworkingV1beta1Ingress createIngress(String namespace, NetworkingV1beta1Ingress ingressBody) - throws ApiException { + throws ApiException { NetworkingV1beta1Ingress ingress; try { NetworkingV1beta1Api apiInstance = new NetworkingV1beta1Api(apiClient); ingress = apiInstance.createNamespacedIngress( - namespace, //namespace - ingressBody, // NetworkingV1beta1Ingress object, representing the ingress details - PRETTY, // pretty print output - null, // when present, indicates that modifications should not be persisted - null // a name associated with the actor or entity that is making these changes + namespace, //namespace + ingressBody, // NetworkingV1beta1Ingress object, representing the ingress details + PRETTY, // pretty print output + null, // when present, indicates that modifications should not be persisted + null // a name associated with the actor or entity that is making these changes ); } catch (ApiException apex) { getLogger().warning(apex.getResponseBody()); @@ -2400,7 +2534,7 @@ public 
static NetworkingV1beta1Ingress createIngress(String namespace, Networkin private static String readExecCmdData(InputStream is) { StringBuilder sb = new StringBuilder(); try (BufferedReader reader = new BufferedReader( - new InputStreamReader(is, Charsets.UTF_8))) { + new InputStreamReader(is, Charsets.UTF_8))) { int c = 0; while ((c = reader.read()) != -1) { sb.append((char) c); @@ -2455,4 +2589,4 @@ public InputStream getInputStream() { return new ByteArrayInputStream(copy.toByteArray()); } } -} +} \ No newline at end of file diff --git a/new-integration-tests/src/test/java/oracle/weblogic/kubernetes/actions/impl/primitive/WebLogicImageTool.java b/new-integration-tests/src/test/java/oracle/weblogic/kubernetes/actions/impl/primitive/WebLogicImageTool.java index 280802375cf..d3e9f951173 100644 --- a/new-integration-tests/src/test/java/oracle/weblogic/kubernetes/actions/impl/primitive/WebLogicImageTool.java +++ b/new-integration-tests/src/test/java/oracle/weblogic/kubernetes/actions/impl/primitive/WebLogicImageTool.java @@ -95,7 +95,8 @@ private String buildiWitCommand() { + " update " + " --tag " + params.modelImageName() + ":" + params.modelImageTag() + " --fromImage " + params.baseImageName() + ":" + params.baseImageTag() - + " --wdtDomainType " + params.domainType(); + + " --wdtDomainType " + params.domainType() + + " --chown oracle:root"; if (params.wdtModelOnly()) { command += " --wdtModelOnly "; diff --git a/new-integration-tests/src/test/java/oracle/weblogic/kubernetes/extensions/ImageBuilders.java b/new-integration-tests/src/test/java/oracle/weblogic/kubernetes/extensions/ImageBuilders.java index 4d79040fbef..fd782978bdb 100644 --- a/new-integration-tests/src/test/java/oracle/weblogic/kubernetes/extensions/ImageBuilders.java +++ b/new-integration-tests/src/test/java/oracle/weblogic/kubernetes/extensions/ImageBuilders.java @@ -3,6 +3,7 @@ package oracle.weblogic.kubernetes.extensions; + import java.io.IOException; import java.nio.file.Path; import 
java.nio.file.Paths; @@ -13,6 +14,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.concurrent.Callable; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import java.util.logging.Handler; @@ -25,9 +27,12 @@ import oracle.weblogic.kubernetes.logging.LoggingFacade; import oracle.weblogic.kubernetes.utils.ExecCommand; import oracle.weblogic.kubernetes.utils.ExecResult; +import org.awaitility.core.ConditionFactory; import org.junit.jupiter.api.extension.BeforeAllCallback; import org.junit.jupiter.api.extension.ExtensionContext; +import static java.util.concurrent.TimeUnit.MINUTES; +import static java.util.concurrent.TimeUnit.SECONDS; import static oracle.weblogic.kubernetes.TestConstants.DB_IMAGE_NAME; import static oracle.weblogic.kubernetes.TestConstants.DB_IMAGE_TAG; import static oracle.weblogic.kubernetes.TestConstants.JRF_BASE_IMAGE_NAME; @@ -76,6 +81,7 @@ import static oracle.weblogic.kubernetes.utils.IstioUtils.installIstio; import static oracle.weblogic.kubernetes.utils.IstioUtils.uninstallIstio; import static oracle.weblogic.kubernetes.utils.ThreadSafeLogger.getLogger; +import static org.awaitility.Awaitility.with; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.extension.ExtensionContext.Namespace.GLOBAL; @@ -91,7 +97,12 @@ public class ImageBuilders implements BeforeAllCallback, ExtensionContext.Store. 
private static String wdtBasicImage; private static Collection pushedImages = new ArrayList<>(); + private static boolean isInitializationSuccessful = false; + ConditionFactory withStandardRetryPolicy + = with().pollDelay(0, SECONDS) + .and().with().pollInterval(10, SECONDS) + .atMost(30, MINUTES).await(); @Override public void beforeAll(ExtensionContext context) { @@ -122,23 +133,67 @@ public void beforeAll(ExtensionContext context) { operatorImage = Operator.getImageName(); logger.info("Operator image name {0}", operatorImage); assertFalse(operatorImage.isEmpty(), "Image name can not be empty"); - assertTrue(Operator.buildImage(operatorImage)); + assertTrue(Operator.buildImage(operatorImage), "docker build failed for Operator"); + + // docker login to OCR if OCR_USERNAME and OCR_PASSWORD is provided in env var + if (!OCR_USERNAME.equals(REPO_DUMMY_VALUE)) { + withStandardRetryPolicy + .conditionEvaluationListener( + condition -> logger.info("Waiting for docker login to be successful" + + "(elapsed time {0} ms, remaining time {1} ms)", + condition.getElapsedTimeInMS(), + condition.getRemainingTimeInMS())) + .until(() -> dockerLogin(OCR_REGISTRY, OCR_USERNAME, OCR_PASSWORD)); + } + + // The following code is for pulling WLS images if running tests in Kind cluster + if (KIND_REPO != null) { + // The kind clusters can't pull images from OCR using the image pull secret. + // It may be a containerd bug. We are going to workaround this issue. + // The workaround will be to: + // 1. docker login + // 2. docker pull + // 3. docker tag with the KIND_REPO value + // 4. docker push this new image name + // 5. 
use this image name to create the domain resource + Collection images = new ArrayList<>(); + images.add(WLS_BASE_IMAGE_NAME + ":" + WLS_BASE_IMAGE_TAG); + images.add(JRF_BASE_IMAGE_NAME + ":" + JRF_BASE_IMAGE_TAG); + images.add(DB_IMAGE_NAME + ":" + DB_IMAGE_TAG); + + for (String image : images) { + withStandardRetryPolicy + .conditionEvaluationListener( + condition -> logger.info("Waiting for pullImageFromOcrAndPushToKind for image {0} to be successful" + + "(elapsed time {1} ms, remaining time {2} ms)", image, + condition.getElapsedTimeInMS(), + condition.getRemainingTimeInMS())) + .until(pullImageFromOcrAndPushToKind(image)); + } + } if (System.getenv("SKIP_BASIC_IMAGE_BUILD") == null) { // build MII basic image miiBasicImage = MII_BASIC_IMAGE_NAME + ":" + MII_BASIC_IMAGE_TAG; - assertTrue(createBasicImage(MII_BASIC_IMAGE_NAME, MII_BASIC_IMAGE_TAG, MII_BASIC_WDT_MODEL_FILE, - null, MII_BASIC_APP_NAME, MII_BASIC_IMAGE_DOMAINTYPE), - String.format("Failed to create the image %s using WebLogic Image Tool", - miiBasicImage)); + withStandardRetryPolicy + .conditionEvaluationListener( + condition -> logger.info("Waiting for createBasicImage to be successful" + + "(elapsed time {0} ms, remaining time {1} ms)", + condition.getElapsedTimeInMS(), + condition.getRemainingTimeInMS())) + .until(createBasicImage(MII_BASIC_IMAGE_NAME, MII_BASIC_IMAGE_TAG, MII_BASIC_WDT_MODEL_FILE, + null, MII_BASIC_APP_NAME, MII_BASIC_IMAGE_DOMAINTYPE)); // build basic wdt-domain-in-image image wdtBasicImage = WDT_BASIC_IMAGE_NAME + ":" + WDT_BASIC_IMAGE_TAG; - assertTrue(createBasicImage(WDT_BASIC_IMAGE_NAME, WDT_BASIC_IMAGE_TAG, WDT_BASIC_MODEL_FILE, - WDT_BASIC_MODEL_PROPERTIES_FILE, WDT_BASIC_APP_NAME, WDT_BASIC_IMAGE_DOMAINTYPE), - String.format("Failed to create the image %s using WebLogic Image Tool", - wdtBasicImage)); - + withStandardRetryPolicy + .conditionEvaluationListener( + condition -> logger.info("Waiting for createBasicImage to be successful" + + "(elapsed time {0} ms, 
remaining time {1} ms)", + condition.getElapsedTimeInMS(), + condition.getRemainingTimeInMS())) + .until(createBasicImage(WDT_BASIC_IMAGE_NAME, WDT_BASIC_IMAGE_TAG, WDT_BASIC_MODEL_FILE, + WDT_BASIC_MODEL_PROPERTIES_FILE, WDT_BASIC_APP_NAME, WDT_BASIC_IMAGE_DOMAINTYPE)); /* Check image exists using docker images | grep image tag. * Tag name is unique as it contains date and timestamp. @@ -147,48 +202,49 @@ public void beforeAll(ExtensionContext context) { * the test fails even though the image exists. */ assertTrue(doesImageExist(MII_BASIC_IMAGE_TAG), - String.format("Image %s doesn't exist", miiBasicImage)); + String.format("Image %s doesn't exist", miiBasicImage)); assertTrue(doesImageExist(WDT_BASIC_IMAGE_TAG), - String.format("Image %s doesn't exist", wdtBasicImage)); + String.format("Image %s doesn't exist", wdtBasicImage)); if (!REPO_USERNAME.equals(REPO_DUMMY_VALUE)) { logger.info("docker login"); - assertTrue(dockerLogin(REPO_REGISTRY, REPO_USERNAME, REPO_PASSWORD), "docker login failed"); + withStandardRetryPolicy + .conditionEvaluationListener( + condition -> logger.info("Waiting for docker login to be successful" + + "(elapsed time {0} ms, remaining time {1} ms)", + condition.getElapsedTimeInMS(), + condition.getRemainingTimeInMS())) + .until(() -> dockerLogin(REPO_REGISTRY, REPO_USERNAME, REPO_PASSWORD)); } } - // push the image + + // push the images to repo if (!REPO_NAME.isEmpty()) { - logger.info("docker push image {0} to {1}", operatorImage, REPO_NAME); - assertTrue(dockerPush(operatorImage), String.format("docker push failed for image %s", operatorImage)); + List images = new ArrayList<>(); + images.add(operatorImage); + // add images only if SKIP_BASIC_IMAGE_BUILD is not set if (System.getenv("SKIP_BASIC_IMAGE_BUILD") == null) { - logger.info("docker push mii basic image {0} to registry", miiBasicImage); - assertTrue(dockerPush(miiBasicImage), String.format("docker push failed for image %s", miiBasicImage)); + images.add(miiBasicImage); + 
images.add(wdtBasicImage); + } - logger.info("docker push wdt basic domain in image {0} to registry", wdtBasicImage); - assertTrue(dockerPush(wdtBasicImage), String.format("docker push failed for image %s", wdtBasicImage)); + for (String image : images) { + logger.info("docker push image {0} to {1}", image, REPO_NAME); + withStandardRetryPolicy + .conditionEvaluationListener( + condition -> logger.info("Waiting for docker push for image {0} to be successful" + + "(elapsed time {1} ms, remaining time {2} ms)", + image, + condition.getElapsedTimeInMS(), + condition.getRemainingTimeInMS())) + .until(() -> dockerPush(image)); } } - // The following code is for pulling WLS images if running tests in Kind cluster - if (KIND_REPO != null) { - // We can't figure out why the kind clusters can't pull images from OCR using the image pull secret. There - // is some evidence it may be a containerd bug. Therefore, we are going to "give up" and workaround the issue. - // The workaround will be to: - // 1. docker login - // 2. docker pull - // 3. docker tag with the KIND_REPO value - // 4. docker push this new image name - // 5. 
use this image name to create the domain resource - Collection images = new ArrayList<>(); - images.add(WLS_BASE_IMAGE_NAME + ":" + WLS_BASE_IMAGE_TAG); - images.add(JRF_BASE_IMAGE_NAME + ":" + JRF_BASE_IMAGE_TAG); - images.add(DB_IMAGE_NAME + ":" + DB_IMAGE_TAG); - - assertTrue(dockerLogin(OCR_REGISTRY, OCR_USERNAME, OCR_PASSWORD), "docker login failed"); - pullImageFromOcrAndPushToKind(images); - } + // set initialization success to true, not counting the istio installation as not all tests use istio + isInitializationSuccessful = true; logger.info("Installing istio before any test suites are run"); installIstio(); } finally { @@ -206,6 +262,12 @@ public void beforeAll(ExtensionContext context) { throw new IllegalStateException(e); } } + + // check initialization is already done and is not successful + assertTrue(started.get() && isInitializationSuccessful, + "Initialization(pull images from OCR or login/push to OCIR) failed, " + + "check the actual error or stack trace in the first test that failed in the test suite"); + } /** @@ -272,8 +334,8 @@ private void deleteImageOcir(String token, String imageName) { String tenancy = imageName.substring(firstSlashIdx + 1, secondSlashIdx); String imageAndTag = imageName.substring(secondSlashIdx + 1); String curlCmd = "curl -skL -X \"DELETE\" -H \"Authorization: Bearer " + token - + "\" \"https://" + registry + "/20180419/docker/images/" - + tenancy + "/" + imageAndTag.replace(':', '/') + "\""; + + "\" \"https://" + registry + "/20180419/docker/images/" + + tenancy + "/" + imageAndTag.replace(':', '/') + "\""; logger.info("About to invoke: " + curlCmd); ExecResult result = null; try { @@ -313,15 +375,15 @@ private void deleteImageOcir(String token, String imageName) { // Delete the repository curlCmd = - "curl -skL -X \"DELETE\" -H \"Authorization: Bearer " - + token - + "\" \"https://" - + registry - + "/20180419/docker/repos/" - + tenancy - + "/" - + repo - + "\""; + "curl -skL -X \"DELETE\" -H \"Authorization: Bearer " 
+ + token + + "\" \"https://" + + registry + + "/20180419/docker/repos/" + + tenancy + + "/" + + repo + + "\""; logger.info("About to invoke: " + curlCmd); result = null; try { @@ -356,77 +418,77 @@ private void deleteImageOcir(String token, String imageName) { * @param domainType domain type to be built * @return true if image is created successfully */ - private boolean createBasicImage(String imageName, String imageTag, String modelFile, String varFile, - String appName, String domainType) { - LoggingFacade logger = getLogger(); - final String image = imageName + ":" + imageTag; - - // build the model file list - final List modelList = Collections.singletonList(MODEL_DIR + "/" + modelFile); - - // build an application archive using what is in resources/apps/APP_NAME - logger.info("Build an application archive using resources/apps/{0}", appName); - assertTrue(buildAppArchive(defaultAppParams() - .srcDirList(Collections.singletonList(appName))), - String.format("Failed to create app archive for %s", appName)); - - // build the archive list - String zipFile = String.format("%s/%s.zip", ARCHIVE_DIR, appName); - final List archiveList = Collections.singletonList(zipFile); - - // Set additional environment variables for WIT - checkDirectory(WIT_BUILD_DIR); - Map env = new HashMap<>(); - env.put("WLSIMG_BLDDIR", WIT_BUILD_DIR); - - // For k8s 1.16 support and as of May 6, 2020, we presently need a different JDK for these - // tests and for image tool. This is expected to no longer be necessary once JDK 11.0.8 or - // the next JDK 14 versions are released. 
- String witJavaHome = System.getenv("WIT_JAVA_HOME"); - if (witJavaHome != null) { - env.put("JAVA_HOME", witJavaHome); - } - // build an image using WebLogic Image Tool - boolean imageCreation = false; - logger.info("Create image {0} using model directory {1}", image, MODEL_DIR); - if (domainType.equalsIgnoreCase("wdt")) { - final List modelVarList = Collections.singletonList(MODEL_DIR + "/" + varFile); - imageCreation = createImage( - defaultWitParams() - .modelImageName(imageName) - .modelImageTag(WDT_BASIC_IMAGE_TAG) - .modelFiles(modelList) - .modelArchiveFiles(archiveList) - .modelVariableFiles(modelVarList) - .domainHome(WDT_BASIC_IMAGE_DOMAINHOME) - .wdtOperation("CREATE") - .wdtVersion(WDT_VERSION) - .env(env) - .redirect(true)); - } else if (domainType.equalsIgnoreCase("mii")) { - imageCreation = createImage( - defaultWitParams() - .modelImageName(imageName) - .modelImageTag(MII_BASIC_IMAGE_TAG) - .modelFiles(modelList) - .modelArchiveFiles(archiveList) - .wdtModelOnly(true) - .wdtVersion(WDT_VERSION) - .env(env) - .redirect(true)); - } - return imageCreation; + public Callable createBasicImage(String imageName, String imageTag, String modelFile, String varFile, + String appName, String domainType) { + return (() -> { + LoggingFacade logger = getLogger(); + final String image = imageName + ":" + imageTag; + + // build the model file list + final List modelList = Collections.singletonList(MODEL_DIR + "/" + modelFile); + + // build an application archive using what is in resources/apps/APP_NAME + logger.info("Build an application archive using resources/apps/{0}", appName); + assertTrue(buildAppArchive(defaultAppParams() + .srcDirList(Collections.singletonList(appName))), + String.format("Failed to create app archive for %s", appName)); + + // build the archive list + String zipFile = String.format("%s/%s.zip", ARCHIVE_DIR, appName); + final List archiveList = Collections.singletonList(zipFile); + + // Set additional environment variables for WIT + 
checkDirectory(WIT_BUILD_DIR); + Map env = new HashMap<>(); + env.put("WLSIMG_BLDDIR", WIT_BUILD_DIR); + + // For k8s 1.16 support and as of May 6, 2020, we presently need a different JDK for these + // tests and for image tool. This is expected to no longer be necessary once JDK 11.0.8 or + // the next JDK 14 versions are released. + String witJavaHome = System.getenv("WIT_JAVA_HOME"); + if (witJavaHome != null) { + env.put("JAVA_HOME", witJavaHome); + } + + // build an image using WebLogic Image Tool + boolean imageCreation = false; + logger.info("Create image {0} using model directory {1}", image, MODEL_DIR); + if (domainType.equalsIgnoreCase("wdt")) { + final List modelVarList = Collections.singletonList(MODEL_DIR + "/" + varFile); + imageCreation = createImage( + defaultWitParams() + .modelImageName(imageName) + .modelImageTag(WDT_BASIC_IMAGE_TAG) + .modelFiles(modelList) + .modelArchiveFiles(archiveList) + .modelVariableFiles(modelVarList) + .domainHome(WDT_BASIC_IMAGE_DOMAINHOME) + .wdtOperation("CREATE") + .wdtVersion(WDT_VERSION) + .env(env) + .redirect(true)); + } else if (domainType.equalsIgnoreCase("mii")) { + imageCreation = createImage( + defaultWitParams() + .modelImageName(imageName) + .modelImageTag(MII_BASIC_IMAGE_TAG) + .modelFiles(modelList) + .modelArchiveFiles(archiveList) + .wdtModelOnly(true) + .wdtVersion(WDT_VERSION) + .env(env) + .redirect(true)); + } + return imageCreation; + }); } - private void pullImageFromOcrAndPushToKind(Collection imagesList) { - for (String image : imagesList) { - assertTrue(dockerPull(image), String.format("docker pull failed for image %s", image)); + private Callable pullImageFromOcrAndPushToKind(String image) { + return (() -> { String kindRepoImage = KIND_REPO + image.substring(TestConstants.OCR_REGISTRY.length() + 1); - assertTrue(dockerTag(image, kindRepoImage), - String.format("docker tag failed for images %s, %s", image, kindRepoImage)); - assertTrue(dockerPush(kindRepoImage), String.format("docker push 
failed for image %s", kindRepoImage)); - } + return dockerPull(image) && dockerTag(image, kindRepoImage) && dockerPush(kindRepoImage); + }); } -} +} \ No newline at end of file diff --git a/new-integration-tests/src/test/resources/loggingexporter/copy-logging-files-cmds.txt b/new-integration-tests/src/test/resources/loggingexporter/copy-logging-files-cmds.txt new file mode 100644 index 00000000000..2e487838d02 --- /dev/null +++ b/new-integration-tests/src/test/resources/loggingexporter/copy-logging-files-cmds.txt @@ -0,0 +1,7 @@ +[final-build-commands] + +RUN mkdir -p /u01/domains/elk-domain1/config && chown oracle:root /u01/domains/elk-domain1/config +COPY --chown=oracle:root files/WebLogicLoggingExporter.yaml /u01/domains/elk-domain1/config/ + +RUN mkdir -p /u01/domains/elk-domain1/lib && chown oracle:root /u01/domains/elk-domain1/lib +COPY --chown=oracle:root files/SNAKEYAML_JAR files/WEBLOGICLOGGINGEXPORTER_JAR /u01/domains/elk-domain1/lib/ \ No newline at end of file diff --git a/operator/pom.xml b/operator/pom.xml index d6451c3b74f..7572bd7171a 100644 --- a/operator/pom.xml +++ b/operator/pom.xml @@ -7,7 +7,7 @@ oracle.kubernetes operator-parent - 3.0.1 + 3.0.2 weblogic-kubernetes-operator diff --git a/operator/src/main/java/oracle/kubernetes/operator/DomainProcessorImpl.java b/operator/src/main/java/oracle/kubernetes/operator/DomainProcessorImpl.java index c701831f9cc..8538d4bb321 100644 --- a/operator/src/main/java/oracle/kubernetes/operator/DomainProcessorImpl.java +++ b/operator/src/main/java/oracle/kubernetes/operator/DomainProcessorImpl.java @@ -22,6 +22,7 @@ import io.kubernetes.client.openapi.models.V1ObjectMeta; import io.kubernetes.client.openapi.models.V1ObjectReference; import io.kubernetes.client.openapi.models.V1Pod; +import io.kubernetes.client.openapi.models.V1PodCondition; import io.kubernetes.client.openapi.models.V1PodList; import io.kubernetes.client.openapi.models.V1PodStatus; import io.kubernetes.client.openapi.models.V1Service; @@ -34,7 
+35,6 @@ import oracle.kubernetes.operator.helpers.CallBuilder; import oracle.kubernetes.operator.helpers.ConfigMapHelper; import oracle.kubernetes.operator.helpers.DomainPresenceInfo; -import oracle.kubernetes.operator.helpers.DomainStatusPatch; import oracle.kubernetes.operator.helpers.DomainValidationSteps; import oracle.kubernetes.operator.helpers.JobHelper; import oracle.kubernetes.operator.helpers.KubernetesUtils; @@ -64,11 +64,17 @@ import oracle.kubernetes.weblogic.domain.model.AdminService; import oracle.kubernetes.weblogic.domain.model.Channel; import oracle.kubernetes.weblogic.domain.model.Domain; +import oracle.kubernetes.weblogic.domain.model.DomainStatus; +import oracle.kubernetes.weblogic.domain.model.ServerHealth; +import oracle.kubernetes.weblogic.domain.model.ServerStatus; -import static oracle.kubernetes.operator.DomainStatusUpdater.INSPECTING_DOMAIN_PROGRESS_REASON; +import static oracle.kubernetes.operator.DomainStatusUpdater.ADMIN_SERVER_STARTING_PROGRESS_REASON; +import static oracle.kubernetes.operator.DomainStatusUpdater.MANAGED_SERVERS_STARTING_PROGRESS_REASON; import static oracle.kubernetes.operator.LabelConstants.INTROSPECTION_STATE_LABEL; import static oracle.kubernetes.operator.ProcessingConstants.DOMAIN_INTROSPECT_REQUESTED; import static oracle.kubernetes.operator.ProcessingConstants.MAKE_RIGHT_DOMAIN_OPERATION; +import static oracle.kubernetes.operator.ProcessingConstants.SERVER_HEALTH_MAP; +import static oracle.kubernetes.operator.ProcessingConstants.SERVER_STATE_MAP; import static oracle.kubernetes.operator.helpers.LegalNames.toJobIntrospectorName; public class DomainProcessorImpl implements DomainProcessor { @@ -209,7 +215,8 @@ private static Step bringAdminServerUpSteps( } private static Step bringManagedServersUp(Step next) { - return new ManagedServersUpStep(next); + return DomainStatusUpdater.createProgressingStep(MANAGED_SERVERS_STARTING_PROGRESS_REASON, true, + new ManagedServersUpStep(next)); } private FiberGate 
getMakeRightFiberGate(String ns) { @@ -253,7 +260,7 @@ public void reportSuspendedFibers() { gate.getCurrentFibers().forEach( (key, fiber) -> { Optional.ofNullable(fiber.getSuspendedStep()).ifPresent(suspendedStep -> { - try (LoggingContext stack + try (LoggingContext ignored = LoggingContext.setThreadContext().namespace(namespace).domainUid(getDomainUid(fiber))) { LOGGER.fine("Fiber is SUSPENDED at " + suspendedStep.getName()); } @@ -341,7 +348,8 @@ private void processIntrospectorJobPodWatch(V1Pod pod, String watchType) { switch (watchType) { case "ADDED": case "MODIFIED": - new DomainStatusUpdate(info.getDomain(), pod, domainUid).invoke(); + PodWatcher.PodStatus podStatus = PodWatcher.getPodStatus(pod); + new DomainStatusUpdate(pod, domainUid, delegate, info, podStatus).invoke(); break; default: } @@ -511,7 +519,11 @@ public void onThrowable(Packet packet, Throwable throwable) { } }); } catch (Throwable t) { - LOGGER.severe(MessageKeys.EXCEPTION, t); + try (LoggingContext ignored + = LoggingContext.setThreadContext() + .namespace(info.getNamespace()).domainUid(info.getDomainUid())) { + LOGGER.severe(MessageKeys.EXCEPTION, t); + } } }, main.initialShortDelay, @@ -541,6 +553,49 @@ public MakeRightDomainOperation createMakeRightOperation(Domain liveDomain) { return createMakeRightOperation(new DomainPresenceInfo(liveDomain)); } + public Step createPopulatePacketServerMapsStep(oracle.kubernetes.operator.work.Step next) { + return new PopulatePacketServerMapsStep(next); + } + + public static class PopulatePacketServerMapsStep extends Step { + public PopulatePacketServerMapsStep(Step next) { + super(next); + } + + @Override + public NextAction apply(Packet packet) { + populatePacketServerMapsFromDomain(packet); + return doNext(packet); + } + + private void populatePacketServerMapsFromDomain(Packet packet) { + Map serverHealth = new ConcurrentHashMap<>(); + Map serverState = new ConcurrentHashMap<>(); + Optional.ofNullable(packet.getSpi(DomainPresenceInfo.class)) + 
.map(DomainPresenceInfo::getDomain) + .map(Domain::getStatus) + .map(DomainStatus::getServers) + .ifPresent(servers -> servers.forEach(item -> addServerToMaps(serverHealth, serverState, item))); + if (!serverState.isEmpty()) { + packet.put(SERVER_STATE_MAP, serverState); + } + if (!serverHealth.isEmpty()) { + packet.put(SERVER_HEALTH_MAP, serverHealth); + } + } + + private void addServerToMaps(Map serverHealthMap, + Map serverStateMap, ServerStatus item) { + if (item.getHealth() != null) { + serverHealthMap.put(item.getServerName(), item.getHealth()); + } + if (item.getState() != null) { + serverStateMap.put(item.getServerName(), item.getState()); + } + } + + } + /** * A factory which creates and executes steps to align the cached domain status with the value read from Kubernetes. */ @@ -648,16 +703,21 @@ private void internalMakeRightDomainPresence() { Component.createFor(liveInfo, delegate.getVersion(), PodAwaiterStepFactory.class, delegate.getPodAwaiterStepFactory(getNamespace()), V1SubjectRulesReviewStatus.class, delegate.getSubjectRulesReviewStatus(getNamespace()))); - runDomainPlan( getDomain(), getDomainUid(), getNamespace(), - new StepAndPacket(createSteps(), packet), + createDomainPlanSteps(packet), deleting, willInterrupt); } + private StepAndPacket createDomainPlanSteps(Packet packet) { + return new StepAndPacket( + createPopulatePacketServerMapsStep(createSteps()), + packet); + } + private Domain getDomain() { return liveInfo.getDomain(); } @@ -748,7 +808,8 @@ public void onThrowable(Packet packet, Throwable throwable) { () -> { DomainPresenceInfo existing = getExistingDomainPresenceInfo(ns, domainUid); if (existing != null) { - try (LoggingContext stack = LoggingContext.setThreadContext().namespace(ns)) { + try (LoggingContext ignored = + LoggingContext.setThreadContext().namespace(ns).domainUid(domainUid)) { existing.setPopulated(false); // proceed only if we have not already retried max number of times int retryCount = 
existing.incrementAndGetFailureCount(); @@ -783,20 +844,19 @@ public void onThrowable(Packet packet, Throwable throwable) { } Step createDomainUpPlan(DomainPresenceInfo info) { - Step managedServerStrategy = - bringManagedServersUp(DomainStatusUpdater.createEndProgressingStep(new TailStep())); + Step managedServerStrategy = bringManagedServersUp(DomainStatusUpdater.createEndProgressingStep(new TailStep())); Step domainUpStrategy = Step.chain( domainIntrospectionSteps(info), new DomainStatusStep(info, null), + DomainStatusUpdater.createProgressingStep(ADMIN_SERVER_STARTING_PROGRESS_REASON,true, null), bringAdminServerUp(info, delegate.getPodAwaiterStepFactory(info.getNamespace())), managedServerStrategy); return Step.chain( createDomainUpInitialStep(info), ConfigMapHelper.readExistingIntrospectorConfigMap(info.getNamespace(), info.getDomainUid()), - DomainStatusUpdater.createProgressingStep(INSPECTING_DOMAIN_PROGRESS_REASON,true, null), DomainPresenceStep.createDomainPresenceStep(info.getDomain(), domainUpStrategy, managedServerStrategy)); } @@ -974,42 +1034,87 @@ public NextAction apply(Packet packet) { } private static class DomainStatusUpdate { - private final Domain domain; private final V1Pod pod; private final String domainUid; + private DomainProcessorDelegate delegate = null; + private DomainPresenceInfo info = null; + private PodWatcher.PodStatus podStatus; - DomainStatusUpdate(Domain domain, V1Pod pod, String domainUid) { - this.domain = domain; + DomainStatusUpdate(V1Pod pod, String domainUid, DomainProcessorDelegate delegate, + DomainPresenceInfo info, PodWatcher.PodStatus podStatus) { this.pod = pod; this.domainUid = domainUid; + this.delegate = delegate; + this.info = info; + this.podStatus = podStatus; } - public void invoke() { - Optional.ofNullable(getMatchingContainerStatus()) - .map(V1ContainerStatus::getState) - .map(V1ContainerState::getWaiting) - .ifPresent(waiting -> updateStatus(waiting.getReason(), waiting.getMessage())); - } - - private 
void updateStatus(String reason, String message) { - if (reason == null || message == null) { - return; + private void invoke() { + switch (podStatus) { + case PHASE_FAILED: + delegate.runSteps( + DomainStatusUpdater.createFailedStep( + info, pod.getStatus().getReason(), pod.getStatus().getMessage(), null)); + break; + case WAITING_NON_NULL_MESSAGE: + Optional.ofNullable(getMatchingContainerStatus()) + .map(V1ContainerStatus::getState) + .map(V1ContainerState::getWaiting) + .ifPresent(waiting -> + delegate.runSteps( + DomainStatusUpdater.createFailedStep( + info, waiting.getReason(), waiting.getMessage(), null))); + break; + case TERMINATED_ERROR_REASON: + Optional.ofNullable(getMatchingContainerStatus()) + .map(V1ContainerStatus::getState) + .map(V1ContainerState::getTerminated) + .ifPresent(terminated -> delegate.runSteps( + DomainStatusUpdater.createFailedStep( + info, terminated.getReason(), terminated.getMessage(), null))); + break; + case UNSCHEDULABLE: + Optional.ofNullable(getMatchingPodCondition()) + .ifPresent(condition -> + delegate.runSteps( + DomainStatusUpdater.createFailedStep( + info, condition.getReason(), condition.getMessage(), null))); + break; + case SUCCESS: + Optional.ofNullable(getMatchingContainerStatus()) + .map(V1ContainerStatus::getState) + .map(V1ContainerState::getWaiting) + .ifPresent(waiting -> + delegate.runSteps( + DomainStatusUpdater.createProgressingStep( + info, waiting.getReason(), false, null))); + break; + default: } - - DomainStatusPatch.updateSynchronously(domain, reason, message); } private V1ContainerStatus getMatchingContainerStatus() { return Optional.ofNullable(pod.getStatus()) - .map(V1PodStatus::getContainerStatuses) - .flatMap(this::getMatchingContainerStatus) - .orElse(null); + .map(V1PodStatus::getContainerStatuses) + .flatMap(this::getMatchingContainerStatus) + .orElse(null); } private Optional getMatchingContainerStatus(Collection statuses) { return 
statuses.stream().filter(this::hasInstrospectorJobName).findFirst(); } + private V1PodCondition getMatchingPodCondition() { + return Optional.ofNullable(pod.getStatus()) + .map(V1PodStatus::getConditions) + .flatMap(this::getPodCondition) + .orElse(null); + } + + private Optional getPodCondition(Collection conditions) { + return conditions.stream().findFirst(); + } + private boolean hasInstrospectorJobName(V1ContainerStatus s) { return toJobIntrospectorName(domainUid).equals(s.getName()); } diff --git a/operator/src/main/java/oracle/kubernetes/operator/DomainStatusUpdater.java b/operator/src/main/java/oracle/kubernetes/operator/DomainStatusUpdater.java index c7818fed6cb..a3ce154a9a5 100644 --- a/operator/src/main/java/oracle/kubernetes/operator/DomainStatusUpdater.java +++ b/operator/src/main/java/oracle/kubernetes/operator/DomainStatusUpdater.java @@ -37,6 +37,7 @@ import oracle.kubernetes.operator.steps.DefaultResponseStep; import oracle.kubernetes.operator.wlsconfig.WlsClusterConfig; import oracle.kubernetes.operator.wlsconfig.WlsDomainConfig; +import oracle.kubernetes.operator.work.Component; import oracle.kubernetes.operator.work.NextAction; import oracle.kubernetes.operator.work.Packet; import oracle.kubernetes.operator.work.Step; @@ -65,6 +66,7 @@ @SuppressWarnings("WeakerAccess") public class DomainStatusUpdater { public static final String INSPECTING_DOMAIN_PROGRESS_REASON = "InspectingDomainPresence"; + public static final String ADMIN_SERVER_STARTING_PROGRESS_REASON = "AdminServerStarting"; public static final String MANAGED_SERVERS_STARTING_PROGRESS_REASON = "ManagedServersStarting"; public static final String SERVERS_READY_REASON = "ServersReady"; public static final String ALL_STOPPED_AVAILABLE_REASON = "AllServersStopped"; @@ -97,7 +99,21 @@ public static Step createStatusUpdateStep(Step next) { * @return Step */ public static Step createProgressingStep(String reason, boolean isPreserveAvailable, Step next) { - return new ProgressingStep(reason, 
isPreserveAvailable, next); + return new ProgressingStep(null, reason, isPreserveAvailable, next); + } + + /** + * Asynchronous step to set Domain condition to Progressing. + * + * @param info Domain presence info + * @param reason Progressing reason + * @param isPreserveAvailable true, if existing Available=True condition should be preserved + * @param next Next step + * @return Step + */ + public static Step createProgressingStep(DomainPresenceInfo info, String reason, boolean isPreserveAvailable, + Step next) { + return new ProgressingStep(info, reason, isPreserveAvailable, next); } /** @@ -148,7 +164,8 @@ public static Step createFailedStep(CallResponse callResponse, Step next) { * @return Step */ static Step createFailedStep(Throwable throwable, Step next) { - return createFailedStep("Exception", throwable.getMessage(), next); + return throwable.getMessage() == null ? createFailedStep("Exception", throwable.toString(), next) + : createFailedStep("Exception", throwable.getMessage(), next); } /** @@ -160,10 +177,24 @@ static Step createFailedStep(Throwable throwable, Step next) { * @return Step */ public static Step createFailedStep(String reason, String message, Step next) { - return new FailedStep(reason, message, next); + return new FailedStep(null, reason, message, next); + } + + /** + * Asynchronous step to set Domain condition to Failed. 
+ * + * @param info Domain presence info + * @param reason the reason for the failure + * @param message a fuller description of the problem + * @param next Next step + * @return Step + */ + public static Step createFailedStep(DomainPresenceInfo info, String reason, String message, Step next) { + return new FailedStep(info, reason, message, next); } abstract static class DomainStatusUpdaterStep extends Step { + private DomainPresenceInfo info = null; DomainStatusUpdaterStep(Step next) { super(next); @@ -177,6 +208,14 @@ DomainStatusUpdaterContext createContext(Packet packet) { @Override public NextAction apply(Packet packet) { + if ((packet.getSpi(DomainPresenceInfo.class) == null) + && (info != null)) { + packet + .getComponents() + .put( + ProcessingConstants.DOMAIN_COMPONENT_NAME, + Component.createFor(info)); + } DomainStatusUpdaterContext context = createContext(packet); DomainStatus newStatus = context.getNewStatus(); @@ -552,12 +591,13 @@ private Integer getClusterSizeGoal(String clusterName) { } } - private static class ProgressingStep extends DomainStatusUpdaterStep { + public static class ProgressingStep extends DomainStatusUpdaterStep { private final String reason; private final boolean isPreserveAvailable; - private ProgressingStep(String reason, boolean isPreserveAvailable, Step next) { + private ProgressingStep(DomainPresenceInfo info, String reason, boolean isPreserveAvailable, Step next) { super(next); + super.info = info; this.reason = reason; this.isPreserveAvailable = isPreserveAvailable; } @@ -602,8 +642,9 @@ private static class FailedStep extends DomainStatusUpdaterStep { private final String reason; private final String message; - private FailedStep(String reason, String message, Step next) { + private FailedStep(DomainPresenceInfo info, String reason, String message, Step next) { super(next); + super.info = info; this.reason = reason; this.message = message; } diff --git a/operator/src/main/java/oracle/kubernetes/operator/JobWatcher.java 
b/operator/src/main/java/oracle/kubernetes/operator/JobWatcher.java index d570b07b530..3047a864802 100644 --- a/operator/src/main/java/oracle/kubernetes/operator/JobWatcher.java +++ b/operator/src/main/java/oracle/kubernetes/operator/JobWatcher.java @@ -305,7 +305,8 @@ void updatePacket(Packet packet, V1Job job) { // be available for reading @Override boolean shouldTerminateFiber(V1Job job) { - return isFailed(job) && "DeadlineExceeded".equals(getFailedReason(job)); + return isFailed(job) && ("DeadlineExceeded".equals(getFailedReason(job)) + || "BackoffLimitExceeded".equals(getFailedReason(job))); } // create an exception to terminate the fiber diff --git a/operator/src/main/java/oracle/kubernetes/operator/Main.java b/operator/src/main/java/oracle/kubernetes/operator/Main.java index 9799ce7cbae..0e79b3daf66 100644 --- a/operator/src/main/java/oracle/kubernetes/operator/Main.java +++ b/operator/src/main/java/oracle/kubernetes/operator/Main.java @@ -752,7 +752,9 @@ public NextAction onSuccess(Packet packet, CallResponse callResponse return v; }); info.setPopulated(true); - dp.createMakeRightOperation(info).withExplicitRecheck().execute(); + try (LoggingContext stack = LoggingContext.setThreadContext().namespace(ns).domainUid(domainUid)) { + dp.createMakeRightOperation(info).withExplicitRecheck().execute(); + } } } diff --git a/operator/src/main/java/oracle/kubernetes/operator/PodWatcher.java b/operator/src/main/java/oracle/kubernetes/operator/PodWatcher.java index ba20079ea84..90cf91b0d13 100644 --- a/operator/src/main/java/oracle/kubernetes/operator/PodWatcher.java +++ b/operator/src/main/java/oracle/kubernetes/operator/PodWatcher.java @@ -7,6 +7,7 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Optional; import java.util.concurrent.ThreadFactory; @@ -21,6 +22,7 @@ import io.kubernetes.client.openapi.models.V1ContainerStatus; import 
io.kubernetes.client.openapi.models.V1ObjectMeta; import io.kubernetes.client.openapi.models.V1Pod; +import io.kubernetes.client.openapi.models.V1PodCondition; import io.kubernetes.client.openapi.models.V1PodStatus; import io.kubernetes.client.util.Watch; import oracle.kubernetes.operator.TuningParameters.WatchTuning; @@ -45,6 +47,14 @@ public class PodWatcher extends Watcher implements WatchListener, private final String namespace; private final WatchListener listener; + public enum PodStatus { + PHASE_FAILED, + WAITING_NON_NULL_MESSAGE, + TERMINATED_ERROR_REASON, + UNSCHEDULABLE, + SUCCESS + } + // Map of Pod name to callback. Note that since each pod name can be mapped to multiple callback registrations, // a concurrent map will not suffice; we therefore use an ordinary map and synchronous accesses. private final Map>> modifiedCallbackRegistrations = new HashMap<>(); @@ -170,27 +180,58 @@ public void receivedResponse(Watch.Response item) { * @param pod pob * @return true, if failed */ - private static boolean isFailed(V1Pod pod) { - if (pod == null) { - return false; - } + static boolean isFailed(@Nonnull V1Pod pod) { - V1PodStatus status = pod.getStatus(); LOGGER.fine( - "PodWatcher.isFailed status of pod " + pod.getMetadata().getName() + ": " + status); - if (status != null) { - java.util.List conStatuses = status.getContainerStatuses(); - if (conStatuses != null) { - for (V1ContainerStatus conStatus : conStatuses) { - if (!isReady(conStatus) - && (getContainerStateWaitingMessage(conStatus) != null - || getContainerStateTerminatedReason(conStatus).contains("Error"))) { - return true; - } - } - } + "PodWatcher.isFailed status of pod " + pod.getMetadata().getName() + ": " + pod.getStatus()); + return getContainerStatuses(pod).stream().anyMatch(PodWatcher::isPodFailed); + } + + static PodStatus getPodStatus(@Nonnull V1Pod pod) { + V1ContainerStatus conStatus = getContainerStatuses(pod) + .stream() + .findFirst() + .orElse(new V1ContainerStatus()); + String phase 
= Optional.ofNullable(pod.getStatus()).map(V1PodStatus::getPhase).orElse(""); + if (phase.equals("Failed")) { + return PodStatus.PHASE_FAILED; + } else if (!isReady(conStatus) && getContainerStateWaitingMessage(conStatus) != null) { + return PodStatus.WAITING_NON_NULL_MESSAGE; + } else if (!isReady(conStatus) && getContainerStateTerminatedReason(conStatus).contains("Error")) { + return PodStatus.TERMINATED_ERROR_REASON; + } else if (isUnschedulable(pod)) { + return PodStatus.UNSCHEDULABLE; } - return false; + return PodStatus.SUCCESS; + } + + static List getContainerStatuses(@Nonnull V1Pod pod) { + return Optional.ofNullable(pod.getStatus()).map(V1PodStatus::getContainerStatuses).orElse(Collections.emptyList()); + } + + private static boolean isPodFailed(V1ContainerStatus conStatus) { + return + !isReady(conStatus) + && (getContainerStateWaitingMessage(conStatus) != null + || getContainerStateTerminatedReason(conStatus).contains("Error")); + } + + static boolean isUnschedulable(@Nonnull V1Pod pod) { + + LOGGER.fine("PodWatcher.isUnschedulable status of pod " + pod.getMetadata().getName() + ": " + pod.getStatus()); + return getPodConditions(pod).stream().anyMatch(PodWatcher::isPodUnschedulable); + } + + private static List getPodConditions(@Nonnull V1Pod pod) { + return Optional.ofNullable(pod.getStatus()).map(V1PodStatus::getConditions).orElse(Collections.emptyList()); + } + + private static boolean isPodUnschedulable(V1PodCondition podCondition) { + return getReason(podCondition).contains("Unschedulable"); + } + + private static String getReason(V1PodCondition podCondition) { + return Optional.ofNullable(podCondition).map(V1PodCondition::getReason).orElse(""); } private static boolean isReady(V1ContainerStatus conStatus) { diff --git a/operator/src/main/java/oracle/kubernetes/operator/helpers/JobHelper.java b/operator/src/main/java/oracle/kubernetes/operator/helpers/JobHelper.java index 3beed57d3bc..157187b7bd0 100644 --- 
a/operator/src/main/java/oracle/kubernetes/operator/helpers/JobHelper.java +++ b/operator/src/main/java/oracle/kubernetes/operator/helpers/JobHelper.java @@ -45,6 +45,8 @@ import oracle.kubernetes.weblogic.domain.model.ServerEnvVars; import static oracle.kubernetes.operator.DomainSourceType.FromModel; +import static oracle.kubernetes.operator.DomainStatusUpdater.INSPECTING_DOMAIN_PROGRESS_REASON; +import static oracle.kubernetes.operator.DomainStatusUpdater.createProgressingStep; import static oracle.kubernetes.operator.logging.MessageKeys.INTROSPECTOR_JOB_FAILED; import static oracle.kubernetes.operator.logging.MessageKeys.INTROSPECTOR_JOB_FAILED_DETAIL; @@ -338,10 +340,12 @@ public NextAction apply(Packet packet) { packet.putIfAbsent(START_TIME, System.currentTimeMillis()); return doNext( - context.createNewJob( - readDomainIntrospectorPodLogStep( - deleteDomainIntrospectorJobStep( - ConfigMapHelper.createIntrospectorConfigMapStep(getNext())))), + Step.chain( + createProgressingStep(info, INSPECTING_DOMAIN_PROGRESS_REASON, true, null), + context.createNewJob(null), + readDomainIntrospectorPodLogStep(null), + deleteDomainIntrospectorJobStep(null), + ConfigMapHelper.createIntrospectorConfigMapStep(getNext())), packet); } diff --git a/operator/src/main/java/oracle/kubernetes/operator/helpers/PodHelper.java b/operator/src/main/java/oracle/kubernetes/operator/helpers/PodHelper.java index 7442b5bccb9..cbc53079187 100644 --- a/operator/src/main/java/oracle/kubernetes/operator/helpers/PodHelper.java +++ b/operator/src/main/java/oracle/kubernetes/operator/helpers/PodHelper.java @@ -34,6 +34,7 @@ import oracle.kubernetes.weblogic.domain.model.ServerSpec; import oracle.kubernetes.weblogic.domain.model.Shutdown; +import static oracle.kubernetes.operator.LabelConstants.CLUSTERNAME_LABEL; import static oracle.kubernetes.operator.ProcessingConstants.SERVERS_TO_ROLL; public class PodHelper { @@ -85,6 +86,63 @@ public static boolean isReady(V1Pod pod) { return ready; } + /** + * 
Get list of scheduled pods for a particular cluster or non-clustered servers. + * @param info Domain presence info + * @param clusterName cluster name of the pod server + * @return list containing scheduled pods + */ + public static List getScheduledPods(DomainPresenceInfo info, String clusterName) { + // These are presently scheduled servers + List scheduledServers = new ArrayList<>(); + for (Map.Entry entry : info.getServers().entrySet()) { + V1Pod pod = entry.getValue().getPod().get(); + if (pod != null && !PodHelper.isDeleting(pod) && PodHelper.getScheduledStatus(pod)) { + String wlsClusterName = pod.getMetadata().getLabels().get(CLUSTERNAME_LABEL); + if ((wlsClusterName == null) || (wlsClusterName.contains(clusterName))) { + scheduledServers.add(entry.getKey()); + } + } + } + return scheduledServers; + } + + /** + * Get list of ready pods for a particular cluster or non-clustered servers. + * @param info Domain presence info + * @param clusterName cluster name of the pod server + * @return list containing ready pods + */ + public static List getReadyPods(DomainPresenceInfo info, String clusterName) { + // These are presently Ready servers + List readyServers = new ArrayList<>(); + for (Map.Entry entry : info.getServers().entrySet()) { + V1Pod pod = entry.getValue().getPod().get(); + if (pod != null && !PodHelper.isDeleting(pod) && PodHelper.getReadyStatus(pod)) { + String wlsClusterName = pod.getMetadata().getLabels().get(CLUSTERNAME_LABEL); + if ((wlsClusterName == null) || (wlsClusterName.contains(clusterName))) { + readyServers.add(entry.getKey()); + } + } + } + return readyServers; + } + + /** + * get if pod is in scheduled state. + * @param pod pod + * @return true, if pod is scheduled + */ + public static boolean getScheduledStatus(V1Pod pod) { + V1PodSpec spec = pod.getSpec(); + if (spec != null) { + if (spec.getNodeName() != null) { + return true; + } + } + return false; + } + /** * get if pod is in ready state. 
* @param pod pod @@ -252,17 +310,23 @@ String getServerName() { return getAsName(); } + @Override + Step createProgressingStep(Step actionStep) { + return DomainStatusUpdater.createProgressingStep( + DomainStatusUpdater.ADMIN_SERVER_STARTING_PROGRESS_REASON, false, actionStep); + } + @Override Step createNewPod(Step next) { - return createPod(next); + return createProgressingStep(createPod(next)); } @Override Step replaceCurrentPod(Step next) { if (MakeRightDomainOperation.isInspectionRequired(packet)) { - return MakeRightDomainOperation.createStepsToRerunWithIntrospection(packet); + return createProgressingStep(MakeRightDomainOperation.createStepsToRerunWithIntrospection(packet)); } else { - return createCyclePodStep(next); + return createProgressingStep(createCyclePodStep(next)); } } @@ -410,7 +474,8 @@ private Map getServersToRoll() { return (Map) packet.get(SERVERS_TO_ROLL); } - private Step createProgressingStep(Step actionStep) { + @Override + Step createProgressingStep(Step actionStep) { return DomainStatusUpdater.createProgressingStep( DomainStatusUpdater.MANAGED_SERVERS_STARTING_PROGRESS_REASON, false, actionStep); } @@ -444,7 +509,7 @@ protected String getPodReplacedMessageKey() { protected V1ObjectMeta createMetadata() { V1ObjectMeta metadata = super.createMetadata(); if (getClusterName() != null) { - metadata.putLabelsItem(LabelConstants.CLUSTERNAME_LABEL, getClusterName()); + metadata.putLabelsItem(CLUSTERNAME_LABEL, getClusterName()); } return metadata; } @@ -502,7 +567,7 @@ public NextAction apply(Packet packet) { if (oldPod != null) { Map labels = oldPod.getMetadata().getLabels(); if (labels != null) { - clusterName = labels.get(LabelConstants.CLUSTERNAME_LABEL); + clusterName = labels.get(CLUSTERNAME_LABEL); } ServerSpec serverSpec = info.getDomain().getServer(serverName, clusterName); diff --git a/operator/src/main/java/oracle/kubernetes/operator/helpers/PodStepContext.java 
b/operator/src/main/java/oracle/kubernetes/operator/helpers/PodStepContext.java index 373777cc86a..5da0b1315d5 100644 --- a/operator/src/main/java/oracle/kubernetes/operator/helpers/PodStepContext.java +++ b/operator/src/main/java/oracle/kubernetes/operator/helpers/PodStepContext.java @@ -322,7 +322,19 @@ private Step replacePod(Step next) { return createPodAsync(replaceResponse(next)); } + /** + * Creates a Progressing step before an action step. + * + * @param actionStep the step to perform after the ProgressingStep. + * @return a step to be scheduled. + */ + abstract Step createProgressingStep(Step actionStep); + private Step patchCurrentPod(V1Pod currentPod, Step next) { + return createProgressingStep(patchPod(currentPod, next)); + } + + protected Step patchPod(V1Pod currentPod, Step next) { JsonPatchBuilder patchBuilder = Json.createPatchBuilder(); KubernetesUtils.addPatches( @@ -331,7 +343,7 @@ private Step patchCurrentPod(V1Pod currentPod, Step next) { patchBuilder, "/metadata/annotations/", getAnnotations(currentPod), getPodAnnotations()); return new CallBuilder() - .patchPodAsync(getPodName(), getNamespace(), + .patchPodAsync(getPodName(), getNamespace(), new V1Patch(patchBuilder.build().toString()), patchResponse(next)); } diff --git a/operator/src/main/java/oracle/kubernetes/operator/steps/ManagedServerUpIteratorStep.java b/operator/src/main/java/oracle/kubernetes/operator/steps/ManagedServerUpIteratorStep.java index cabacd161f7..ef2bbab27dc 100644 --- a/operator/src/main/java/oracle/kubernetes/operator/steps/ManagedServerUpIteratorStep.java +++ b/operator/src/main/java/oracle/kubernetes/operator/steps/ManagedServerUpIteratorStep.java @@ -4,16 +4,18 @@ package oracle.kubernetes.operator.steps; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Queue; import 
java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; -import java.util.stream.IntStream; import oracle.kubernetes.operator.DomainStatusUpdater; import oracle.kubernetes.operator.ProcessingConstants; @@ -43,9 +45,6 @@ public class ManagedServerUpIteratorStep extends Step { private final Collection startupInfos; - private static NextStepFactory NEXT_STEP_FACTORY = - (next) -> DomainStatusUpdater.createStatusUpdateStep(next); - public ManagedServerUpIteratorStep(Collection startupInfos, Step next) { super(next); this.startupInfos = startupInfos; @@ -89,12 +88,26 @@ public NextAction apply(Packet packet) { .filter(ssi -> !isServerInCluster(ssi)) .map(ssi -> createManagedServerUpDetails(packet, ssi)).collect(Collectors.toList()); - getStartClusteredServersStepFactories(startupInfos, packet).values() - .forEach(factory -> startDetails.addAll(factory.getServerStartsStepAndPackets())); + Collection work = new ArrayList<>(); + if (!startDetails.isEmpty()) { + work.add( + new StepAndPacket( + new StartManagedServersStep(null, 0, startDetails, null), packet)); + } + + for (Map.Entry entry + : getStartClusteredServersStepFactories(startupInfos, packet).entrySet()) { + work.add( + new StepAndPacket( + new StartManagedServersStep(entry.getKey(), entry.getValue().getMaxConcurrency(), + entry.getValue().getServerStartsStepAndPackets(), null), packet.clone())); + } + + if (!work.isEmpty()) { + return doForkJoin(DomainStatusUpdater.createStatusUpdateStep(getNext()), packet, work); + } - return doNext( - NEXT_STEP_FACTORY.createStatusUpdateStep(new StartManagedServersStep(startDetails, getNext())), - packet); + return doNext(DomainStatusUpdater.createStatusUpdateStep(getNext()), packet); } @@ -142,19 +155,52 @@ private boolean isServerInCluster(ServerStartupInfo ssi) { static class StartManagedServersStep extends Step { 
final Collection startDetails; + final Queue startDetailsQueue = new ConcurrentLinkedQueue<>(); + final String clusterName; + final int maxConcurrency; + final AtomicInteger numStarted = new AtomicInteger(0); - StartManagedServersStep(Collection startDetails, Step next) { + StartManagedServersStep(String clusterName, int maxConcurrency, Collection startDetails, Step next) { super(next); + this.clusterName = clusterName; this.startDetails = startDetails; + this.maxConcurrency = maxConcurrency; + startDetails.forEach(this::add); } - Collection getStartDetails() { - return startDetails; + void add(StepAndPacket serverToStart) { + startDetailsQueue.add(new StepAndPacket(serverToStart.step, serverToStart.packet)); } @Override public NextAction apply(Packet packet) { - return doForkJoin(new ManagedServerUpAfterStep(getNext()), packet, startDetails); + + if (startDetailsQueue.isEmpty()) { + return doNext(new ManagedServerUpAfterStep(getNext()), packet); + } else if (isServiceOnlyOrShuttingDown()) { + Collection servers = Collections.singletonList(startDetailsQueue.poll()); + return doForkJoin(this, packet, servers); + } else if (serverAvailableToStart(packet.getSpi(DomainPresenceInfo.class))) { + numStarted.getAndIncrement(); + return doForkJoin(this, packet, Collections.singletonList(startDetailsQueue.poll())); + } else { + return doDelay(this, packet, 100, TimeUnit.MILLISECONDS); + } + } + + private boolean isServiceOnlyOrShuttingDown() { + return Optional.ofNullable(startDetailsQueue.peek().step) + .map(step -> step.getNext() instanceof ServerDownStep).orElse(false); + } + + private boolean serverAvailableToStart(DomainPresenceInfo info) { + return ((numStarted.get() < PodHelper.getScheduledPods(info, clusterName).size()) + && (canStartConcurrently(PodHelper.getReadyPods(info, clusterName).size()))); + } + + private boolean canStartConcurrently(int numReady) { + return ((this.maxConcurrency > 0) && (numStarted.get() < (this.maxConcurrency + numReady - 1))) + || 
(this.maxConcurrency == 0); } } @@ -171,58 +217,17 @@ private static class StartClusteredServersStepFactory { this.maxConcurrency = maxConcurrency; } + public int getMaxConcurrency() { + return this.maxConcurrency; + } + void add(StepAndPacket serverToStart) { serversToStart.add(serverToStart); } Collection getServerStartsStepAndPackets() { - if (maxConcurrency == 0 || serversToStart.size() <= maxConcurrency) { - return serversToStart; - } - ArrayList steps = new ArrayList<>(maxConcurrency); - IntStream.range(0, maxConcurrency) - .forEach(i -> steps.add(StartClusteredServersStep.createStepAndPacket(serversToStart))); - return steps; - } - - } - - static class StartClusteredServersStep extends Step { - - private final Queue serversToStart; - - static StepAndPacket createStepAndPacket(Queue serversToStart) { - return new StepAndPacket(new StartClusteredServersStep(serversToStart), null); - } - - StartClusteredServersStep(Queue serversToStart) { - super(null); - this.serversToStart = serversToStart; - serversToStart.forEach(stepAndPacket -> setupSequentialStartPacket(stepAndPacket.packet)); - } - - Collection getServersToStart() { return serversToStart; } - - private void setupSequentialStartPacket(Packet packet) { - packet.put(ProcessingConstants.WAIT_FOR_POD_READY, true); - } - - @Override - public NextAction apply(Packet packet) { - Collection servers = Arrays.asList(serversToStart.poll()); - if (servers.isEmpty()) { - return doNext(packet); - } else { - return doForkJoin(this, packet, servers); - } - } - } - - // an interface to provide a hook for unit testing. 
- interface NextStepFactory { - Step createStatusUpdateStep(Step next); } } diff --git a/operator/src/main/java/oracle/kubernetes/operator/steps/ManagedServersUpStep.java b/operator/src/main/java/oracle/kubernetes/operator/steps/ManagedServersUpStep.java index ad0923548b0..3b3048cf78f 100644 --- a/operator/src/main/java/oracle/kubernetes/operator/steps/ManagedServersUpStep.java +++ b/operator/src/main/java/oracle/kubernetes/operator/steps/ManagedServersUpStep.java @@ -32,6 +32,9 @@ import oracle.kubernetes.weblogic.domain.model.Domain; import oracle.kubernetes.weblogic.domain.model.ServerSpec; +import static oracle.kubernetes.operator.DomainStatusUpdater.MANAGED_SERVERS_STARTING_PROGRESS_REASON; +import static oracle.kubernetes.operator.DomainStatusUpdater.createProgressingStep; + public class ManagedServersUpStep extends Step { static final String SERVERS_UP_MSG = "Running servers for domain with UID: {0}, running list: {1}"; @@ -66,7 +69,9 @@ private static Step scaleDownIfNecessary( List serversToStop = getServersToStop(info, serversToIgnore); if (!serversToStop.isEmpty()) { - insert(steps, new ServerDownIteratorStep(serversToStop, null)); + insert(steps, + Step.chain(createProgressingStep(info, MANAGED_SERVERS_STARTING_PROGRESS_REASON, true, null), + new ServerDownIteratorStep(serversToStop, null))); } return Step.chain(steps.toArray(new Step[0])); diff --git a/operator/src/main/java/oracle/kubernetes/weblogic/domain/model/BaseConfiguration.java b/operator/src/main/java/oracle/kubernetes/weblogic/domain/model/BaseConfiguration.java index 94b8f2ca9ce..fce9fa872c5 100644 --- a/operator/src/main/java/oracle/kubernetes/weblogic/domain/model/BaseConfiguration.java +++ b/operator/src/main/java/oracle/kubernetes/weblogic/domain/model/BaseConfiguration.java @@ -217,7 +217,7 @@ public String getServiceAccountName() { return serverPod.getServiceAccountName(); } - void setNodeName(String nodeName) { + public void setNodeName(String nodeName) { 
serverPod.setNodeName(nodeName); } diff --git a/operator/src/main/java/oracle/kubernetes/weblogic/domain/model/ServerStatus.java b/operator/src/main/java/oracle/kubernetes/weblogic/domain/model/ServerStatus.java index ee7d76125d8..3b7d1f997b8 100644 --- a/operator/src/main/java/oracle/kubernetes/weblogic/domain/model/ServerStatus.java +++ b/operator/src/main/java/oracle/kubernetes/weblogic/domain/model/ServerStatus.java @@ -208,7 +208,7 @@ public ServerStatus withNodeName(String nodeName) { * * @return health */ - ServerHealth getHealth() { + public ServerHealth getHealth() { return health; } diff --git a/operator/src/main/resources/scripts/introspectDomain.sh b/operator/src/main/resources/scripts/introspectDomain.sh index dd5ebc44253..ae030dae3c7 100644 --- a/operator/src/main/resources/scripts/introspectDomain.sh +++ b/operator/src/main/resources/scripts/introspectDomain.sh @@ -11,20 +11,23 @@ # - encrypted admin user password passed in via a plain-text secret (for use in sit config) # - md5 checksum of the DOMAIN_HOME/security/SerializedSystemIni.dat domain secret file # - situational config files for overriding the configuration within the DOMAIN_HOME +# - Model in Image domain home zips and md5s (when the domain source type is MII) # # It works as part of the following flow: # # (1) When an operator discovers a new domain, it launches this script via an # introspector k8s job. # (2) This script then: -# (2A) Configures and starts a NM via startNodeManager.sh (in NODEMGR_HOME) -# (2B) Calls introspectDomain.py, which depends on the NM -# (2C) Exits 0 on success, non-zero otherwise. +# (2A) Generates MII domain home zips/md5 files if the domain home source type is MII +# (2B) Configures and starts a NM via startNodeManager.sh (in NODEMGR_HOME) +# (2C) Calls introspectDomain.py, which depends on the NM and puts files in stdout +# (2D) Exits 0 on success, non-zero otherwise. 
# (5) Operator parses the output of introspectDomain.py into files and: # (5A) Uses one of the files to get the domain's name, cluster name, ports, etc. # (5B) Deploys a config map for the domain containing the files. # (6) Operator starts pods for domain's WebLogic servers. # (7) Pod 'startServer.sh' script loads files from the config map, +# generates a domain home from the files for the MII case, # copies/uses encrypted files, and applies sit config files. It # also checks that domain secret md5 cksum matches the cksum # obtained by this script. @@ -35,24 +38,29 @@ # ORACLE_HOME = Oracle Install Home - defaults via utils.sh/exportInstallHomes # MW_HOME = MiddleWare Install Home - defaults to ${ORACLE_HOME} # WL_HOME = WebLogic Install Home - defaults to ${ORACLE_HOME}/wlserver +# INTROSPECTOR_LOG_FILE_MAX = Max number of log files to keep around (default 11). # -# - Transitively requires other env vars for startNodeManager.sh, wlst.sh, +# - Transitively requires other env vars for startNodeManager.sh, wlst.sh, modelInImage.sh, # and introspectDomain.py (see these scripts to find out what else needs to be set). # SCRIPTPATH="$( cd "$(dirname "$0")" > /dev/null 2>&1 ; pwd -P )" +# # setup tracing +# source ${SCRIPTPATH}/utils.sh [ $? -ne 0 ] && echo "[SEVERE] Missing file ${SCRIPTPATH}/utils.sh" && exit 1 traceTiming "INTROSPECTOR '${DOMAIN_UID}' MAIN START" - +# # Local createFolder method which does an 'exit 1' instead of exitOrLoop for # immediate failure during introspection +# + function createFolder { mkdir -m 750 -p $1 if [ ! -d $1 ]; then @@ -61,21 +69,61 @@ function createFolder { fi } -trace "Introspecting the domain" +# +# setup MII functions in case this is a MII domain +# + +source ${SCRIPTPATH}/modelInImage.sh +[ $? -ne 0 ] && trace SEVERE "Error sourcing ${SCRIPTPATH}/modelInImage.sh" && exit 1 + +# +# setup introspector log file +# keep max 11 total by default (delete oldest first) +# + +traceDirs before LOG_HOME + +if [ ! -z "${LOG_HOME}" ] && [ ! 
-d "${LOG_HOME}" ]; then + trace "Creating log home directory: '${LOG_HOME}'" + createFolder ${LOG_HOME} +fi + +ilog_dir="${LOG_HOME:-/tmp}" +ilog_file="${ilog_dir}/introspector_script.out" + +if [ ! -d "${ilog_dir}" ]; then + trace "Creating introspector log directory: '${ilog_dir}'" + createFolder "${ilog_dir}" +fi + +testLogFileRotate "${ilog_file}" +[ $? -ne 0 ] && trace SEVERE "Error accessing '${ilog_dir}'. See previous log messages." && exit 1 + +logFileRotate ${ilog_file} ${INTROSPECTOR_LOG_FILE_MAX:-11} + +# +# main introspection function +# + +function doIntrospect() { + + trace "Introspecting domain '${DOMAIN_UID}', log location: '$ilog_file'" + + traceDirs after LOG_HOME -# list potentially interesting env-vars and dirs before they're updated by export.*Homes + # list potentially interesting env-vars and dirs before they're updated by export.*Homes -traceEnv before -traceDirs before + traceEnv before + traceDirs before DOMAIN_HOME DATA_HOME -# set defaults -# set ORACLE_HOME/WL_HOME/MW_HOME to defaults if needed + # set defaults + # set ORACLE_HOME/WL_HOME/MW_HOME to defaults if needed -exportInstallHomes + exportInstallHomes -# check if prereq env-vars, files, and directories exist + # check if prereq env-vars, files, and directories exist -checkEnv -q \ + checkEnv -q \ DOMAIN_UID \ NAMESPACE \ ORACLE_HOME \ @@ -86,37 +134,30 @@ checkEnv -q \ OPERATOR_ENVVAR_NAMES \ || exit 1 -for script_file in "${SCRIPTPATH}/wlst.sh" \ - "${SCRIPTPATH}/startNodeManager.sh" \ - "${SCRIPTPATH}/modelInImage.sh" \ - "${SCRIPTPATH}/introspectDomain.py"; do - [ ! -f "$script_file" ] && trace SEVERE "Missing file '${script_file}'." && exit 1 -done - -for dir_var in JAVA_HOME WL_HOME MW_HOME ORACLE_HOME; do - [ ! -d "${!dir_var}" ] && trace SEVERE "Missing ${dir_var} directory '${!dir_var}'." && exit 1 -done - -# -# DATA_HOME env variable exists implies override directory specified. Attempt to create directory -# -if [ ! -z "${DATA_HOME}" ] && [ ! 
-d "${DATA_HOME}" ]; then - trace "Creating data home directory: '${DATA_HOME}'" - createFolder ${DATA_HOME} -fi - - -traceTiming "INTROSPECTOR '${DOMAIN_UID}' MII CREATE DOMAIN START" + for script_file in "${SCRIPTPATH}/wlst.sh" \ + "${SCRIPTPATH}/startNodeManager.sh" \ + "${SCRIPTPATH}/introspectDomain.py"; do + [ ! -f "$script_file" ] && trace SEVERE "Missing file '${script_file}'." && exit 1 + done + + for dir_var in JAVA_HOME WL_HOME MW_HOME ORACLE_HOME; do + [ ! -d "${!dir_var}" ] && trace SEVERE "Missing ${dir_var} directory '${!dir_var}'." && exit 1 + done + + # + # DATA_HOME env variable exists implies override directory specified. Attempt to create directory + # + if [ ! -z "${DATA_HOME}" ] && [ ! -d "${DATA_HOME}" ]; then + trace "Creating data home directory: '${DATA_HOME}'" + createFolder ${DATA_HOME} + fi -source ${SCRIPTPATH}/modelInImage.sh -if [ $? -ne 0 ]; then - trace SEVERE "Error sourcing modelInImage.sh" && exit 1 -fi + traceTiming "INTROSPECTOR '${DOMAIN_UID}' MII CREATE DOMAIN START" -# Add another env/attribute in domain yaml for model in image -# log error if dir exists and attribute set -DOMAIN_CREATED=0 -if [ ${DOMAIN_SOURCE_TYPE} == "FromModel" ]; then + # Add another env/attribute in domain yaml for model in image + # log error if dir exists and attribute set + DOMAIN_CREATED=0 + if [ ${DOMAIN_SOURCE_TYPE} == "FromModel" ]; then trace "Beginning Model In Image" command -v gzip if [ $? 
-ne 0 ] ; then @@ -142,43 +183,39 @@ if [ ${DOMAIN_SOURCE_TYPE} == "FromModel" ]; then createWLDomain || exit 1 created_domain=$DOMAIN_CREATED trace "Create domain return code = " ${created_domain} -else + else created_domain=1 -fi - -traceTiming "INTROSPECTOR '${DOMAIN_UID}' MII CREATE DOMAIN END" - + fi -# check DOMAIN_HOME for a config/config.xml, reset DOMAIN_HOME if needed + traceTiming "INTROSPECTOR '${DOMAIN_UID}' MII CREATE DOMAIN END" -exportEffectiveDomainHome || exit 1 + # check DOMAIN_HOME for a config/config.xml, reset DOMAIN_HOME if needed -# list potentially interesting env-vars and dirs after they're updated by export.*Homes + exportEffectiveDomainHome || exit 1 -traceEnv after -traceDirs after + # list potentially interesting env-vars and dirs after they're updated by export.*Homes -# check if we're using a supported WebLogic version -# (the check will log a message if it fails) + traceEnv after + traceDirs after DOMAIN_HOME DATA_HOME -checkWebLogicVersion || exit 1 + # check if we're using a supported WebLogic version + # (the check will log a message if it fails) -# start node manager -# run instrospector wlst script -if [ ${created_domain} -ne 0 ]; then + checkWebLogicVersion || exit 1 + # start node manager + # run introspector wlst script + if [ ${created_domain} -ne 0 ]; then traceTiming "INTROSPECTOR '${DOMAIN_UID}' MII NM START" - # start node manager -why ?? 
+ # introspectDomain.py uses an NM to setup credentials for the server NMs + # (see 'nmConnect' in introspectDomain.py) trace "Starting node manager" ${SCRIPTPATH}/startNodeManager.sh || exit 1 traceTiming "INTROSPECTOR '${DOMAIN_UID}' MII NM END" - traceTiming "INTROSPECTOR '${DOMAIN_UID}' MII MD5 START" - traceTiming "INTROSPECTOR '${DOMAIN_UID}' MII NM END" - traceTiming "INTROSPECTOR '${DOMAIN_UID}' MII MD5 START" # put domain secret's md5 cksum in file '/tmp/DomainSecret.md5' @@ -186,14 +223,38 @@ if [ ${created_domain} -ne 0 ]; then generateDomainSecretMD5File '/tmp/DomainSecret.md5' || exit 1 traceTiming "INTROSPECTOR '${DOMAIN_UID}' MII MD5 END" - traceTiming "INTROSPECTOR '${DOMAIN_UID}' INTROSPECT START" trace "Running introspector WLST script ${SCRIPTPATH}/introspectDomain.py" ${SCRIPTPATH}/wlst.sh ${SCRIPTPATH}/introspectDomain.py || exit 1 traceTiming "INTROSPECTOR '${DOMAIN_UID}' INTROSPECT END" -fi -trace "Domain introspection complete" + fi + trace "Domain introspection complete" +} -exit 0 +# we have different log file modes in case we need to revert 'tee' mode + +case "${INTROSPECTOR_LOG_FILE_MODE:-tee}" in + tee) + set -o pipefail + doIntrospect |& tee $ilog_file + exit $? + ;; + bg_and_tail) + ${SCRIPTPATH}/tailLog.sh $ilog_file /tmp/dontcare & + tail_log_pid=$! + doIntrospect >> $ilog_file 2>&1 & + wait $! + exitCode=$? + # sleep 1 second in case background 'tail' needs time to catch up + sleep 1 + kill -9 $tail_log_pid + exit $exitCode + ;; + *) + # no log file - everything simply goes to stdout/stderr (old behavior) + doIntrospect + exit $? + ;; +esac diff --git a/operator/src/main/resources/scripts/modelInImage.sh b/operator/src/main/resources/scripts/modelInImage.sh index 9620c487608..5a997e1d482 100755 --- a/operator/src/main/resources/scripts/modelInImage.sh +++ b/operator/src/main/resources/scripts/modelInImage.sh @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
# # This script contains the all the function of model in image -# It is used by introspectDomain.sh job and starServer.sh +# It is used by introspectDomain.sh job and startServer.sh source ${SCRIPTPATH}/utils.sh @@ -532,8 +532,8 @@ function diff_model() { org.python.util.jython \ ${SCRIPTPATH}/model_diff.py $1 $2 > ${WDT_OUTPUT} 2>&1 if [ $? -ne 0 ] ; then - trace SEVERE "Failed to compare models. Check logs for error." - trace SEVERE "$(cat ${WDT_OUTPUT})" + trace SEVERE "Failed to compare models. Check logs for error. Comparison output:" + cat ${WDT_OUTPUT} exitOrLoop fi trace "Exiting diff_model" @@ -648,11 +648,8 @@ function generateMergedModel() { ${archive_list} ${variable_list} -domain_type ${WDT_DOMAIN_TYPE} > ${WDT_OUTPUT} ret=$? if [ $ret -ne 0 ]; then - trace SEVERE "WDT Failed: Validate Model Failed " - if [ -d ${LOG_HOME} ] && [ ! -z ${LOG_HOME} ] ; then - cp ${WDT_OUTPUT} ${LOG_HOME}/introspectJob_validateDomain.log - fi - trace SEVERE "$(cat ${WDT_OUTPUT})" + trace SEVERE "WDT Failed: Validate Model Failed:" + cat ${WDT_OUTPUT} exitOrLoop fi @@ -679,11 +676,8 @@ function wdtCreatePrimordialDomain() { > ${WDT_OUTPUT} ret=$? if [ $ret -ne 0 ]; then - trace SEVERE "WDT Create Domain Failed ${ret}" - if [ -d ${LOG_HOME} ] && [ ! -z ${LOG_HOME} ] ; then - cp ${WDT_OUTPUT} ${LOG_HOME}/introspectJob_createDomain.log - fi - trace SEVERE "$(cat ${WDT_OUTPUT})" + trace SEVERE "WDT Create Domain Failed, ret=${ret}:" + cat ${WDT_OUTPUT} exitOrLoop fi @@ -712,11 +706,8 @@ function wdtUpdateModelDomain() { ret=$? if [ $ret -ne 0 ]; then - trace SEVERE "WDT Update Domain Failed " - if [ -d ${LOG_HOME} ] && [ ! -z ${LOG_HOME} ] ; then - cp ${WDT_OUTPUT} ${LOG_HOME}/introspectJob_updateDomain.log - fi - trace SEVERE "$(cat ${WDT_OUTPUT})" + trace SEVERE "WDT Update Domain Failed:" + cat ${WDT_OUTPUT} exitOrLoop fi @@ -788,8 +779,8 @@ function encrypt_decrypt_model() { trace SEVERE "Fatal Error: Failed to $1 domain model. This error is irrecoverable. 
Check to see if the secret " \ "described in the configuration.model.runtimeEncryptionSecret domain resource field has been changed since the " \ "creation of the domain. You can either reset the password to the original one and try again or delete "\ - "and recreate the domain." - trace SEVERE "$(cat ${WDT_OUTPUT})" + "and recreate the domain. Failure output:" + cat ${WDT_OUTPUT} exitOrLoop fi @@ -829,8 +820,8 @@ function encrypt_decrypt_domain_secret() { trace SEVERE "Fatal Error: Failed to $1 domain secret. This error is irrecoverable. Check to see if the secret " \ "described in the configuration.model.runtimeEncryptionSecret domain resource field has been changed since the " \ "creation of the domain. You can either reset the password to the original one and try again or delete "\ - "and recreate the domain." - trace SEVERE "$(cat ${WDT_OUTPUT})" + "and recreate the domain. Failure output:" + cat ${WDT_OUTPUT} exitOrLoop fi diff --git a/operator/src/main/resources/scripts/startNodeManager.sh b/operator/src/main/resources/scripts/startNodeManager.sh index f3ebb3dff6d..33d74d68092 100644 --- a/operator/src/main/resources/scripts/startNodeManager.sh +++ b/operator/src/main/resources/scripts/startNodeManager.sh @@ -27,10 +27,14 @@ # ${DOMAIN_UID}/${SERVER_NAME}_nodemanager.out # Default: # Use LOG_HOME. If LOG_HOME not set, use NODEMGR_HOME. +# NODEMGR_LOG_FILE_MAX = max NM .log and .out files to keep around (default=11) +# # ADMIN_PORT_SECURE = "true" if the admin protocol is secure. Default is false +# # FAIL_BOOT_ON_SITUATIONAL_CONFIG_ERROR = "true" if WebLogic server should fail to # boot if situational configuration related errors are # found. Default to "true" if unspecified. 
+# # NODEMGR_MEM_ARGS = JVM mem args for starting the Node Manager instance # NODEMGR_JAVA_OPTIONS = Java options for starting the Node Manager instance # @@ -172,46 +176,45 @@ fi # Create nodemanager.properties and nodemanager.domains files in NM home # -nm_props_file=${NODEMGR_HOME}/nodemanager.properties nm_domains_file=${NODEMGR_HOME}/nodemanager.domains - cat < ${nm_domains_file} ${domain_name}=${DOMAIN_HOME} EOF +[ ! $? -eq 0 ] && trace SEVERE "Failed to create '${nm_domains_file}'." && exit 1 - [ ! $? -eq 0 ] && trace SEVERE "Failed to create '${nm_domains_file}'." && exit 1 +nm_props_file=${NODEMGR_HOME}/nodemanager.properties cat < ${nm_props_file} #Node manager properties - DomainsFile=${nm_domains_file} - LogLimit=0 - DomainsDirRemoteSharingEnabled=true - PropertiesVersion=12.2.1 - AuthenticationEnabled=false NodeManagerHome=${NODEMGR_HOME} JavaHome=${JAVA_HOME} - LogLevel=FINEST + DomainsFile=${nm_domains_file} DomainsFileEnabled=true - ListenAddress=127.0.0.1 + DomainsDirRemoteSharingEnabled=true NativeVersionEnabled=true + PropertiesVersion=12.2.1 + ListenAddress=127.0.0.1 ListenPort=5556 - LogToStderr=true - weblogic.StartScriptName=startWebLogic.sh + ListenBacklog=50 + AuthenticationEnabled=false SecureListener=false - LogCount=1 - QuitEnabled=false - LogAppend=true + weblogic.StartScriptEnabled=true + weblogic.StartScriptName=startWebLogic.sh weblogic.StopScriptEnabled=false + QuitEnabled=false StateCheckInterval=500 CrashRecoveryEnabled=false - weblogic.StartScriptEnabled=true - LogFormatter=weblogic.nodemanager.server.LogFormatter - ListenBacklog=50 LogFile=${nodemgr_log_file} + LogToStderr=true + LogFormatter=weblogic.nodemanager.server.LogFormatter + LogAppend=true + LogLimit=0 + LogLevel=FINEST + LogCount=1 EOF - [ ! $? -eq 0 ] && trace SEVERE "Failed to create '${nm_props_file}'." && exit 1 +[ ! $? -eq 0 ] && trace SEVERE "Failed to create '${nm_props_file}'." 
&& exit 1 ############################################################################### # @@ -238,21 +241,11 @@ if [ ! "${SERVER_NAME}" = "introspector" ]; then [ ! $? -eq 0 ] && trace SEVERE "Could not remove stale file '$wl_state_file'." && exit 1 fi - cat < ${wl_props_file} # Server startup properties AutoRestart=true RestartMax=2 -RotateLogOnStartup=false -RotationType=bySize -RotationTimeStart=00\\:00 -RotatedFileCount=100 -RestartDelaySeconds=0 -FileSizeKB=5000 -FileTimeSpanFactor=3600000 RestartInterval=3600 -NumberOfFilesLimited=true -FileTimeSpan=24 NMHostName=${SERVICE_NAME} Arguments=${USER_MEM_ARGS} -Dweblogic.SituationalConfig.failBootOnError=${FAIL_BOOT_ON_SITUATIONAL_CONFIG_ERROR} ${serverOutOption} ${JAVA_OPTIONS} @@ -316,18 +309,16 @@ export JAVA_OPTIONS="${NODEMGR_MEM_ARGS} ${NODEMGR_JAVA_OPTIONS} -Dweblogic.Root ############################################################################### # # Start the NM -# 1) remove old NM log file, if it exists +# 1) rotate old NM log file, and old NM out file, if they exist # 2) start NM in background -# 3) wait up to 15 seconds for NM by monitoring log file -# 4) 'exit 1' if wait more than 15 seconds +# 3) wait up to ${NODE_MANAGER_MAX_WAIT:-60} seconds for NM by monitoring NM's .out file +# 4) log SEVERE, log INFO with 'exit 1' if wait more than ${NODE_MANAGER_MAX_WAIT:-60} seconds # trace "Start the nodemanager, node manager home is '${NODEMGR_HOME}', log file is '${nodemgr_log_file}', out file is '${nodemgr_out_file}'." -rm -f ${nodemgr_log_file} -[ ! $? -eq 0 ] && trace SEVERE "Could not remove old file '$nodemgr_log_file'." && exit 1 -rm -f ${nodemgr_out_file} -[ ! $? -eq 0 ] && trace SEVERE "Could not remove old file '$nodemgr_out_file'." 
&& exit 1 +logFileRotate ${nodemgr_log_file} ${NODEMGR_LOG_FILE_MAX:-11} +logFileRotate ${nodemgr_out_file} ${NODEMGR_LOG_FILE_MAX:-11} ${stm_script} > ${nodemgr_out_file} 2>&1 & diff --git a/operator/src/main/resources/scripts/startServer.sh b/operator/src/main/resources/scripts/startServer.sh index 1c60660ee67..b0f7bfdd2c6 100755 --- a/operator/src/main/resources/scripts/startServer.sh +++ b/operator/src/main/resources/scripts/startServer.sh @@ -242,7 +242,7 @@ function prepareMIIServer() { # trace env vars and dirs before export.*Home calls traceEnv before -traceDirs before +traceDirs before DOMAIN_HOME LOG_HOME DATA_HOME traceTiming "POD '${SERVICE_NAME}' MII UNZIP START" @@ -316,7 +316,7 @@ exportEffectiveDomainHome || exitOrLoop # trace env vars and dirs after export.*Home calls traceEnv after -traceDirs after +traceDirs after DOMAIN_HOME LOG_HOME DATA_HOME # # Check if introspector actually ran. This should never fail since diff --git a/operator/src/main/resources/scripts/tailLog.sh b/operator/src/main/resources/scripts/tailLog.sh old mode 100644 new mode 100755 index ac429090b3d..8733fc2e12c --- a/operator/src/main/resources/scripts/tailLog.sh +++ b/operator/src/main/resources/scripts/tailLog.sh @@ -13,7 +13,7 @@ echo $$ > $2 while true ; do if [ -f $1 ]; then - tail -F -n +0 $1 || sleep 10 + tail -F -s 0.1 -n +0 $1 || sleep 10 fi sleep 0.1 done diff --git a/operator/src/main/resources/scripts/utils.sh b/operator/src/main/resources/scripts/utils.sh index 3d1e96fe257..755d8a6999a 100755 --- a/operator/src/main/resources/scripts/utils.sh +++ b/operator/src/main/resources/scripts/utils.sh @@ -244,6 +244,7 @@ function checkEnv() { return 0 } +# # traceEnv: # purpose: trace a curated set of env vars # warning: we purposely avoid dumping all env vars @@ -286,15 +287,17 @@ function traceEnv() { } # -# traceDirs before|after -# Trace contents and owner of DOMAIN_HOME/LOG_HOME/DATA_HOME directories -# +# traceDirs before|after DOMAIN_HOME LOG_HOME DATA_HOME ... 
+# Trace contents and owner of directory for the specified env vars... + function traceDirs() { trace "id = '`id`'" + local keyword="$1" + shift local indir - for indir in DOMAIN_HOME LOG_HOME DATA_HOME; do + for indir in $*; do [ -z "${!indir}" ] && continue - trace "Directory trace for $indir=${!indir} ($1)" + trace "Directory trace for $indir=${!indir} ($keyword)" local cnt=0 local odir="" local cdir="${!indir}/*" @@ -309,6 +312,178 @@ function traceDirs() { } +# +# internal helper for logFileRotate(): +# return all files that match ${1}NNNNN in numeric order +# +function logFiles() { + ls -1 ${1}[0-9][0-9][0-9][0-9][0-9] 2>/dev/null +} + +# +# internal helper for logFileRotate(): +# return all files that match ${1}NNNNN in reverse order +# +function logFilesReverse() { + ls -1r ${1}[0-9][0-9][0-9][0-9][0-9] 2>/dev/null +} + +# +# internal helper for logFileRotate(): +# parse NNNNN out of $1, but if not found at end of $1 return 0 +# +function logFileNum() { + local logNum=$(echo "$1" | sed 's/.*\([0-9][0-9][0-9][0-9][0-9]\)$/\1/' | sed 's/^0*//') + echo ${logNum:-0} +} + +# +# internal helper for logFileRotate(): +# Rotate file from ${1} to ${1}NNNNN +# $1 = filename +# $2 = max files to keep +# $3 = if "quiet", then suppress any tracing +# See logFileRotate() for detailed usage. +# +function logFileRotateInner() { + local logmax=${2:-7} + local logcur + + [ $logmax -le 1 ] && logmax=1 + [ $logmax -gt 40000 ] && logmax=40000 + + # find highest numbered log file (0 if none found) + + local lastlogfile=$(logFiles "$1" | tail -n 1) + local lastlognum=$(logFileNum $lastlogfile) + + # delete oldest log files + + local _logmax_=$logmax + if [ -f "$1" ]; then + # account for the current file (the one we're about to rotate) if there is one + _logmax_=$((logmax - 1)) + fi + for logcur in $(logFilesReverse ${1} | tail -n +${_logmax_}); do + [ ! "$3" = "quiet" ] && trace "Removing old log file '${logcur}'." 
+ rm $logcur + done + + # if highest lognum is 99999, renumber existing files starting with 1 + # (there should be no overlap because we just deleted older files) + + if [ $lastlognum -ge 99999 ]; then + lastlognum=0 + local logcur + for logcur in $(logFiles "$1"); do + lastlognum=$((lastlognum + 1)) + mv "$logcur" "${1}$(printf "%0.5i" $lastlognum)" + done + fi + + # rotate $1 if it exists, or simply remove it if logmax is 1 + + if [ ${logmax} -gt 1 ]; then + if [ -f "$1" ]; then + local nextlognum=$((lastlognum + 1)) + [ ! "$3" = "quiet" ] && trace "Rotating '$1' to '${1}$(printf "%0.5i" $nextlognum)'." + mv "$1" "${1}$(printf "%0.5i" $nextlognum)" + fi + else + rm -f "$1" + fi +} + +# +# internal helper for logFileRotate(): +# +function testLFRWarn() { + trace WARNING "File rotation test failed. Log files named '${1}' will not be rotated, errcode='${2}'." +} + +# +# internal helper for logFileRotate(): +# Convert new-lines to space, multi-spaces to single space, and trim +# +function testTR() { + tr '\n' ' ' | sed 's/ */ /g' | sed 's/ $//g' +} + +# +# internal helper for logFileRotate(): +# Verify logFileRotateInner works, return non-zero if not. +# +function testLogFileRotate() { + local curfile=${1:-/tmp/unknown} + local fname=$(dirname $curfile)/testFileRotate.$RANDOM.$SECONDS.tmp + mkdir -p $(dirname $curfile) + + rm -f ${fname}* + logFileRotateInner ${fname} 7 quiet + [ ! "$(ls ${fname}* 2>/dev/null)" = "" ] && testLFRWarn "$curfile" A1 && return 1 + echo "a" > ${fname} && logFileRotateInner ${fname} 2 quiet + [ ! "$(ls ${fname}*)" = "${fname}00001" ] && testLFRWarn "$curfile" B1 && return 1 + [ ! "$(cat ${fname}00001)" = "a" ] && testLFRWarn "$curfile" B2 && return 1 + logFileRotateInner ${fname} 2 quiet + [ ! "$(ls ${fname}*)" = "${fname}00001" ] && testLFRWarn "$curfile" C1 && return 1 + [ ! "$(cat ${fname}00001)" = "a" ] && testLFRWarn "$curfile" C2 && return 1 + echo "b" > ${fname} && logFileRotateInner ${fname} 2 quiet + [ ! 
"$(ls ${fname}*)" = "${fname}00002" ] && testLFRWarn "$curfile" C3 && return 1 + [ ! "$(cat ${fname}00002)" = "b" ] && testLFRWarn "$curfile" C4 && return 1 + echo "c" > ${fname} && logFileRotateInner ${fname} 0 quiet + [ ! "$(ls ${fname}* 2>/dev/null)" = "" ] && testLFRWarn "$curfile" D1 && return 1 + + echo 1 > ${fname} && logFileRotateInner ${fname} 3 quiet + [ ! "$(ls ${fname}* | testTR)" = "${fname}00001" ] && testLFRWarn "$curfile" E1 && return 1 + echo 2 > ${fname} && logFileRotateInner ${fname} 3 quiet + [ ! "$(ls ${fname}* | testTR)" = "${fname}00001 ${fname}00002" ] && testLFRWarn "$curfile" E2 && return 1 + echo 3 > ${fname} && logFileRotateInner ${fname} 3 quiet + [ ! "$(ls ${fname}* | testTR)" = "${fname}00002 ${fname}00003" ] && testLFRWarn "$curfile" E3 && return 1 + echo 4 > ${fname} && logFileRotateInner ${fname} 3 quiet + [ ! "$(ls ${fname}* | testTR)" = "${fname}00003 ${fname}00004" ] && testLFRWarn "$curfile" E4 && return 1 + [ ! "$(cat ${fname}00003)" = "3" ] && testLFRWarn "$curfile" E5 && return 1 + [ ! "$(cat ${fname}00004)" = "4" ] && testLFRWarn "$curfile" E6 && return 1 + local count + rm ${fname}* + echo "0" > ${fname} + for count in 99997 99998 99999; do + echo $count > ${fname}${count} + done + logFileRotateInner ${fname} 4 quiet + [ ! "$(ls ${fname}* | testTR)" = "${fname}00001 ${fname}00002 ${fname}00003" ] \ + && testLFRWarn "$curfile" F1 && return 1 + [ ! "$(cat ${fname}00001)" = "99998" ] && testLFRWarn "$curfile" F2 && return 1 + [ ! "$(cat ${fname}00002)" = "99999" ] && testLFRWarn "$curfile" F3 && return 1 + [ ! "$(cat ${fname}00003)" = "0" ] && testLFRWarn "$curfile" F4 && return 1 + logFileRotateInner ${fname} 2 quiet + [ ! "$(ls ${fname}*)" = "${fname}00003" ] && testLFRWarn "$curfile" F5 && return 1 + rm ${fname}* + return 0 +} + +# +# logFileRotate +# Rotate file from ${1} to ${1}NNNNN, starting with 00001. 
+# $1 = filename +# $2 = max log files, default is 7 +# Notes: +# - $2 = 0 or 1 implies there should be no saved files +# and causes $1 to be removed instead of rotated +# +# - Silently tests rotation on scratch files first, and, +# if that fails logs a WARNING, does nothing, and returns. +# +# - If current max file is 99999, then old files are +# renumbered starting with 00001. +# +function logFileRotate() { + # test rotation, if it fails, log a Warning that rotation of $1 is skipped. + testLogFileRotate "$1" || return 0 + # now do the actual rotation + logFileRotateInner "$1" $2 +} + + # # exportEffectiveDomainHome # if DOMAIN_HOME='${ORACLE_HOME}/user_projects/domains': diff --git a/operator/src/test/java/oracle/kubernetes/operator/DomainProcessorDelegateStub.java b/operator/src/test/java/oracle/kubernetes/operator/DomainProcessorDelegateStub.java index 3c2c150ce19..41327ef7ed9 100644 --- a/operator/src/test/java/oracle/kubernetes/operator/DomainProcessorDelegateStub.java +++ b/operator/src/test/java/oracle/kubernetes/operator/DomainProcessorDelegateStub.java @@ -59,6 +59,11 @@ public ScheduledFuture scheduleWithFixedDelay( return testSupport.scheduleWithFixedDelay(command, initialDelay, delay, unit); } + @Override + public void runSteps(Step firstStep) { + testSupport.runSteps(firstStep); + } + private static class PassthroughPodAwaiterStepFactory implements PodAwaiterStepFactory { @Override public Step waitForReady(V1Pod pod, Step next) { diff --git a/operator/src/test/java/oracle/kubernetes/operator/DomainProcessorTest.java b/operator/src/test/java/oracle/kubernetes/operator/DomainProcessorTest.java index 29cb97f0434..44772e7df70 100644 --- a/operator/src/test/java/oracle/kubernetes/operator/DomainProcessorTest.java +++ b/operator/src/test/java/oracle/kubernetes/operator/DomainProcessorTest.java @@ -160,6 +160,12 @@ public void whenDomainSpecNotChanged_dontRunUpdateThread() { makeRightOperation.execute(); assertThat(logRecords, 
containsFine(NOT_STARTING_DOMAINUID_THREAD)); + Domain updatedDomain = testSupport.getResourceWithName(DOMAIN, domain.getDomainUid()); + assertThat(getResourceVersion(updatedDomain), equalTo(getResourceVersion(domain))); + } + + private String getResourceVersion(Domain domain) { + return Optional.of(domain).map(Domain::getMetadata).map(V1ObjectMeta::getResourceVersion).orElse(""); } @Test @@ -199,6 +205,7 @@ public void whenMakeRightRun_updateSDomainStatus() { assertThat(getDesiredState(updatedDomain, MANAGED_SERVER_NAMES[2]), equalTo(SHUTDOWN_STATE)); assertThat(getDesiredState(updatedDomain, MANAGED_SERVER_NAMES[3]), equalTo(SHUTDOWN_STATE)); assertThat(getDesiredState(updatedDomain, MANAGED_SERVER_NAMES[4]), equalTo(SHUTDOWN_STATE)); + assertThat(getResourceVersion(updatedDomain), not(getResourceVersion(domain))); } @Test diff --git a/operator/src/test/java/oracle/kubernetes/operator/DomainProcessorTestSetup.java b/operator/src/test/java/oracle/kubernetes/operator/DomainProcessorTestSetup.java index 9a00221c80b..1605a2912a8 100644 --- a/operator/src/test/java/oracle/kubernetes/operator/DomainProcessorTestSetup.java +++ b/operator/src/test/java/oracle/kubernetes/operator/DomainProcessorTestSetup.java @@ -32,6 +32,7 @@ public class DomainProcessorTestSetup { public static final String NS = "namespace"; public static final String SECRET_NAME = "secret-name"; public static final String KUBERNETES_UID = "12345"; + public static final String NODE_NAME = "Node1"; private static final String INTROSPECTION_JOB = LegalNames.toJobIntrospectorName(UID); private static final String INTROSPECT_RESULT = @@ -88,13 +89,14 @@ private static V1ObjectMeta withTimestamps(V1ObjectMeta meta) { * @return a domain */ public static Domain createTestDomain() { + DomainSpec ds = new DomainSpec() + .withWebLogicCredentialsSecret(new V1SecretReference().name(SECRET_NAME).namespace(NS)); + ds.setNodeName(NODE_NAME); return new Domain() .withApiVersion(KubernetesConstants.DOMAIN_GROUP + "/" + 
KubernetesConstants.DOMAIN_VERSION) .withKind(KubernetesConstants.DOMAIN) .withMetadata(withTimestamps(new V1ObjectMeta().name(UID).namespace(NS).uid(KUBERNETES_UID))) - .withSpec( - new DomainSpec() - .withWebLogicCredentialsSecret(new V1SecretReference().name(SECRET_NAME).namespace(NS))); + .withSpec(ds); } /** diff --git a/operator/src/test/java/oracle/kubernetes/operator/DomainStatusUpdaterTest.java b/operator/src/test/java/oracle/kubernetes/operator/DomainStatusUpdaterTest.java index d8cc3830c93..a0da76d3b35 100644 --- a/operator/src/test/java/oracle/kubernetes/operator/DomainStatusUpdaterTest.java +++ b/operator/src/test/java/oracle/kubernetes/operator/DomainStatusUpdaterTest.java @@ -70,6 +70,8 @@ public class DomainStatusUpdaterTest { private String reason = generator.getUniqueString(); private RuntimeException failure = new RuntimeException(message); private String validationWarning = generator.getUniqueString(); + private final DomainProcessorImpl processor = + new DomainProcessorImpl(DomainProcessorDelegateStub.createDelegate(testSupport)); /** * Setup test environment. @@ -1010,12 +1012,57 @@ public void whenDomainHasProgressingTrueCondition_failedStepRemovesIt() { } @Test - public void whenDomainHasAvailableCondition_failedStepRemovesIt() { - domain.getStatus().addCondition(new DomainCondition(Available)); + public void whenPacketNotPopulatedBeforeUpdateServerStatus_resourceVersionUpdated() { + setupInitialServerStatus(); + String cachedResourceVersion = getRecordedDomain().getMetadata().getResourceVersion(); - testSupport.runSteps(DomainStatusUpdater.createFailedStep(failure, endStep)); + // Clear the server maps in the packet, and run StatusUpdateStep, the domain resource + // version should be updated because server health information is removed from domain status. 
+ clearPacketServerStatusMaps(); + testSupport.runSteps(DomainStatusUpdater.createStatusUpdateStep(endStep)); - assertThat(getRecordedDomain(), not(hasCondition(Available))); + assertThat(getRecordedDomain().getMetadata().getResourceVersion(), not(cachedResourceVersion)); + } + + @Test + public void whenPacketPopulatedBeforeUpdateServerStatus_resourceVersionNotUpdated() { + setupInitialServerStatus(); + String cachedResourceVersion = getRecordedDomain().getMetadata().getResourceVersion(); + + // Clear the server maps in the packet, run StatusUpdateStep after running + // PopulatePacketServerMapsStep, the domain resource version should NOT be updated because + // the server maps are populated in the packet with the existing server status + clearPacketServerStatusMaps(); + + testSupport.runSteps( + processor.createPopulatePacketServerMapsStep( + DomainStatusUpdater.createStatusUpdateStep(endStep))); + + assertThat(getRecordedDomain().getMetadata().getResourceVersion(), equalTo(cachedResourceVersion)); + } + + private void setupInitialServerStatus() { + setClusterAndNodeName(getPod("server1"), "clusterA", "node1"); + setClusterAndNodeName(getPod("server2"), "clusterB", "node2"); + + configSupport.addWlsCluster("clusterA", "server1"); + configSupport.addWlsCluster("clusterB", "server2"); + generateStartupInfos("server1", "server2"); + testSupport.addToPacket(DOMAIN_TOPOLOGY, configSupport.createDomainConfig()); + + // Run StatusUpdateStep with server maps in the packet to set up the initial domain status + testSupport.addToPacket( + SERVER_STATE_MAP, ImmutableMap.of("server1", RUNNING_STATE, "server2", SHUTDOWN_STATE)); + testSupport.addToPacket( + SERVER_HEALTH_MAP, + ImmutableMap.of("server1", overallHealth("health1"), "server2", overallHealth("health2"))); + + testSupport.runSteps(DomainStatusUpdater.createStatusUpdateStep(endStep)); + } + + private void clearPacketServerStatusMaps() { + testSupport.addToPacket(SERVER_STATE_MAP, null); + 
testSupport.addToPacket(SERVER_HEALTH_MAP, null); } private Domain getRecordedDomain() { diff --git a/operator/src/test/java/oracle/kubernetes/operator/DomainUpPlanTest.java b/operator/src/test/java/oracle/kubernetes/operator/DomainUpPlanTest.java index 0cf38d4c7b0..e7dd44ffe5c 100644 --- a/operator/src/test/java/oracle/kubernetes/operator/DomainUpPlanTest.java +++ b/operator/src/test/java/oracle/kubernetes/operator/DomainUpPlanTest.java @@ -72,6 +72,7 @@ public void setUp() throws NoSuchFieldException { mementos.add(InMemoryCertificates.install()); mementos.add(TuningParametersStub.install()); + testSupport.defineResources(domain); testSupport.addDomainPresenceInfo(domainPresenceInfo); } diff --git a/operator/src/test/java/oracle/kubernetes/operator/JobWatcherTest.java b/operator/src/test/java/oracle/kubernetes/operator/JobWatcherTest.java index bab1c2709eb..843d18d77ea 100644 --- a/operator/src/test/java/oracle/kubernetes/operator/JobWatcherTest.java +++ b/operator/src/test/java/oracle/kubernetes/operator/JobWatcherTest.java @@ -153,7 +153,11 @@ private V1Job markJobFailed(V1Job job) { } private V1Job markJobTimedOut(V1Job job) { - return setFailedWithReason(job, "DeadlineExceeded"); + return markJobTimedOut(job, "DeadlineExceeded"); + } + + private V1Job markJobTimedOut(V1Job job, String reason) { + return setFailedWithReason(job, reason); } private V1Job setFailedWithReason(V1Job job, String reason) { @@ -222,8 +226,16 @@ public void whenWaitForReadyAppliedToIncompleteJob_dontPerformNextStep() { } @Test - public void whenWaitForReadyAppliedToTimedOutJob_terminateWithException() { - startWaitForReady(this::markJobTimedOut); + public void whenWaitForReadyAppliedToTimedOutJobWithDeadlineExceeded_terminateWithException() { + startWaitForReady(job -> markJobTimedOut(job, "DeadlineExceeded")); + + assertThat(terminalStep.wasRun(), is(false)); + testSupport.verifyCompletionThrowable(JobWatcher.DeadlineExceededException.class); + } + + @Test + public void 
whenWaitForReadyAppliedToTimedOutJobWithBackoffLimitExceeded_terminateWithException() { + startWaitForReady(job -> markJobTimedOut(job, "BackoffLimitExceeded")); assertThat(terminalStep.wasRun(), is(false)); testSupport.verifyCompletionThrowable(JobWatcher.DeadlineExceededException.class); diff --git a/operator/src/test/java/oracle/kubernetes/operator/helpers/IntrospectionStatusTest.java b/operator/src/test/java/oracle/kubernetes/operator/helpers/IntrospectionStatusTest.java index 27ec5530dca..936b1d9b64d 100644 --- a/operator/src/test/java/oracle/kubernetes/operator/helpers/IntrospectionStatusTest.java +++ b/operator/src/test/java/oracle/kubernetes/operator/helpers/IntrospectionStatusTest.java @@ -14,6 +14,8 @@ import io.kubernetes.client.openapi.models.V1ContainerStateBuilder; import io.kubernetes.client.openapi.models.V1ObjectMeta; import io.kubernetes.client.openapi.models.V1Pod; +import io.kubernetes.client.openapi.models.V1PodCondition; +import io.kubernetes.client.openapi.models.V1PodConditionBuilder; import io.kubernetes.client.openapi.models.V1PodSpec; import io.kubernetes.client.openapi.models.V1PodStatusBuilder; import oracle.kubernetes.operator.DomainProcessorDelegateStub; @@ -41,7 +43,9 @@ public class IntrospectionStatusTest { private static final String IMAGE_NAME = "abc"; private static final String MESSAGE = "asdf"; private static final String IMAGE_PULL_FAILURE = "ErrImagePull"; + private static final String UNSCHEDULABLE = "Unschedulable"; private static final String IMAGE_PULL_BACKOFF = "ImagePullBackoff"; + private static final String DEADLINE_EXCEEDED = "DeadlineExceeded"; private List mementos = new ArrayList<>(); private KubernetesTestSupport testSupport = new KubernetesTestSupport(); private Map> presenceInfoMap = new HashMap<>(); @@ -130,6 +134,30 @@ public void whenNewIntrospectorJobPodCreatedWithImagePullBackupStatus_patchDomai assertThat(updatedDomain.getStatus().getMessage(), equalTo(MESSAGE)); } + @Test + public void 
whenIntrospectorJobPodPendingWithUnschedulableStatus_patchDomain() { + processor.dispatchPodWatch( + WatchEvent.createModifiedEvent( + createIntrospectorJobPodWithConditions(createPodConditions(UNSCHEDULABLE, MESSAGE))) + .toWatchResponse()); + + Domain updatedDomain = testSupport.getResourceWithName(KubernetesTestSupport.DOMAIN, UID); + assertThat(updatedDomain.getStatus().getReason(), equalTo(UNSCHEDULABLE)); + assertThat(updatedDomain.getStatus().getMessage(), equalTo(MESSAGE)); + } + + @Test + public void whenIntrospectorJobPodPhaseFailed_patchDomain() { + processor.dispatchPodWatch( + WatchEvent.createModifiedEvent( + createIntrospectorJobPodWithPhase("Failed", DEADLINE_EXCEEDED)) + .toWatchResponse()); + + Domain updatedDomain = testSupport.getResourceWithName(KubernetesTestSupport.DOMAIN, UID); + assertThat(updatedDomain.getStatus().getReason(), equalTo(DEADLINE_EXCEEDED)); + assertThat(updatedDomain.getStatus().getMessage(), equalTo(MESSAGE)); + } + @Test public void whenNewIntrospectorJobPodStatusReasonNullAfterImagePullFailure_dontPatchDomain() { processor.dispatchPodWatch( @@ -142,8 +170,8 @@ public void whenNewIntrospectorJobPodStatusReasonNullAfterImagePullFailure_dontP .toWatchResponse()); Domain updatedDomain = testSupport.getResourceWithName(KubernetesTestSupport.DOMAIN, UID); - assertThat(updatedDomain.getStatus().getReason(), equalTo(IMAGE_PULL_FAILURE)); - assertThat(updatedDomain.getStatus().getMessage(), equalTo(MESSAGE)); + assertThat(updatedDomain.getStatus().getReason(), emptyOrNullString()); + assertThat(updatedDomain.getStatus().getMessage(), emptyOrNullString()); } private V1Pod createIntrospectorJobPod(V1ContainerState waitingState) { @@ -172,6 +200,24 @@ private V1Pod createIntrospectorJobPod(String domainUid) { .spec(new V1PodSpec())); } + private V1Pod createIntrospectorJobPodWithConditions(V1PodCondition condition) { + return createIntrospectorJobPod(UID) + .status( + new V1PodStatusBuilder() + .withConditions(condition) + .build()); 
+ } + + private V1Pod createIntrospectorJobPodWithPhase(String phase, String reason) { + return createIntrospectorJobPod(UID) + .status( + new V1PodStatusBuilder() + .withPhase(phase) + .withReason(reason) + .withMessage(MESSAGE) + .build()); + } + private V1ContainerState createWaitingState(String reason, String message) { return new V1ContainerStateBuilder() .withNewWaiting() @@ -181,6 +227,14 @@ private V1ContainerState createWaitingState(String reason, String message) { .build(); } + private V1PodCondition createPodConditions(String reason, String message) { + return new V1PodConditionBuilder() + .withReason(reason) + .withMessage(message) + .build(); + } + + private String getPodSuffix() { return "-" + RandomStringUtils.randomAlphabetic(5).toLowerCase(); } diff --git a/operator/src/test/java/oracle/kubernetes/operator/helpers/JobHelperTest.java b/operator/src/test/java/oracle/kubernetes/operator/helpers/JobHelperTest.java index 021b94b97bf..ab718726ed1 100644 --- a/operator/src/test/java/oracle/kubernetes/operator/helpers/JobHelperTest.java +++ b/operator/src/test/java/oracle/kubernetes/operator/helpers/JobHelperTest.java @@ -25,7 +25,6 @@ import io.kubernetes.client.openapi.models.V1PodSecurityContext; import io.kubernetes.client.openapi.models.V1PodSpec; import io.kubernetes.client.openapi.models.V1PodTemplateSpec; -import io.kubernetes.client.openapi.models.V1SecretReference; import io.kubernetes.client.openapi.models.V1SecurityContext; import io.kubernetes.client.openapi.models.V1Toleration; import oracle.kubernetes.operator.LabelConstants; @@ -49,6 +48,8 @@ import org.junit.Before; import org.junit.Test; +import static oracle.kubernetes.operator.DomainProcessorTestSetup.UID; +import static oracle.kubernetes.operator.DomainProcessorTestSetup.createTestDomain; import static oracle.kubernetes.operator.ProcessingConstants.DOMAIN_TOPOLOGY; import static oracle.kubernetes.operator.helpers.Matchers.hasContainer; import static 
oracle.kubernetes.operator.helpers.Matchers.hasEnvVar; @@ -76,8 +77,6 @@ public class JobHelperTest { - private static final String NS = "ns1"; - private static final String DOMAIN_UID = "JobHelperTestDomain"; private static final String RAW_VALUE_1 = "find uid1 at $(DOMAIN_HOME)"; private static final String END_VALUE_1 = "find uid1 at /u01/oracle/user_projects/domains"; /** @@ -87,7 +86,8 @@ public class JobHelperTest { */ private static final String OEVN = "OPERATOR_ENVVAR_NAMES"; private Method getDomainSpec; - private final DomainPresenceInfo domainPresenceInfo = createDomainPresenceInfo(); + private final Domain domain = createTestDomain(); + private final DomainPresenceInfo domainPresenceInfo = createDomainPresenceInfo(domain); private final V1PodSecurityContext podSecurityContext = createPodSecurityContext(123L); private final V1SecurityContext containerSecurityContext = createSecurityContext(555L); private final V1Affinity podAffinity = createAffinity(); @@ -108,6 +108,8 @@ public void setup() throws Exception { mementos.add(TuningParametersStub.install()); mementos.add(testSupport.install()); + domain.getSpec().setNodeName(null); + testSupport.defineResources(domain); testSupport.addDomainPresenceInfo(domainPresenceInfo); } @@ -294,7 +296,7 @@ public void whenDomainHasEnvironmentVars_introspectorPodStartupVerifyDataHomeEnv } private static final String OVERRIDE_DATA_DIR = "/u01/data"; - private static final String OVERRIDE_DATA_HOME = OVERRIDE_DATA_DIR + File.separator + DOMAIN_UID; + private static final String OVERRIDE_DATA_HOME = OVERRIDE_DATA_DIR + File.separator + UID; @Test public void whenDomainHasEnvironmentVars_introspectorPodStartupVerifyDataHomeEnvDefined() { @@ -492,7 +494,7 @@ public void podTemplate_hasCreateByOperatorLabel() { public void podTemplate_hasDomainUidLabel() { V1JobSpec jobSpec = createJobSpec(); - assertThat(getTemplateLabel(jobSpec, LabelConstants.DOMAINUID_LABEL), equalTo(DOMAIN_UID)); + assertThat(getTemplateLabel(jobSpec, 
LabelConstants.DOMAINUID_LABEL), equalTo(UID)); } @Test @@ -501,7 +503,7 @@ public void podTemplate_hasJobNameLabel() { assertThat( getTemplateLabel(jobSpec, LabelConstants.JOBNAME_LABEL), - equalTo(LegalNames.toJobIntrospectorName(DOMAIN_UID))); + equalTo(LegalNames.toJobIntrospectorName(UID))); } private String getTemplateLabel(V1JobSpec jobSpec, String labelKey) { @@ -569,7 +571,7 @@ public void introspectorPodContainerSpec_hasJobNameAsContainerName() { assertThat( getMatchingContainer(domainPresenceInfo, jobSpec).map(V1Container::getName).orElse(null), - is(JobHelper.createJobName(DOMAIN_UID))); + is(JobHelper.createJobName(UID))); } @Test @@ -830,16 +832,8 @@ private void defineTopology() { testSupport.addToPacket(DOMAIN_TOPOLOGY, configSupport.createDomainConfig()); } - private DomainPresenceInfo createDomainPresenceInfo() { - DomainPresenceInfo domainPresenceInfo = - new DomainPresenceInfo( - new Domain() - .withMetadata(new V1ObjectMeta().namespace(NS)) - .withSpec( - new DomainSpec() - .withDomainUid(DOMAIN_UID) - .withWebLogicCredentialsSecret( - new V1SecretReference().name("webLogicCredentialsSecretName")))); + private DomainPresenceInfo createDomainPresenceInfo(Domain domain) { + DomainPresenceInfo domainPresenceInfo = new DomainPresenceInfo(domain); configureDomain(domainPresenceInfo) .withDefaultServerStartPolicy(ConfigurationConstants.START_NEVER); return domainPresenceInfo; diff --git a/operator/src/test/java/oracle/kubernetes/operator/helpers/KubernetesTestSupport.java b/operator/src/test/java/oracle/kubernetes/operator/helpers/KubernetesTestSupport.java index 7ccf5ab8adc..b92ffe891ea 100644 --- a/operator/src/test/java/oracle/kubernetes/operator/helpers/KubernetesTestSupport.java +++ b/operator/src/test/java/oracle/kubernetes/operator/helpers/KubernetesTestSupport.java @@ -49,6 +49,8 @@ import io.kubernetes.client.openapi.models.V1Job; import io.kubernetes.client.openapi.models.V1JobList; import io.kubernetes.client.openapi.models.V1ListMeta; 
+import io.kubernetes.client.openapi.models.V1Namespace; +import io.kubernetes.client.openapi.models.V1NamespaceList; import io.kubernetes.client.openapi.models.V1ObjectMeta; import io.kubernetes.client.openapi.models.V1PersistentVolume; import io.kubernetes.client.openapi.models.V1PersistentVolumeClaim; @@ -96,6 +98,7 @@ public class KubernetesTestSupport extends FiberTestSupport { public static final String BETA_CRD = "BetaCRD"; public static final String CONFIG_MAP = "ConfigMap"; public static final String CUSTOM_RESOURCE_DEFINITION = "CRD"; + public static final String NAMESPACE = "Namespace"; public static final String DOMAIN = "Domain"; public static final String EVENT = "Event"; public static final String JOB = "Job"; @@ -110,11 +113,17 @@ public class KubernetesTestSupport extends FiberTestSupport { public static final String SELF_SUBJECT_RULES_REVIEW = "SelfSubjectRulesReview"; public static final String TOKEN_REVIEW = "TokenReview"; - private static RequestParams REQUEST_PARAMS - = new RequestParams("testcall", "junit", "testName", "body"); + private static final String PATH_PATTERN = "\\w+(?:.\\w+)*"; + private static final String OP_PATTERN = "=|==|!="; + private static final String VALUE_PATTERN = ".*"; + private static final Pattern FIELD_PATTERN + = Pattern.compile("(" + PATH_PATTERN + ")(" + OP_PATTERN + ")(" + VALUE_PATTERN + ")"); - private Map> repositories = new HashMap<>(); - private Map, String> dataTypes = new HashMap<>(); + private static final RequestParams REQUEST_PARAMS + = new RequestParams("testcall", "junit", "testName", "body"); + + private final Map> repositories = new HashMap<>(); + private final Map, String> dataTypes = new HashMap<>(); private Failure failure; private long resourceVersion; private int numCalls; @@ -133,9 +142,10 @@ public Memento install() { support(SUBJECT_ACCESS_REVIEW, V1SubjectAccessReview.class); support(TOKEN_REVIEW, V1TokenReview.class); support(PV, V1PersistentVolume.class, this::createPvList); + 
support(NAMESPACE, V1Namespace.class, this::createNamespaceList); supportNamespaced(CONFIG_MAP, V1ConfigMap.class, this::createConfigMapList); - supportNamespaced(DOMAIN, Domain.class, this::createDomainList); + supportNamespaced(DOMAIN, Domain.class, this::createDomainList).withStatusSubresource(); supportNamespaced(EVENT, V1Event.class, this::createEventList); supportNamespaced(JOB, V1Job.class, this::createJobList); supportNamespaced(POD, V1Pod.class, this::createPodList); @@ -167,6 +177,10 @@ private V1PersistentVolumeClaimList createPvcList(List return new V1PersistentVolumeClaimList().metadata(createListMeta()).items(items); } + private V1NamespaceList createNamespaceList(List items) { + return new V1NamespaceList().metadata(createListMeta()).items(items); + } + private V1PodList createPodList(List items) { return new V1PodList().metadata(createListMeta()).items(items); } @@ -194,21 +208,25 @@ private void support(String resourceName, Class resourceClass) { @SuppressWarnings("SameParameterValue") private void support( - String resourceName, Class resourceClass, Function, Object> toList) { + String resourceName, Class resourceClass, Function, Object> toList) { dataTypes.put(resourceClass, resourceName); repositories.put(resourceName, new DataRepository<>(resourceClass, toList)); } - @SuppressWarnings("SameParameterValue") - private void supportNamespaced(String resourceName, Class resourceClass) { + @SuppressWarnings({"SameParameterValue", "UnusedReturnValue"}) + private NamespacedDataRepository supportNamespaced(String resourceName, Class resourceClass) { + final NamespacedDataRepository dataRepository = new NamespacedDataRepository<>(resourceClass, null); dataTypes.put(resourceClass, resourceName); - repositories.put(resourceName, new NamespacedDataRepository<>(resourceClass, null)); + repositories.put(resourceName, dataRepository); + return dataRepository; } - private void supportNamespaced( - String resourceName, Class resourceClass, Function, Object> 
toList) { + private NamespacedDataRepository supportNamespaced( + String resourceName, Class resourceClass, Function, Object> toList) { + final NamespacedDataRepository dataRepository = new NamespacedDataRepository<>(resourceClass, toList); dataTypes.put(resourceClass, resourceName); - repositories.put(resourceName, new NamespacedDataRepository<>(resourceClass, toList)); + repositories.put(resourceName, dataRepository); + return dataRepository; } /** @@ -253,10 +271,10 @@ public List getResources(String resourceType) { @SuppressWarnings("unchecked") public T getResourceWithName(String resourceType, String name) { return (T) - getResources(resourceType).stream() - .filter(o -> name.equals(KubernetesUtils.getResourceName(o))) - .findFirst() - .orElse(null); + getResources(resourceType).stream() + .filter(o -> name.equals(KubernetesUtils.getResourceName(o))) + .findFirst() + .orElse(null); } /** @@ -443,10 +461,10 @@ public String getName(RequestParams requestParams) { } static class Failure { - private String resourceType; - private String name; - private String namespace; - private ApiException apiException; + private final String resourceType; + private final String name; + private final String namespace; + private final ApiException apiException; private Operation operation; public Failure(String resourceType, String name, String namespace, int httpStatus) { @@ -472,9 +490,9 @@ public Failure(String resourceType, String name, String namespace, int httpStatu boolean matches(String resourceType, RequestParams requestParams, Operation operation) { return this.resourceType.equals(resourceType) - && (this.operation == null || this.operation == operation) - && Objects.equals(name, operation.getName(requestParams)) - && Objects.equals(namespace, requestParams.namespace); + && (this.operation == null || this.operation == operation) + && Objects.equals(name, operation.getName(requestParams)) + && Objects.equals(namespace, requestParams.namespace); } HttpErrorException 
getException() { @@ -483,7 +501,7 @@ HttpErrorException getException() { } static class HttpErrorException extends RuntimeException { - private ApiException apiException; + private final ApiException apiException; HttpErrorException(ApiException apiException) { this.apiException = apiException; @@ -517,17 +535,17 @@ private class AsyncRequestStepFactoryImpl implements AsyncRequestStepFactory { @Override public Step createRequestAsync( - ResponseStep next, - RequestParams requestParams, - CallFactory factory, - ClientPool helper, - int timeoutSeconds, - int maxRetryCount, - String fieldSelector, - String labelSelector, - String resourceVersion) { + ResponseStep next, + RequestParams requestParams, + CallFactory factory, + ClientPool helper, + int timeoutSeconds, + int maxRetryCount, + String fieldSelector, + String labelSelector, + String resourceVersion) { return new KubernetesTestSupport.SimulatedResponseStep( - next, requestParams, fieldSelector, labelSelector); + next, requestParams, fieldSelector, labelSelector); } } @@ -535,8 +553,8 @@ private class CallDispatcherImpl implements SynchronousCallDispatcher { @SuppressWarnings("unchecked") @Override public T execute( - SynchronousCallFactory factory, RequestParams requestParams, Pool helper) - throws ApiException { + SynchronousCallFactory factory, RequestParams requestParams, Pool helper) + throws ApiException { try { return (T) new CallContext(requestParams).execute(); } catch (HttpErrorException e) { @@ -550,31 +568,29 @@ static class DateTimeSerializer implements JsonDeserializer, JsonSeria @Override public DateTime deserialize( - final JsonElement je, final Type type, final JsonDeserializationContext jdc) - throws JsonParseException { + final JsonElement je, final Type type, final JsonDeserializationContext jdc) + throws JsonParseException { return je.isJsonObject() - ? new DateTime(Long.parseLong(je.getAsJsonObject().get("iMillis").getAsString())) - : DateTime.parse(je.getAsString()); + ? 
new DateTime(Long.parseLong(je.getAsJsonObject().get("iMillis").getAsString())) + : DateTime.parse(je.getAsString()); } @Override public JsonElement serialize( - final DateTime src, final Type typeOfSrc, final JsonSerializationContext context) { + final DateTime src, final Type typeOfSrc, final JsonSerializationContext context) { String retVal = src == null ? "" : DATE_FORMAT.print(src); return new JsonPrimitive(retVal); } } private class DataRepository { - private final String path = "\\w+(?:.\\w+)*"; - private final String op = "=|==|!="; - private final String value = ".*"; - private final Pattern fieldPat = Pattern.compile("(" + path + ")(" + op + ")(" + value + ")"); - private Map data = new HashMap<>(); - private Class resourceType; + private final Map data = new HashMap<>(); + private final Class resourceType; private Function, Object> listFactory; private List> onCreateActions = new ArrayList<>(); private List> onUpdateActions = new ArrayList<>(); + private Method getStatusMethod; + private Method setStatusMethod; public DataRepository(Class resourceType) { this.resourceType = resourceType; @@ -587,10 +603,28 @@ public DataRepository(Class resourceType, Function, Object> listFacto public DataRepository(Class resourceType, NamespacedDataRepository parent) { this.resourceType = resourceType; - onCreateActions = ((DataRepository) parent).onCreateActions; - onUpdateActions = ((DataRepository) parent).onUpdateActions; + copyFieldsFromParent(parent); + } + + public void copyFieldsFromParent(DataRepository parent) { + onCreateActions = parent.onCreateActions; + onUpdateActions = parent.onUpdateActions; + getStatusMethod = parent.getStatusMethod; + setStatusMethod = parent.setStatusMethod; } + @SuppressWarnings("UnusedReturnValue") + DataRepository withStatusSubresource() { + try { + getStatusMethod = resourceType.getMethod("getStatus"); + setStatusMethod = resourceType.getMethod("setStatus", getStatusMethod.getReturnType()); + } catch (NoSuchMethodException e) { + 
throw new RuntimeException("Resource type " + resourceType + " may not defined with a status subdomain"); + } + return this; + } + + @SuppressWarnings("unchecked") void createResourceInNamespace(String name, String namespace, Object resource) { data.put(name, (T) resource); @@ -630,9 +664,9 @@ Object listResources(String namespace, String fieldSelector, String... labelSele List getResources(String fieldSelector, String... labelSelectors) { return data.values().stream() - .filter(withFields(fieldSelector)) - .filter(withLabels(labelSelectors)) - .collect(Collectors.toList()); + .filter(withFields(fieldSelector)) + .filter(withLabels(labelSelectors)) + .collect(Collectors.toList()); } List getResources() { @@ -668,7 +702,7 @@ private boolean allSelectorsMatch(Object o, String fieldSelector) { } private boolean hasField(Object object, String fieldSpec) { - Matcher fieldMatcher = fieldPat.matcher(fieldSpec); + Matcher fieldMatcher = FIELD_PATTERN.matcher(fieldSpec); if (!fieldMatcher.find()) { return false; } @@ -679,11 +713,18 @@ private boolean hasField(Object object, String fieldSpec) { T replaceResource(String name, T resource) { setName(resource, name); + Optional.ofNullable(data.get(name)).ifPresent(old -> optionallyCopyStatusSubresource(old, resource)); data.put(name, withOptionalCreationTimeStamp(resource)); onUpdateActions.forEach(a -> a.accept(resource)); return resource; } + private void optionallyCopyStatusSubresource(T fromResource, T toResource) { + if (getStatusMethod != null) { + copyResourceStatus(fromResource, toResource); + } + } + T replaceResourceStatus(String name, T resource) { setName(resource, name); @@ -691,16 +732,30 @@ T replaceResourceStatus(String name, T resource) { if (current == null) { throw new IllegalStateException(); } + copyResourceStatus(resource, current); + incrementResourceVersion(getMetadata(current)); + onUpdateActions.forEach(a -> a.accept(current)); + return current; + } + + private void 
incrementResourceVersion(V1ObjectMeta metadata) { + metadata.setResourceVersion(incrementString(metadata.getResourceVersion())); + } + + private String incrementString(String string) { try { - Method getMethod = current.getClass().getMethod("getStatus"); - current.getClass().getMethod("setStatus", getMethod.getReturnType()) - .invoke(current, getMethod.invoke(resource)); - } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) { - e.printStackTrace(); + return Integer.toString(Integer.parseInt(string) + 1); + } catch (NumberFormatException e) { + return "0"; } + } - onUpdateActions.forEach(a -> a.accept(current)); - return current; + private void copyResourceStatus(T fromResources, T toResource) { + try { + setStatusMethod.invoke(toResource, getStatusMethod.invoke(fromResources)); + } catch (NullPointerException | IllegalAccessException | InvocationTargetException e) { + throw new RuntimeException("Status subresource not defined"); + } } V1Status deleteResource(String name, String namespace) { @@ -740,6 +795,7 @@ public T patchResource(String name, String namespace, V1Patch body) { JsonPatch patch = Json.createPatch(fromV1Patch(body)); JsonStructure result = patch.apply(toJsonStructure(data.get(name))); T resource = fromJsonStructure(result); + Optional.ofNullable(data.get(name)).ifPresent(old -> optionallyCopyStatusSubresource(old, resource)); data.put(name, resource); onUpdateActions.forEach(a -> a.accept(resource)); return resource; @@ -748,7 +804,7 @@ public T patchResource(String name, String namespace, V1Patch body) { @SuppressWarnings("unchecked") T fromJsonStructure(JsonStructure jsonStructure) { final GsonBuilder builder = - new GsonBuilder().registerTypeAdapter(DateTime.class, new DateTimeSerializer()); + new GsonBuilder().registerTypeAdapter(DateTime.class, new DateTimeSerializer()); return (T) builder.create().fromJson(jsonStructure.toString(), resourceType); } @@ -789,7 +845,7 @@ class FieldMatcher { private String 
value; FieldMatcher(String fieldSpec) { - Matcher fieldMatcher = fieldPat.matcher(fieldSpec); + Matcher fieldMatcher = FIELD_PATTERN.matcher(fieldSpec); if (fieldMatcher.find()) { path = fieldMatcher.group(1); op = fieldMatcher.group(2); @@ -830,9 +886,9 @@ private Object getSubField(Object object, String fieldName) { } private class NamespacedDataRepository extends DataRepository { - private Map> repositories = new HashMap<>(); - private Class resourceType; - private Function, Object> listFactory; + private final Map> repositories = new HashMap<>(); + private final Class resourceType; + private final Function, Object> listFactory; NamespacedDataRepository(Class resourceType, Function, Object> listFactory) { super(resourceType); @@ -980,7 +1036,7 @@ private V1Status deleteResource(DataRepository dataRepository) { private T patchResource(DataRepository dataRepository) { return dataRepository.patchResource( - requestParams.name, requestParams.namespace, (V1Patch) requestParams.body); + requestParams.name, requestParams.namespace, (V1Patch) requestParams.body); } private Object listResources(DataRepository dataRepository) { @@ -997,11 +1053,10 @@ public V1Status deleteCollection(DataRepository dataRepository) { } private class SimulatedResponseStep extends Step { - - private CallContext callContext; + private final CallContext callContext; SimulatedResponseStep( - Step next, RequestParams requestParams, String fieldSelector, String labelSelector) { + Step next, RequestParams requestParams, String fieldSelector, String labelSelector) { super(next); callContext = new CallContext(requestParams, fieldSelector, labelSelector); } @@ -1052,4 +1107,4 @@ public NotFoundException(String resourceType, String name, String namespace) { super(String.format("No %s named %s found in namespace %s", resourceType, name, namespace)); } } -} +} \ No newline at end of file diff --git a/operator/src/test/java/oracle/kubernetes/operator/helpers/KubernetesTestSupportTest.java 
b/operator/src/test/java/oracle/kubernetes/operator/helpers/KubernetesTestSupportTest.java index 4d037df6e6c..cc016f191b2 100644 --- a/operator/src/test/java/oracle/kubernetes/operator/helpers/KubernetesTestSupportTest.java +++ b/operator/src/test/java/oracle/kubernetes/operator/helpers/KubernetesTestSupportTest.java @@ -37,7 +37,10 @@ import oracle.kubernetes.utils.SystemClockTestSupport; import oracle.kubernetes.utils.TestUtils; import oracle.kubernetes.weblogic.domain.model.Domain; +import oracle.kubernetes.weblogic.domain.model.DomainCondition; +import oracle.kubernetes.weblogic.domain.model.DomainConditionType; import oracle.kubernetes.weblogic.domain.model.DomainList; +import oracle.kubernetes.weblogic.domain.model.DomainStatus; import org.joda.time.DateTime; import org.junit.After; import org.junit.Before; @@ -177,6 +180,23 @@ public void afterReplaceDomainWithTimeStampDisabled_timeStampIsNotChanged() { assertThat(getCreationTimestamp(updatedDomain), equalTo(getCreationTimestamp(originalDomain))); } + @Test + public void afterDomainStatusReplaced_resourceVersionIsIncremented() { + Domain originalDomain = createDomain(NS, "domain1"); + testSupport.defineResources(originalDomain); + originalDomain.getMetadata().setResourceVersion("123"); + + Step steps = new CallBuilder() + .replaceDomainStatusAsync("domain1", NS, + createDomain(NS, "domain1") + .withStatus(new DomainStatus().addCondition(new DomainCondition(DomainConditionType.Progressing))), + null); + testSupport.runSteps(steps); + + Domain updatedDomain = testSupport.getResourceWithName(DOMAIN, "domain1"); + assertThat(updatedDomain.getMetadata().getResourceVersion(), equalTo("124")); + } + @Test public void afterPatchDomainWithTimeStampEnabled_timeStampIsNotChanged() { Domain originalDomain = createDomain(NS, "domain1"); @@ -356,7 +376,7 @@ public void listDomain_returnsAllInNamespace() { } private Domain createDomain(String namespace, String name) { - return new Domain().withMetadata(new 
V1ObjectMeta().name(name).namespace(namespace)); + return new Domain().withMetadata(new V1ObjectMeta().name(name).namespace(namespace)).withStatus(new DomainStatus()); } @Test diff --git a/operator/src/test/java/oracle/kubernetes/operator/steps/ManagedServerUpIteratorStepTest.java b/operator/src/test/java/oracle/kubernetes/operator/steps/ManagedServerUpIteratorStepTest.java index 04deb857520..7c6fc54d324 100644 --- a/operator/src/test/java/oracle/kubernetes/operator/steps/ManagedServerUpIteratorStepTest.java +++ b/operator/src/test/java/oracle/kubernetes/operator/steps/ManagedServerUpIteratorStepTest.java @@ -6,22 +6,33 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.List; -import java.util.stream.Collectors; +import java.util.Objects; +import java.util.concurrent.TimeUnit; +import java.util.stream.IntStream; +import javax.annotation.Nonnull; import com.meterware.simplestub.Memento; -import com.meterware.simplestub.StaticStubSupport; +import io.kubernetes.client.openapi.ApiException; import io.kubernetes.client.openapi.models.V1ObjectMeta; +import io.kubernetes.client.openapi.models.V1Pod; +import io.kubernetes.client.openapi.models.V1PodCondition; +import io.kubernetes.client.openapi.models.V1PodSpec; +import io.kubernetes.client.openapi.models.V1PodStatus; +import io.kubernetes.client.openapi.models.V1SecretReference; +import oracle.kubernetes.operator.KubernetesConstants; +import oracle.kubernetes.operator.LabelConstants; import oracle.kubernetes.operator.ProcessingConstants; import oracle.kubernetes.operator.helpers.DomainPresenceInfo; import oracle.kubernetes.operator.helpers.DomainPresenceInfo.ServerStartupInfo; -import oracle.kubernetes.operator.steps.ManagedServerUpIteratorStep.StartClusteredServersStep; -import oracle.kubernetes.operator.steps.ManagedServerUpIteratorStep.StartManagedServersStep; +import oracle.kubernetes.operator.helpers.KubernetesTestSupport; +import 
oracle.kubernetes.operator.helpers.LegalNames; +import oracle.kubernetes.operator.helpers.TuningParametersStub; import oracle.kubernetes.operator.utils.WlsDomainConfigSupport; -import oracle.kubernetes.operator.work.FiberTestSupport; +import oracle.kubernetes.operator.wlsconfig.WlsClusterConfig; +import oracle.kubernetes.operator.wlsconfig.WlsDomainConfig; +import oracle.kubernetes.operator.wlsconfig.WlsServerConfig; import oracle.kubernetes.operator.work.Step; -import oracle.kubernetes.operator.work.Step.StepAndPacket; import oracle.kubernetes.operator.work.TerminalStep; import oracle.kubernetes.utils.TestUtils; import oracle.kubernetes.weblogic.domain.ClusterConfigurator; @@ -34,43 +45,103 @@ import org.junit.Before; import org.junit.Test; -import static oracle.kubernetes.operator.steps.ManagedServerUpIteratorStepTest.TestStepFactory.getServers; -import static org.hamcrest.Matchers.allOf; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.junit.MatcherAssert.assertThat; public class ManagedServerUpIteratorStepTest { - private static final String DOMAIN = "domain"; + protected static final String DOMAIN_NAME = "domain1"; private static final String NS = "namespace"; private static final String UID = "uid1"; + protected static final String KUBERNETES_UID = "12345"; private static final String ADMIN = "asName"; private static final String CLUSTER = "cluster1"; + private static final boolean INCLUDE_SERVER_OUT_IN_POD_LOG = true; + private static final String CREDENTIALS_SECRET_NAME = "webLogicCredentialsSecretName"; + private static final String LATEST_IMAGE = "image:latest"; + private static final String MS_PREFIX = "ms"; + private static final String MS1 = MS_PREFIX + "1"; + private static final String MS2 = MS_PREFIX + "2"; + private static final String MS3 = MS_PREFIX + "3"; + private static 
final String MS4 = MS_PREFIX + "4"; + private static final int MAX_SERVERS = 5; + private static final int PORT = 8001; + private static final String[] MANAGED_SERVER_NAMES = + IntStream.rangeClosed(1, MAX_SERVERS) + .mapToObj(ManagedServerUpIteratorStepTest::getManagedServerName).toArray(String[]::new); + + @Nonnull + private static String getManagedServerName(int n) { + return MS_PREFIX + n; + } + private final Domain domain = createDomain(); private final DomainConfigurator configurator = DomainConfiguratorFactory.forDomain(domain); - private WlsDomainConfigSupport configSupport = new WlsDomainConfigSupport(DOMAIN); - - private Step nextStep = new TerminalStep(); - private FiberTestSupport testSupport = new FiberTestSupport(); - private List mementos = new ArrayList<>(); - private DomainPresenceInfo domainPresenceInfo = createDomainPresenceInfo(); - private TestUtils.ConsoleHandlerMemento consoleHandlerMemento; + private final WlsDomainConfigSupport configSupport = new WlsDomainConfigSupport(DOMAIN_NAME); + + private final Step nextStep = new TerminalStep(); + private final KubernetesTestSupport testSupport = new KubernetesTestSupport(); + private final List mementos = new ArrayList<>(); + private final DomainPresenceInfo domainPresenceInfo = createDomainPresenceInfoWithServers(); + private final WlsDomainConfig domainConfig = createDomainConfig(); + + private static WlsDomainConfig createDomainConfig() { + WlsClusterConfig clusterConfig = new WlsClusterConfig(CLUSTER); + for (String serverName : MANAGED_SERVER_NAMES) { + clusterConfig.addServerConfig(new WlsServerConfig(serverName, "domain1-" + serverName, 8001)); + } + return new WlsDomainConfig("base_domain") + .withAdminServer(ADMIN, "domain1-admin-server", 7001) + .withCluster(clusterConfig); + } - private DomainPresenceInfo createDomainPresenceInfo() { - return new DomainPresenceInfo(domain); + private DomainPresenceInfo createDomainPresenceInfoWithServers(String... 
serverNames) { + DomainPresenceInfo dpi = new DomainPresenceInfo(domain); + addServer(dpi, ADMIN); + Arrays.asList(serverNames).forEach(serverName -> addServer(dpi, serverName)); + return dpi; } private Domain createDomain() { - return new Domain().withMetadata(createMetaData()).withSpec(createDomainSpec()); + return new Domain() + .withApiVersion(KubernetesConstants.DOMAIN_VERSION) + .withKind(KubernetesConstants.DOMAIN) + .withMetadata(new V1ObjectMeta().namespace(NS).name(DOMAIN_NAME).uid(KUBERNETES_UID)) + .withSpec(createDomainSpec()); } - private V1ObjectMeta createMetaData() { - return new V1ObjectMeta().namespace(NS); + private DomainSpec createDomainSpec() { + return new DomainSpec() + .withDomainUid(UID) + .withWebLogicCredentialsSecret(new V1SecretReference().name(CREDENTIALS_SECRET_NAME)) + .withIncludeServerOutInPodLog(INCLUDE_SERVER_OUT_IN_POD_LOG) + .withImage(LATEST_IMAGE); } - private DomainSpec createDomainSpec() { - return new DomainSpec().withDomainUid(UID).withReplicas(1); + private static void addServer(DomainPresenceInfo domainPresenceInfo, String serverName) { + if (serverName.equals(ADMIN)) { + domainPresenceInfo.setServerPod(serverName, createReadyPod(serverName)); + } else { + domainPresenceInfo.setServerPod(serverName, createPod(serverName)); + } + } + + private static V1Pod createReadyPod(String serverName) { + return new V1Pod().metadata(withNames(new V1ObjectMeta().namespace(NS), serverName)) + .spec(new V1PodSpec().nodeName("Node1")) + .status(new V1PodStatus().phase("Running") + .addConditionsItem(new V1PodCondition().type("Ready").status("True"))); + } + + private static V1Pod createPod(String serverName) { + return new V1Pod().metadata(withNames(new V1ObjectMeta().namespace(NS), serverName)); + } + + private static V1ObjectMeta withNames(V1ObjectMeta objectMeta, String serverName) { + return objectMeta + .name(LegalNames.toPodName(UID, serverName)) + .putLabelsItem(LabelConstants.SERVERNAME_LABEL, serverName); } /** @@ -79,9 
+150,14 @@ private DomainSpec createDomainSpec() { */ @Before public void setUp() throws NoSuchFieldException { - mementos.add(consoleHandlerMemento = TestUtils.silenceOperatorLogger()); - mementos.add(TestStepFactory.install()); - testSupport.addDomainPresenceInfo(domainPresenceInfo); + mementos.add(TestUtils.silenceOperatorLogger().ignoringLoggedExceptions(ApiException.class)); + mementos.add(TuningParametersStub.install()); + mementos.add(testSupport.install()); + + testSupport.defineResources(domain); + testSupport + .addToPacket(ProcessingConstants.DOMAIN_TOPOLOGY, domainConfig) + .addDomainPresenceInfo(domainPresenceInfo); } /** @@ -97,80 +173,130 @@ public void tearDown() throws Exception { testSupport.throwOnCompletionFailure(); } - @Test - public void withConcurrencyOf1_bothClusteredServersStartSequentially() { - configureCluster(CLUSTER).withMaxConcurrentStartup(1); - addWlsCluster(CLUSTER, "ms1", "ms2"); + private void makePodReady(String serverName) { + domainPresenceInfo.getServerPod(serverName).status(new V1PodStatus().phase("Running")); + Objects.requireNonNull(domainPresenceInfo.getServerPod(serverName).getStatus()) + .addConditionsItem(new V1PodCondition().status("True").type("Ready")); + } - invokeStepWithServerStartupInfos(createServerStartupInfosForCluster(CLUSTER,"ms1", "ms2")); + private void schedulePod(String serverName, String nodeName) { + Objects.requireNonNull(domainPresenceInfo.getServerPod(serverName).getSpec()).setNodeName(nodeName); + } - assertThat(getServers(), hasItem(Arrays.asList("ms1", "ms2"))); - assertThat(getServers().size(), equalTo(1)); + @Test + public void withConcurrencyOf1_bothClusteredServersScheduleAndStartSequentially() { + configureCluster(CLUSTER).withMaxConcurrentStartup(1); + //addWlsCluster(CLUSTER, 8001, MS1, MS2); + addWlsCluster(CLUSTER, 8001, MS1, MS2); + + invokeStepWithServerStartupInfos(createServerStartupInfosForCluster(CLUSTER,MS1, MS2)); + + assertThat(MS1 + " pod", 
domainPresenceInfo.getServerPod(MS1), notNullValue()); + schedulePod(MS1, "Node1"); + testSupport.setTime(100, TimeUnit.MILLISECONDS); + assertThat(MS2 + " pod", domainPresenceInfo.getServerPod(MS2), nullValue()); + makePodReady(MS1); + testSupport.setTime(10, TimeUnit.SECONDS); + assertThat(MS2 + " pod", domainPresenceInfo.getServerPod(MS2), notNullValue()); } @Test - public void withConcurrencyOf0_bothClusteredServersStartConcurrently() { + public void withConcurrencyOf0_clusteredServersScheduleSequentiallyAndStartConcurrently() { configureCluster(CLUSTER).withMaxConcurrentStartup(0); - addWlsCluster(CLUSTER, "ms1", "ms2"); + addWlsCluster(CLUSTER, PORT, MS1, MS2); - invokeStepWithServerStartupInfos(createServerStartupInfosForCluster(CLUSTER,"ms1", "ms2")); + invokeStepWithServerStartupInfos(createServerStartupInfosForCluster(CLUSTER,MS1, MS2)); - assertThat(getServers(), allOf(hasItem("ms1"), hasItem("ms2"))); + assertThat(MS1 + " pod", domainPresenceInfo.getServerPod(MS1), notNullValue()); + assertThat(MS2 + " pod", domainPresenceInfo.getServerPod(MS2), nullValue()); + schedulePod(MS1, "Node1"); + testSupport.setTime(100, TimeUnit.MILLISECONDS); + assertThat(MS2 + " pod", domainPresenceInfo.getServerPod(MS2), notNullValue()); } @Test - public void withConcurrencyOf2_bothClusteredServersStartConcurrently() { + public void withConcurrencyOf2_clusteredServersScheduleSequentiallyAndStartConcurrently() { configureCluster(CLUSTER).withMaxConcurrentStartup(2); - addWlsCluster(CLUSTER, "ms1", "ms2"); + addWlsCluster(CLUSTER, PORT, MS1, MS2); - invokeStepWithServerStartupInfos(createServerStartupInfosForCluster(CLUSTER, "ms1", "ms2")); + invokeStepWithServerStartupInfos(createServerStartupInfosForCluster(CLUSTER, MS1, MS2)); - assertThat(getServers(), allOf(hasItem("ms1"), hasItem("ms2"))); + assertThat(MS1 + " pod", domainPresenceInfo.getServerPod(MS1), notNullValue()); + assertThat(MS2 + " pod", domainPresenceInfo.getServerPod(MS2), nullValue()); + schedulePod(MS1, 
"Node1"); + testSupport.setTime(100, TimeUnit.MILLISECONDS); + assertThat(MS2 + " pod", domainPresenceInfo.getServerPod(MS2), notNullValue()); } @Test - public void withConcurrencyOf2_4clusteredServersStartIn2Threads() { + public void withConcurrencyOf2_4clusteredServersScheduleSequentiallyAndStartIn2Threads() { configureCluster(CLUSTER).withMaxConcurrentStartup(2); - addWlsCluster(CLUSTER, "ms1", "ms2", "ms3", "ms4"); - - invokeStepWithServerStartupInfos(createServerStartupInfosForCluster(CLUSTER, "ms1", "ms2", "ms3", "ms4")); - - assertThat(getServers(), hasItem(Arrays.asList("ms1", "ms2", "ms3", "ms4"))); - assertThat(getServers().size(), equalTo(2)); + addWlsCluster(CLUSTER, PORT, MS1, MS2, MS3, MS4); + + invokeStepWithServerStartupInfos(createServerStartupInfosForCluster(CLUSTER, MS1, MS2, MS3, MS4)); + assertThat(MS1 + " pod", domainPresenceInfo.getServerPod(MS1), notNullValue()); + assertThat(MS2 + " pod", domainPresenceInfo.getServerPod(MS2), nullValue()); + schedulePod(MS1, "Node1"); + testSupport.setTime(100, TimeUnit.MILLISECONDS); + assertThat(MS2 + " pod", domainPresenceInfo.getServerPod(MS2), notNullValue()); + assertThat(MS3 + " pod", domainPresenceInfo.getServerPod(MS3), nullValue()); + schedulePod(MS2, "Node2"); + testSupport.setTime(100, TimeUnit.MILLISECONDS); + assertThat(MS3 + " pod", domainPresenceInfo.getServerPod(MS3), nullValue()); + makePodReady(MS1); + testSupport.setTime(10, TimeUnit.SECONDS); + assertThat(MS3 + " pod", domainPresenceInfo.getServerPod(MS3), notNullValue()); + assertThat(MS4 + " pod", domainPresenceInfo.getServerPod(MS4), nullValue()); + makePodReady(MS2); + schedulePod(MS3, "Node3"); + testSupport.setTime(10, TimeUnit.SECONDS); + assertThat(MS4 + " pod", domainPresenceInfo.getServerPod(MS4), notNullValue()); } @Test - public void withMultipleClusters_differentClusterStartDifferently() { + public void withMultipleClusters_differentClusterScheduleAndStartDifferently() { final String CLUSTER2 = "cluster2"; - 
configureCluster(CLUSTER).withMaxConcurrentStartup(1); - configureCluster(CLUSTER2).withMaxConcurrentStartup(0); - addWlsCluster(CLUSTER, "ms1", "ms2"); - addWlsCluster(CLUSTER2, "ms3", "ms4"); + configureCluster(CLUSTER).withMaxConcurrentStartup(0); + configureCluster(CLUSTER2).withMaxConcurrentStartup(1); - Collection serverStartupInfos = createServerStartupInfosForCluster(CLUSTER, "ms1", "ms2"); - serverStartupInfos.addAll(createServerStartupInfosForCluster(CLUSTER2, "ms3", "ms4")); + addWlsCluster(CLUSTER, PORT, MS1, MS2); + addWlsCluster(CLUSTER2, PORT, MS3, MS4); + + Collection serverStartupInfos = createServerStartupInfosForCluster(CLUSTER, MS1, MS2); + serverStartupInfos.addAll(createServerStartupInfosForCluster(CLUSTER2, MS3, MS4)); invokeStepWithServerStartupInfos(serverStartupInfos); - assertThat(getServers(), hasItem(Arrays.asList("ms1", "ms2"))); - assertThat(getServers(), allOf(hasItem("ms3"), hasItem("ms4"))); + assertThat(MS1 + " pod", domainPresenceInfo.getServerPod(MS1), notNullValue()); + assertThat(MS3 + " pod", domainPresenceInfo.getServerPod(MS3), notNullValue()); + schedulePod(MS1, "Node1"); + schedulePod(MS3, "Node2"); + testSupport.setTime(100, TimeUnit.MILLISECONDS); + assertThat(MS2 + " pod", domainPresenceInfo.getServerPod(MS2), notNullValue()); + assertThat(MS4 + " pod", domainPresenceInfo.getServerPod(MS4), nullValue()); + //makePodReady(MS3); + //k8sTestSupport.setTime(10, TimeUnit.SECONDS); + //assertThat(MS4 + " pod", domainPresenceInfo.getServerPod(MS4), notNullValue()); } @Test public void maxClusterConcurrentStartup_doesNotApplyToNonClusteredServers() { domain.getSpec().setMaxClusterConcurrentStartup(1); - addWlsServers("ms3", "ms4"); + addWlsServers(MS3, MS4); - invokeStepWithServerStartupInfos(createServerStartupInfos("ms3", "ms4")); + invokeStepWithServerStartupInfos(createServerStartupInfos(MS3, MS4)); - assertThat(getServers(), allOf(hasItem("ms3"), hasItem("ms4"))); + assertThat(MS3 + " pod", 
domainPresenceInfo.getServerPod(MS3), notNullValue()); + schedulePod(MS3, "Node2"); + testSupport.setTime(200, TimeUnit.MILLISECONDS); + assertThat(MS3 + " pod", domainPresenceInfo.getServerPod(MS3), notNullValue()); } @NotNull private Collection createServerStartupInfosForCluster(String clusterName, String... servers) { Collection serverStartupInfos = new ArrayList<>(); - Arrays.asList(servers).stream().forEach(server -> + Arrays.stream(servers).forEach(server -> serverStartupInfos.add( new ServerStartupInfo(configSupport.getWlsServer(clusterName, server), clusterName, @@ -183,7 +309,7 @@ private Collection createServerStartupInfosForCluster(String @NotNull private Collection createServerStartupInfos(String... servers) { Collection serverStartupInfos = new ArrayList<>(); - Arrays.asList(servers).stream().forEach(server -> + Arrays.stream(servers).forEach(server -> serverStartupInfos.add( new ServerStartupInfo(configSupport.getWlsServer(server), null, @@ -195,10 +321,6 @@ private Collection createServerStartupInfos(String... servers private void invokeStepWithServerStartupInfos(Collection startupInfos) { ManagedServerUpIteratorStep step = new ManagedServerUpIteratorStep(startupInfos, nextStep); - // configSupport.setAdminServerName(ADMIN); - - testSupport.addToPacket( - ProcessingConstants.DOMAIN_TOPOLOGY, configSupport.createDomainConfig()); testSupport.runSteps(step); } @@ -207,49 +329,15 @@ private ClusterConfigurator configureCluster(String clusterName) { } private void addWlsServers(String... serverNames) { - Arrays.asList(serverNames).forEach(serverName -> addWlsServer(serverName)); + Arrays.asList(serverNames).forEach(this::addWlsServer); } private void addWlsServer(String serverName) { - configSupport.addWlsServer(serverName); - } - - private void addWlsCluster(String clusterName, String... 
serverNames) { - configSupport.addWlsCluster(clusterName, serverNames); + configSupport.addWlsServer(serverName, 8001); } - static class TestStepFactory implements ManagedServerUpIteratorStep.NextStepFactory { + private void addWlsCluster(String clusterName, int port, String... serverNames) { - private static Step next; - private static TestStepFactory factory = new TestStepFactory(); - - private static Memento install() throws NoSuchFieldException { - return StaticStubSupport.install(ManagedServerUpIteratorStep.class, "NEXT_STEP_FACTORY", factory); - } - - static Collection getServers() { - if (next instanceof StartManagedServersStep) { - return ((StartManagedServersStep)next).getStartDetails() - .stream() - .map(serverToStart -> getServerFromStepAndPacket(serverToStart)).collect(Collectors.toList()); - } - return Collections.emptyList(); - } - - static Object getServerFromStepAndPacket(StepAndPacket startDetail) { - if (startDetail.step instanceof StartClusteredServersStep) { - Collection serversToStart = ((StartClusteredServersStep)startDetail.step).getServersToStart(); - return serversToStart.stream().map(serverToStart -> getServerFromStepAndPacket(serverToStart)) - .collect(Collectors.toList()); - } - return startDetail.packet.get(ProcessingConstants.SERVER_NAME); - } - - @Override - public Step createStatusUpdateStep(Step next) { - TestStepFactory.next = next; - return new TerminalStep(); - } + configSupport.addWlsCluster(clusterName, port, serverNames); } - -} +} \ No newline at end of file diff --git a/operator/src/test/java/oracle/kubernetes/operator/steps/ManagedServersUpStepTest.java b/operator/src/test/java/oracle/kubernetes/operator/steps/ManagedServersUpStepTest.java index 30d813b57ee..f6611926636 100644 --- a/operator/src/test/java/oracle/kubernetes/operator/steps/ManagedServersUpStepTest.java +++ b/operator/src/test/java/oracle/kubernetes/operator/steps/ManagedServersUpStepTest.java @@ -16,6 +16,7 @@ import 
io.kubernetes.client.openapi.models.V1EnvVar; import io.kubernetes.client.openapi.models.V1ObjectMeta; import io.kubernetes.client.openapi.models.V1Pod; +import oracle.kubernetes.operator.DomainStatusUpdater; import oracle.kubernetes.operator.LabelConstants; import oracle.kubernetes.operator.ProcessingConstants; import oracle.kubernetes.operator.helpers.DomainPresenceInfo; @@ -483,7 +484,7 @@ public void whenNotShuttingDown_dontInsertCreateAvailableStep() { public void whenShuttingDownAtLeastOneServer_prependServerDownIteratorStep() { addServer(domainPresenceInfo, "server1"); - assertThat(createNextStep(), instanceOf(ServerDownIteratorStep.class)); + assertThat(skipProgressingStep(createNextStep()), instanceOf(ServerDownIteratorStep.class)); } @Test @@ -493,7 +494,7 @@ public void whenExclusionsSpecified_doNotAddToListOfServers() { addServer(domainPresenceInfo, "server3"); addServer(domainPresenceInfo, ADMIN); - assertStoppingServers(createNextStepWithout("server2"), "server1", "server3"); + assertStoppingServers(skipProgressingStep(createNextStepWithout("server2")), "server1", "server3"); } @Test @@ -505,7 +506,7 @@ public void whenShuttingDown_allowAdminServerNameInListOfServers() { addServer(domainPresenceInfo, "server3"); addServer(domainPresenceInfo, ADMIN); - assertStoppingServers(createNextStepWithout("server2"), "server1", "server3", ADMIN); + assertStoppingServers(skipProgressingStep(createNextStepWithout("server2")), "server1", "server3", ADMIN); } @Test @@ -595,6 +596,13 @@ public void whenDomainToplogyIsMissing_noExceptionAndDontStartServers() { assertServersWillNotBeStarted(); } + private static Step skipProgressingStep(Step step) { + if (step instanceof DomainStatusUpdater.ProgressingStep) { + return step.getNext(); + } + return step; + } + private void assertStoppingServers(Step step, String... 
servers) { assertThat(((ServerDownIteratorStep) step).getServersToStop(), containsInAnyOrder(servers)); } diff --git a/operator/src/test/java/oracle/kubernetes/operator/utils/WlsDomainConfigSupport.java b/operator/src/test/java/oracle/kubernetes/operator/utils/WlsDomainConfigSupport.java index 5c3171b3951..2c502e9c28e 100644 --- a/operator/src/test/java/oracle/kubernetes/operator/utils/WlsDomainConfigSupport.java +++ b/operator/src/test/java/oracle/kubernetes/operator/utils/WlsDomainConfigSupport.java @@ -118,13 +118,25 @@ public WlsServerConfig getWlsServer(String clusterName, String serverName) { * @param serverNames the names of the servers */ public void addWlsCluster(String clusterName, String... serverNames) { + addWlsCluster(clusterName, null, serverNames); + } + + /** + * Adds a WLS cluster to the configuration, including its member servers. + * + * @param clusterName the name of the cluster + * @param port - the port of the servers + * @param serverNames the names of the servers + */ + public void addWlsCluster(String clusterName, Integer port, String... serverNames) { ClusterConfigBuilder builder = new ClusterConfigBuilder(clusterName); for (String serverName : serverNames) { - builder.addServer(serverName); + builder.addServer(serverName, port); } wlsClusters.put(clusterName, builder.build()); } + /** * Returns the configuration for the named cluster, if any has been defined. 
* diff --git a/pom.xml b/pom.xml index 7f77a1597a6..843587fac73 100644 --- a/pom.xml +++ b/pom.xml @@ -7,7 +7,7 @@ oracle.kubernetes operator-parent - 3.0.1 + 3.0.2 operator diff --git a/src/integration-tests/introspector/wl-create-domain-pod.pyt b/src/integration-tests/introspector/wl-create-domain-pod.pyt index 0620d91244d..220ef160ec7 100644 --- a/src/integration-tests/introspector/wl-create-domain-pod.pyt +++ b/src/integration-tests/introspector/wl-create-domain-pod.pyt @@ -36,8 +36,13 @@ readTemplate("/u01/oracle/wlserver/common/templates/wls/wls.jar") set('Name', '${DOMAIN_NAME}') setOption('DomainName', '${DOMAIN_NAME}') + +#cd('/') #create('${DOMAIN_NAME}','Log') #cd('/Log/${DOMAIN_NAME}'); +#set('FileMinSize', 1) +#set('FileCount', 2) +#set('RotateLogOnStartup', 'true') # Configure the Administration Server # =================================== @@ -80,10 +85,13 @@ ssl = create('${ADMIN_NAME}','SSL') cd('/Servers/${ADMIN_NAME}/SSL/${ADMIN_NAME}') set('Enabled', 'true') -#cd('/Servers/${ADMIN_NAME}') -#create('${ADMIN_NAME}', 'Log') -#cd('/Servers/${ADMIN_NAME}/Log/${ADMIN_NAME}') -#set('FileName', '${LOG_HOME}/${ADMIN_NAME}.log') +# cd('/Servers/${ADMIN_NAME}') +# create('${ADMIN_NAME}', 'Log') +# cd('/Servers/${ADMIN_NAME}/Log/${ADMIN_NAME}') +# set('FileMinSize', 2) +# set('FileCount', 2) +# set('RotateLogOnStartup', 'true') +## set('FileName', '${LOG_HOME}/${ADMIN_NAME}.log') # Set the admin user's username and password @@ -260,9 +268,17 @@ if '${CLUSTER_TYPE}' == "CONFIGURED": set('RetryIntervalBeforeMSIMode', 1) set('Cluster', '${CLUSTER_NAME}') + #cd('/Servers/%s/' % name ) #create(name,'Log') #cd('/Servers/%s/Log/%s' % (name, name)) - #set('FileName', '${LOG_HOME}/%s.log' % name) + #set('FileMinSize', 3) + #set('FileCount', 3) + #set('RotateLogOnStartup', 'true') + ## set('FileName', '${LOG_HOME}/%s.log' % name) + + # HTTP access log + # cd('/Servers/'+serverName1+'/WebServer/'+serverName1+'/WebServerLog/'+serverName1) + else: print('Configuring 
Dynamic Cluster %s' % '${CLUSTER_NAME}') @@ -274,6 +290,15 @@ else: cmo.setListenPort(${MANAGED_SERVER_PORT}) #cmo.setListenAddress('${DOMAIN_UID}-${MANAGED_SERVER_NAME_BASE}${id}') # subst-ignore-missing cmo.setCluster(cl) + + # cd('/ServerTemplates/%s' % templateName) + # create(templateName,'Log') + # cd('/ServerTemplates/%s/Log/%s' % (templateName, templateName)) + # set('FileMinSize', 3) + # set('FileCount', 3) + # set('RotateLogOnStartup', 'true') + + print('Done setting attributes for Server Template: %s' % templateName); templateName = '${CLUSTER_NAME}' + "-template-dummy1" diff --git a/src/integration-tests/introspector/wl-pod.yamlt b/src/integration-tests/introspector/wl-pod.yamlt index 77aec789d13..83b0cb934f9 100644 --- a/src/integration-tests/introspector/wl-pod.yamlt +++ b/src/integration-tests/introspector/wl-pod.yamlt @@ -18,6 +18,8 @@ spec: env: - name: JAVA_OPTIONS value: "-Djava.security.egd=file:/dev/./urandom " + #value: "-Djava.security.egd=file:/dev/./urandom -Dweblogic.log.StdoutSeverityLevel=Debug -Dweblogic.log.LogSeverity=Debug -Dweblogic.StdoutDebugEnabled=true -Dweblogic.debug.DebugStoreIOPhysicalVerbose=true -Dweblogic.kernel.debug=true -Dweblogic.debug.DebugConnection=true -Dweblogic.debug.DebugMessaging=true -Dweblogic.debug.DebugJNDI=true -Dweblogic.debug.DebugJNDIFactories=true -Dweblogic.debug.DebugJNDIResolution=true" + #value: "-Djava.security.egd=file:/dev/./urandom -Dweblogic.log.RotateLogOnStartup=true -Dweblogic.log.FileMinSize=1 -Dweblogic.log.FileCount=1 -Dweblogic.log.StdoutSeverityLevel=Debug -Dweblogic.log.LogSeverity=Debug -Dweblogic.StdoutDebugEnabled=true -Dweblogic.debug.DebugStoreIOPhysicalVerbose=true -Dweblogic.kernel.debug=true -Dweblogic.debug.DebugConnection=true -Dweblogic.debug.DebugMessaging=true -Dweblogic.debug.DebugJNDI=true -Dweblogic.debug.DebugJNDIFactories=true -Dweblogic.debug.DebugJNDIResolution=true" - name: USER_MEM_ARGS value: "-XX:MaxRAMFraction=1 " - name: NODEMGR_JAVA_OPTIONS diff --git 
a/src/integration-tests/model-in-image/deploy-operator.sh b/src/integration-tests/model-in-image/deploy-operator.sh index 38e312c62c0..2e65d838cac 100755 --- a/src/integration-tests/model-in-image/deploy-operator.sh +++ b/src/integration-tests/model-in-image/deploy-operator.sh @@ -36,6 +36,9 @@ mkdir -p $WORKDIR/test-out if [ -e $WORKDIR/test-out/operator-values.orig ]; then helm get values ${OPER_NAME} -n ${OPER_NAMESPACE} > $WORKDIR/test-out/operator-values.cur 2>&1 helm list -n ${OPER_NAMESPACE} | awk '{ print $1 }' >> $WORKDIR/test-out/operator-values.cur + for evar in DOMAIN_NAMESPACE OPER_NAMESPACE OPER_NAME OPER_IMAGE OPER_SA ; do + echo "${evar}=${!evar}" >> $WORKDIR/test-out/operator-values.cur + done if [ "$(cat $WORKDIR/test-out/operator-values.cur)" = "$(cat $WORKDIR/test-out/operator-values.orig)" ]; then echo "@@" echo "@@ Operator already running. Skipping." @@ -69,6 +72,9 @@ kubectl get deployments -n $OPER_NAMESPACE helm get values ${OPER_NAME} -n ${OPER_NAMESPACE} > $WORKDIR/test-out/operator-values.orig 2>&1 helm list -n ${OPER_NAMESPACE} | awk '{ print $1 }' >> $WORKDIR/test-out/operator-values.orig +for evar in DOMAIN_NAMESPACE OPER_NAMESPACE OPER_NAME OPER_IMAGE OPER_SA ; do + echo "${evar}=${!evar}" >> $WORKDIR/test-out/operator-values.orig +done echo "@@ log command: kubectl logs -n $OPER_NAMESPACE -c weblogic-operator deployments/weblogic-operator" diff --git a/src/integration-tests/model-in-image/mii-sample-wrapper/build-model-image.sh b/src/integration-tests/model-in-image/mii-sample-wrapper/build-model-image.sh index 2c0f0eb6e21..c7a46de14b5 100755 --- a/src/integration-tests/model-in-image/mii-sample-wrapper/build-model-image.sh +++ b/src/integration-tests/model-in-image/mii-sample-wrapper/build-model-image.sh @@ -72,6 +72,9 @@ function output_dryrun() { MODEL_YAML_FILES="$(ls $WORKDIR/$MODEL_DIR/*.yaml | xargs | sed 's/ /,/g')" MODEL_ARCHIVE_FILES=$WORKDIR/$MODEL_DIR/archive.zip MODEL_VARIABLE_FILES="$(ls 
$WORKDIR/$MODEL_DIR/*.properties | xargs | sed 's/ /,/g')" +if [ "$WDT_DOMAIN_TYPE" = "WLS" ]; then + CHOWN_ROOT="--chown oracle:root" +fi cat << EOF @@ -109,6 +112,7 @@ dryrun: ${MODEL_YAML_FILES:+--wdtModel ${MODEL_YAML_FILES}} \\ dryrun: ${MODEL_VARIABLE_FILES:+--wdtVariables ${MODEL_VARIABLE_FILES}} \\ dryrun: ${MODEL_ARCHIVE_FILES:+--wdtArchive ${MODEL_ARCHIVE_FILES}} \\ dryrun: --wdtModelOnly \\ +dryrun: ${CHOWN_ROOT:+${CHOWN_ROOT}} \\ dryrun: --wdtDomainType ${WDT_DOMAIN_TYPE} dryrun: dryrun:echo "@@ Info: Success! Model image '$MODEL_IMAGE' build complete. Seconds=\$SECONDS." @@ -161,4 +165,3 @@ else echo "@@ Info: Done!" fi - diff --git a/src/scripts/initialize-internal-operator-identity.sh b/src/scripts/initialize-internal-operator-identity.sh index b1d21325c5b..285c6b3af0a 100755 --- a/src/scripts/initialize-internal-operator-identity.sh +++ b/src/scripts/initialize-internal-operator-identity.sh @@ -73,7 +73,6 @@ function generateInternalIdentity { -srckeystore ${OP_JKS} \ -srcstorepass ${TEMP_PW} \ -destkeystore ${OP_PKCS12} \ - -srcstorepass ${TEMP_PW} \ -deststorepass ${TEMP_PW} \ -deststoretype PKCS12 diff --git a/swagger/pom.xml b/swagger/pom.xml index 5c521264995..65b832e7277 100644 --- a/swagger/pom.xml +++ b/swagger/pom.xml @@ -7,7 +7,7 @@ oracle.kubernetes operator-parent - 3.0.1 + 3.0.2 operator-swagger