diff --git a/garden-service/src/plugins/kubernetes/container/deployment.ts b/garden-service/src/plugins/kubernetes/container/deployment.ts
index 2ccff1a315..9914d3b978 100644
--- a/garden-service/src/plugins/kubernetes/container/deployment.ts
+++ b/garden-service/src/plugins/kubernetes/container/deployment.ts
@@ -13,7 +13,7 @@ import { extend, find, keyBy, merge, set } from "lodash"
 import { ContainerModule, ContainerService } from "../../container/config"
 import { createIngressResources } from "./ingress"
 import { createServiceResources } from "./service"
-import { waitForResources, compareDeployedObjects } from "../status/status"
+import { waitForResources, compareDeployedResources } from "../status/status"
 import { apply, deleteObjectsBySelector } from "../kubectl"
 import { getAppNamespace } from "../namespace"
 import { PluginContext } from "../../../plugin-context"
@@ -144,7 +144,7 @@ export async function deployContainerServiceBlueGreen(
   const serviceManifest = find(manifests, (manifest) => manifest.kind == "Service")
   const patchedServiceManifest = merge(serviceManifest, servicePatchBody)
   // Compare with the deployed Service
-  const result = await compareDeployedObjects(k8sCtx, api, namespace, [patchedServiceManifest], log, true)
+  const result = await compareDeployedResources(k8sCtx, api, namespace, [patchedServiceManifest], log)

   // If the result is outdated it means something in the Service definition itself changed
   // and we need to apply the whole Service manifest. Otherwise we just patch it.
diff --git a/garden-service/src/plugins/kubernetes/container/status.ts b/garden-service/src/plugins/kubernetes/container/status.ts
index 3f00f421ce..667f0f222a 100644
--- a/garden-service/src/plugins/kubernetes/container/status.ts
+++ b/garden-service/src/plugins/kubernetes/container/status.ts
@@ -16,7 +16,7 @@ import { sleep } from "../../../util/util"
 import { GetServiceStatusParams } from "../../../types/plugin/service/getServiceStatus"
 import { ContainerModule } from "../../container/config"
 import { KubeApi } from "../api"
-import { compareDeployedObjects as compareDeployedResources } from "../status/status"
+import { compareDeployedResources } from "../status/status"
 import { getIngresses } from "./ingress"
 import { getAppNamespace } from "../namespace"
 import { KubernetesPluginContext } from "../config"
@@ -47,7 +47,7 @@ export async function getContainerServiceStatus({
   // FIXME: [objects, matched] and ingresses can be run in parallel
   const { workload, manifests } = await createContainerManifests(k8sCtx, log, service, runtimeContext, hotReload)

-  const { state, remoteResources } = await compareDeployedResources(k8sCtx, api, namespace, manifests, log, true)
+  const { state, remoteResources } = await compareDeployedResources(k8sCtx, api, namespace, manifests, log)
   const ingresses = await getIngresses(service, api, provider)

   const forwardablePorts: ForwardablePort[] = service.spec.ports
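Note: both call sites above change the same way. compareDeployedObjects becomes compareDeployedResources, and the trailing skipDiff boolean is dropped, since the diffing strategy is now decided inside the function itself (see the status/status.ts hunks further down). Callers still destructure the same ComparisonResult (state plus remoteResources). Side by side, exactly as in this diff:

    // before: each caller picked a diffing strategy via the skipDiff flag
    const result = await compareDeployedObjects(k8sCtx, api, namespace, manifests, log, true)

    // after: one comparison path for all callers
    const result = await compareDeployedResources(k8sCtx, api, namespace, manifests, log)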
"../status/status" import { KubeApi } from "../api" import { getAppNamespace } from "../namespace" import { LogEntry } from "../../../logger/log-entry" import { helm } from "./helm-cli" import { HelmModule } from "./config" -import { getChartResources, findServiceResource } from "./common" +import { getChartResources, findServiceResource, getReleaseName } from "./common" import { buildHelmModule } from "./build" import { configureHotReload } from "../hot-reload" import { getHotReloadSpec } from "./hot-reload" @@ -37,7 +36,7 @@ const helmStatusCodeMap: { [code: number]: ServiceState } = { } interface HelmStatusDetail { - remoteResources: KubernetesServerResource[] + remoteResources?: KubernetesServerResource[] } export type HelmServiceStatus = ServiceStatus @@ -51,15 +50,19 @@ export async function getServiceStatus({ }: GetServiceStatusParams): Promise { const k8sCtx = ctx // need to build to be able to check the status - const buildStatus = await getExecModuleBuildStatus({ ctx: k8sCtx, module, log }) - if (!buildStatus.ready) { - await buildHelmModule({ ctx: k8sCtx, module, log }) - } + await buildHelmModule({ ctx: k8sCtx, module, log }) // first check if the installed objects on the cluster match the current code const chartResources = await getChartResources(k8sCtx, module, log) + const provider = k8sCtx.provider + const namespace = await getAppNamespace(k8sCtx, log, provider) + const releaseName = getReleaseName(module) + + const detail: HelmStatusDetail = {} + let state: ServiceState if (hotReload) { + // If we're running with hot reload enabled, we need to alter the appropriate resources and then compare directly. const target = await findServiceResource({ ctx: k8sCtx, log, chartResources, module }) const hotReloadSpec = getHotReloadSpec(service) const resourceSpec = module.spec.serviceResource! @@ -70,17 +73,23 @@ export async function getServiceStatus({ hotReloadArgs: resourceSpec.hotReloadArgs, containerName: resourceSpec.containerName, }) - } - const provider = k8sCtx.provider - const api = await KubeApi.factory(log, provider) - const namespace = await getAppNamespace(k8sCtx, log, provider) - - let { state, remoteResources } = await compareDeployedObjects(k8sCtx, api, namespace, chartResources, log, false) + const api = await KubeApi.factory(log, provider) - const forwardablePorts = getForwardablePorts(remoteResources) + const comparison = await compareDeployedResources(k8sCtx, api, namespace, chartResources, log) + state = comparison.state + detail.remoteResources = comparison.remoteResources + } else { + // Otherwise we trust Helm to report the status of the chart. 
diff --git a/garden-service/src/plugins/kubernetes/kubectl.ts b/garden-service/src/plugins/kubernetes/kubectl.ts
index 517e3110f1..9f73621a02 100644
--- a/garden-service/src/plugins/kubernetes/kubectl.ts
+++ b/garden-service/src/plugins/kubernetes/kubectl.ts
@@ -11,11 +11,14 @@ import { encodeYamlMulti } from "../../util/util"
 import { BinaryCmd, ExecParams } from "../../util/ext-tools"
 import { LogEntry } from "../../logger/log-entry"
 import { KubernetesProvider } from "./config"
+import { KubernetesResource } from "./types"
+import { gardenAnnotationKey } from "../../util/string"
+import stringify from "json-stable-stringify"

 export interface ApplyParams {
   log: LogEntry
   provider: KubernetesProvider
-  manifests: object[]
+  manifests: KubernetesResource[]
   dryRun?: boolean
   force?: boolean
   pruneSelector?: string
@@ -27,13 +30,25 @@ export const KUBECTL_DEFAULT_TIMEOUT = 300
 export async function apply({
   log,
   provider,
-  manifests: objects,
+  manifests,
   dryRun = false,
   force = false,
   namespace,
   pruneSelector,
 }: ApplyParams) {
-  const input = Buffer.from(encodeYamlMulti(objects))
+  // Add the raw input as an annotation on each manifest (this is helpful beyond kubectl's own annotation, because
+  // kubectl applies some normalization/transformation that is sometimes difficult to reason about).
+  for (const manifest of manifests) {
+    if (!manifest.metadata.annotations) {
+      manifest.metadata.annotations = {}
+    }
+    if (manifest.metadata.annotations[gardenAnnotationKey("last-applied-configuration")]) {
+      delete manifest.metadata.annotations[gardenAnnotationKey("last-applied-configuration")]
+    }
+    manifest.metadata.annotations[gardenAnnotationKey("last-applied-configuration")] = stringify(manifest)
+  }
+
+  const input = Buffer.from(encodeYamlMulti(manifests))

 let args = ["apply"]
 dryRun && args.push("--dry-run")
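Two details worth noting in the apply() change above. First, any stale annotation is deleted before stringify(manifest) is called, so the stored snapshot never contains an older copy of itself; the comparison side (status/status.ts below) strips the same key from the incoming manifest before comparing. Second, json-stable-stringify is used instead of JSON.stringify because it sorts object keys, which makes the later string-equality check immune to key insertion order. A minimal illustration with made-up data:

    import stringify from "json-stable-stringify"

    const a = { metadata: { name: "web", namespace: "default" } }
    const b = { metadata: { namespace: "default", name: "web" } }

    console.log(JSON.stringify(a) === JSON.stringify(b)) // false -- key order differs
    console.log(stringify(a) === stringify(b)) // true -- keys are sorted before serializing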
diff --git a/garden-service/src/plugins/kubernetes/kubernetes-module/handlers.ts b/garden-service/src/plugins/kubernetes/kubernetes-module/handlers.ts
index a911aea078..cbd42a41ee 100644
--- a/garden-service/src/plugins/kubernetes/kubernetes-module/handlers.ts
+++ b/garden-service/src/plugins/kubernetes/kubernetes-module/handlers.ts
@@ -17,7 +17,7 @@ import { getNamespace, getAppNamespace } from "../namespace"
 import { KubernetesPluginContext } from "../config"
 import { KubernetesResource, KubernetesServerResource } from "../types"
 import { ServiceStatus } from "../../../types/service"
-import { compareDeployedObjects, waitForResources } from "../status/status"
+import { compareDeployedResources, waitForResources } from "../status/status"
 import { KubeApi } from "../api"
 import { ModuleAndRuntimeActionHandlers } from "../../../types/plugin/plugin"
 import { getAllLogs } from "../logs"
@@ -68,7 +68,7 @@ async function getServiceStatus({
   const api = await KubeApi.factory(log, k8sCtx.provider)
   const manifests = await getManifests(api, log, module, namespace)

-  const { state, remoteResources } = await compareDeployedObjects(k8sCtx, api, namespace, manifests, log, false)
+  const { state, remoteResources } = await compareDeployedResources(k8sCtx, api, namespace, manifests, log)

   const forwardablePorts = getForwardablePorts(remoteResources)
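Context for the status/status.ts hunks below: the first thing compareDeployedResources does is unroll v1 List manifests into their items, so a kubernetes-module manifest file that wraps several resources in a single List is still compared resource by resource. A sketch mirroring the flatten() call in the diff:

    // A v1 List wraps several resources in one manifest.
    const list: any = {
      apiVersion: "v1",
      kind: "List",
      items: [
        { apiVersion: "apps/v1", kind: "Deployment", metadata: { name: "web" } },
        { apiVersion: "v1", kind: "Service", metadata: { name: "web" } },
      ],
    }

    // Same logic as the flatten() call in compareDeployedResources:
    const unrolled = [list].flatMap((r: any) => (r.apiVersion === "v1" && r.kind === "List" ? r.items : [r]))
    console.log(unrolled.length) // 2 -- the Deployment and the Service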
diff --git a/garden-service/src/plugins/kubernetes/status/status.ts b/garden-service/src/plugins/kubernetes/status/status.ts
index 3883bb441f..d5587114d9 100644
--- a/garden-service/src/plugins/kubernetes/status/status.ts
+++ b/garden-service/src/plugins/kubernetes/status/status.ts
@@ -10,13 +10,13 @@ import { diffString } from "json-diff"
 import { DeploymentError } from "../../../exceptions"
 import { PluginContext } from "../../../plugin-context"
 import { ServiceState, combineStates } from "../../../types/service"
-import { sleep, encodeYamlMulti, deepMap } from "../../../util/util"
+import { sleep, deepMap } from "../../../util/util"
 import { KubeApi } from "../api"
-import { KUBECTL_DEFAULT_TIMEOUT, kubectl } from "../kubectl"
+import { KUBECTL_DEFAULT_TIMEOUT } from "../kubectl"
 import { getAppNamespace } from "../namespace"
 import Bluebird from "bluebird"
 import { KubernetesResource, KubernetesServerResource } from "../types"
-import { zip, isArray, isPlainObject, pickBy, mapValues, flatten } from "lodash"
+import { zip, isArray, isPlainObject, pickBy, mapValues, flatten, cloneDeep } from "lodash"
 import { KubernetesProvider, KubernetesPluginContext } from "../config"
 import { isSubset } from "../../../util/is-subset"
 import { LogEntry } from "../../../logger/log-entry"
@@ -32,6 +32,8 @@ import { getPods } from "../util"
 import { checkWorkloadStatus } from "./workload"
 import { checkPodStatus } from "./pod"
 import { waitForServiceEndpoints } from "./service"
+import { gardenAnnotationKey } from "../../../util/string"
+import stringify from "json-stable-stringify"

 export interface ResourceStatus {
   state: ServiceState
@@ -240,35 +242,34 @@ interface ComparisonResult {
 /**
  * Check if each of the given Kubernetes objects matches what's installed in the cluster
  */
-export async function compareDeployedObjects(
+export async function compareDeployedResources(
   ctx: KubernetesPluginContext,
   api: KubeApi,
   namespace: string,
-  resources: KubernetesResource[],
-  log: LogEntry,
-  skipDiff: boolean
+  manifests: KubernetesResource[],
+  log: LogEntry
 ): Promise<ComparisonResult> {
   // Unroll any `List` resource types
-  resources = flatten(resources.map((r: any) => (r.apiVersion === "v1" && r.kind === "List" ? r.items : [r])))
+  manifests = flatten(manifests.map((r: any) => (r.apiVersion === "v1" && r.kind === "List" ? r.items : [r])))

   // Check if any resources are missing from the cluster.
-  const maybeDeployedObjects = await Bluebird.map(resources, (resource) =>
+  const maybeDeployedObjects = await Bluebird.map(manifests, (resource) =>
     getDeployedResource(ctx, ctx.provider, resource, log)
   )
-  const deployedObjects = maybeDeployedObjects.filter((o) => o !== null)
+  const deployedResources = maybeDeployedObjects.filter((o) => o !== null)

   const result: ComparisonResult = {
     state: "unknown",
-    remoteResources: deployedObjects.filter((o) => o !== null),
+    remoteResources: deployedResources.filter((o) => o !== null),
   }

   const logDescription = (resource: KubernetesResource) => `${resource.kind}/${resource.metadata.name}`

-  const missingObjectNames = zip(resources, maybeDeployedObjects)
+  const missingObjectNames = zip(manifests, maybeDeployedObjects)
     .filter(([_, deployed]) => !deployed)
     .map(([resource, _]) => logDescription(resource!))

-  if (missingObjectNames.length === resources.length) {
+  if (missingObjectNames.length === manifests.length) {
     // All resources missing.
     log.verbose(`All resources missing from cluster`)
     result.state = "missing"
@@ -281,48 +282,15 @@ export async function compareDeployedObjects(
   }

   // From here, the state can only be "ready" or "outdated", so we proceed to compare the old & new specs.
+  log.debug(`Getting currently deployed resource statuses...`)

-  // TODO: The skipDiff parameter is a temporary workaround until we finish implementing diffing in a more reliable way.
-  if (!skipDiff) {
-    // First we try using `kubectl diff`, to avoid potential normalization issues (i.e. false negatives). This errors
-    // with exit code 1 if there is a mismatch, but may also fail with the same exit code for a number of other reasons,
-    // including the cluster not supporting dry-runs, certain CRDs not supporting dry-runs etc.
-    const yamlResources = await encodeYamlMulti(resources)
-    const provider = ctx.provider
-
-    try {
-      await kubectl.exec({ log, provider, namespace, args: ["diff", "-f", "-"], input: Buffer.from(yamlResources) })
-
-      // If the commands exits succesfully, the check was successful and the diff is empty.
-      log.verbose(`kubectl diff indicates all resources match the deployed resources.`)
-      result.state = "ready"
-      return result
-    } catch (err) {
-      // Exited with non-zero code. Check for error messages on stderr. If one is there, the command was unable to
-      // complete the check, so we fall back to our own mechanism. Otherwise the command worked, but one or more
-      // resources are missing or outdated.
-      if (err.stderr && err.stderr.trim() !== "exit status 1") {
-        log.debug(`kubectl diff failed: ${err.message}\n${err.stderr}`)
-      } else {
-        log.debug(`kubectl diff indicates one or more resources are outdated.`)
-        log.silly(err.stdout)
-        result.state = "outdated"
-        return result
-      }
-    }
-  }
-
-  // Using kubectl diff didn't work, so we fall back to our own comparison check, which works in _most_ cases,
-  // but doesn't exhaustively handle normalization issues.
-  log.debug(`Getting currently deployed resources...`)
-
-  const deployedObjectStatuses: ResourceStatus[] = await Bluebird.map(deployedObjects, async (resource) =>
+  const deployedObjectStatuses: ResourceStatus[] = await Bluebird.map(deployedResources, async (resource) =>
     checkResourceStatus(api, namespace, resource, log)
   )

   const deployedStates = deployedObjectStatuses.map((s) => s.state)
   if (deployedStates.find((s) => s !== "ready")) {
-    const descriptions = zip(deployedObjects, deployedStates)
+    const descriptions = zip(deployedResources, deployedStates)
       .filter(([_, s]) => s !== "ready")
       .map(([o, s]) => `${logDescription(o!)}: "${s}"`)
       .join("\n")
@@ -340,48 +308,72 @@

   log.verbose(`Comparing expected and deployed resources...`)

-  for (let [newSpec, existingSpec] of zip(resources, deployedObjects) as KubernetesResource[][]) {
+  for (let [newManifest, deployedResource] of zip(manifests, deployedResources) as KubernetesResource[][]) {
+    let manifest = cloneDeep(newManifest)
+
+    if (!manifest.metadata.annotations) {
+      manifest.metadata.annotations = {}
+    }
+
+    // Discard any last applied config from the input manifest
+    if (manifest.metadata.annotations[gardenAnnotationKey("last-applied-configuration")]) {
+      delete manifest.metadata.annotations[gardenAnnotationKey("last-applied-configuration")]
+    }
+
+    // Start by checking for "last applied configuration" annotations and comparing against those.
+    // This can be more accurate than comparing against resolved resources.
+    if (deployedResource.metadata && deployedResource.metadata.annotations) {
+      const lastApplied =
+        deployedResource.metadata.annotations[gardenAnnotationKey("last-applied-configuration")] ||
+        deployedResource.metadata.annotations["kubectl.kubernetes.io/last-applied-configuration"]
+
+      // The new manifest matches the last applied manifest
+      if (lastApplied && stringify(manifest) === lastApplied) {
+        continue
+      }
+    }
+
     // to avoid normalization issues, we convert all numeric values to strings and then compare
-    newSpec = deepMap(newSpec, (v) => (typeof v === "number" ? v.toString() : v))
-    existingSpec = deepMap(existingSpec, (v) => (typeof v === "number" ? v.toString() : v))
+    manifest = deepMap(manifest, (v) => (typeof v === "number" ? v.toString() : v))
+    deployedResource = deepMap(deployedResource, (v) => (typeof v === "number" ? v.toString() : v))

     // the API version may implicitly change when deploying
-    existingSpec.apiVersion = newSpec.apiVersion
+    manifest.apiVersion = deployedResource.apiVersion

-    // the namespace property is silently dropped when added to non-namespaced
-    if (newSpec.metadata.namespace && existingSpec.metadata.namespace === undefined) {
-      delete newSpec.metadata.namespace
+    // the namespace property is silently dropped when added to non-namespaced resources
+    if (manifest.metadata.namespace && deployedResource.metadata.namespace === undefined) {
+      delete manifest.metadata.namespace
     }

-    if (!existingSpec.metadata.annotations) {
-      existingSpec.metadata.annotations = {}
+    if (!deployedResource.metadata.annotations) {
+      deployedResource.metadata.annotations = {}
     }

     // handle auto-filled properties (this is a bit of a design issue in the K8s API)
-    if (newSpec.kind === "Service" && newSpec.spec.clusterIP === "") {
-      delete newSpec.spec.clusterIP
+    if (manifest.kind === "Service" && manifest.spec.clusterIP === "") {
+      delete manifest.spec.clusterIP
     }

     // NOTE: this approach won't fly in the long run, but hopefully we can climb out of this mess when
     // `kubectl diff` is ready, or server-side apply/diff is ready
-    if (newSpec.kind === "DaemonSet" || newSpec.kind === "Deployment" || newSpec.kind == "StatefulSet") {
+    if (manifest.kind === "DaemonSet" || manifest.kind === "Deployment" || manifest.kind == "StatefulSet") {
       // handle properties that are omitted in the response because they have the default value
       // (another design issue in the K8s API)
-      if (newSpec.spec.minReadySeconds === 0) {
-        delete newSpec.spec.minReadySeconds
+      if (manifest.spec.minReadySeconds === 0) {
+        delete manifest.spec.minReadySeconds
       }
-      if (newSpec.spec.template && newSpec.spec.template.spec && newSpec.spec.template.spec.hostNetwork === false) {
-        delete newSpec.spec.template.spec.hostNetwork
+      if (manifest.spec.template && manifest.spec.template.spec && manifest.spec.template.spec.hostNetwork === false) {
+        delete manifest.spec.template.spec.hostNetwork
       }
     }

     // clean null values
-    newSpec = removeNull(newSpec)
+    manifest = removeNull(manifest)

-    if (!isSubset(existingSpec, newSpec)) {
-      if (newSpec) {
-        log.verbose(`Resource ${newSpec.metadata.name} is not a superset of deployed resource`)
-        log.debug(diffString(existingSpec, newSpec))
+    if (!isSubset(deployedResource, manifest)) {
+      if (manifest) {
+        log.verbose(`Resource ${manifest.metadata.name} is not a superset of deployed resource`)
+        log.debug(diffString(deployedResource, manifest))
       }
       // console.log(JSON.stringify(resource, null, 4))
       // console.log(JSON.stringify(existingSpec, null, 4))
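Putting the apply-side and comparison-side changes together: each resource now gets a cheap fast path (string equality against a last-applied annotation, Garden's own or kubectl's) before the slower normalize-and-isSubset comparison runs. Condensed into a standalone helper for illustration only (matchesLastApplied is not a function in this diff, just a restatement of the loop's first step):

    import stringify from "json-stable-stringify"
    import { gardenAnnotationKey } from "../../../util/string"
    import { KubernetesResource } from "../types"

    // Restatement of the fast path above, for illustration.
    function matchesLastApplied(manifest: KubernetesResource, deployed: KubernetesResource): boolean {
      const annotations = (deployed.metadata && deployed.metadata.annotations) || {}
      const lastApplied =
        annotations[gardenAnnotationKey("last-applied-configuration")] ||
        annotations["kubectl.kubernetes.io/last-applied-configuration"]
      // stringify is json-stable-stringify, so key order cannot cause a false mismatch here.
      return !!lastApplied && stringify(manifest) === lastApplied
    }

One caveat: the kubectl.kubernetes.io annotation is serialized by kubectl itself rather than by json-stable-stringify, so matching against it is best-effort; a miss simply falls through to the isSubset comparison.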
diff --git a/garden-service/src/util/string.ts b/garden-service/src/util/string.ts
index 0a552aa802..be3f2b1cd9 100644
--- a/garden-service/src/util/string.ts
+++ b/garden-service/src/util/string.ts
@@ -20,6 +20,7 @@ const gardenAnnotationPrefix = "garden.io/"

 export type GardenAnnotationKey =
   | "generated"
+  | "last-applied-configuration"
   | "hot-reload"
   | "module"
   | "moduleVersion"
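For reference, gardenAnnotationKey (defined in this same file, unchanged by the diff) presumably just prefixes the key with gardenAnnotationPrefix, so the new union member yields the annotation key used throughout the hunks above:

    // Presumed shape of the existing helper (not part of this diff), based on the
    // gardenAnnotationPrefix constant visible in the hunk header above:
    export function gardenAnnotationKey(key: GardenAnnotationKey): string {
      return gardenAnnotationPrefix + key // e.g. "garden.io/last-applied-configuration"
    }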