diff --git a/CHANGELOG.md b/CHANGELOG.md index 35501b924b..90eb0ff394 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ ## Unreleased +### Fixed + +- Objects created on clusters older than 1.18 will no longer see a + `before-first-apply` conflict when Pulumi performs a server-side apply for + the first time. (https://github.com/pulumi/pulumi-kubernetes/pull/3275) + ## 4.18.2 (October 16, 2024) ### Fixed diff --git a/provider/cmd/pulumi-resource-kubernetes/schema.json b/provider/cmd/pulumi-resource-kubernetes/schema.json index fba434d885..2106d92d88 100644 --- a/provider/cmd/pulumi-resource-kubernetes/schema.json +++ b/provider/cmd/pulumi-resource-kubernetes/schema.json @@ -90562,7 +90562,7 @@ ] }, "kubernetes:helm.sh/v3:Chart": { - "description": "{{% notes type=\"info\" %}}\nA newer version of this resource is available as [kubernetes.helm.sh/v4.Chart](/registry/packages/kubernetes/api-docs/helm/v4/chart/).\nSee the corresponding [blog post](/blog/kubernetes-yaml-v2/) for more information.\n{{% /notes %}}\n\nChart is a component representing a collection of resources described by an arbitrary Helm Chart.\n\nThe Helm Chart can be fetched from any source that is accessible to the `helm` command line. Values in the `values.yml` file can be overridden using `ChartOpts.values` (equivalent to `--set` or having multiple `values.yml` files). Objects can be transformed arbitrarily by supplying callbacks to `ChartOpts.transformations`.\n\nThe `Chart` resource renders the templates from your chart and then manage them directly with the Pulumi Kubernetes provider.\n\n`Chart` does not use Tiller. The Chart specified is copied and expanded locally; the semantics are equivalent to running `helm template` and then using Pulumi to manage the resulting YAML manifests. Any values that would be retrieved in-cluster are assigned fake values, and none of Tiller's server-side validity testing is executed.\n\nYou may also want to consider the `Release` resource as an alternative method for managing helm charts. 
For more information about the trade-offs between these options see: [Choosing the right Helm resource for your use case](https://www.pulumi.com/registry/packages/kubernetes/how-to-guides/choosing-the-right-helm-resource-for-your-use-case)\n\nThis resource is provided for the following languages: Node.js (JavaScript, TypeScript), Python, Go, and .NET (C#, F#, VB).\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n### Local Chart Directory\n\n```typescript\nimport * as k8s from \"@pulumi/kubernetes\";\n\nconst nginxIngress = new k8s.helm.v3.Chart(\"nginx-ingress\", {\n path: \"./nginx-ingress\",\n});\n```\n```python\nfrom pulumi_kubernetes.helm.v3 import Chart, LocalChartOpts\n\nnginx_ingress = Chart(\n \"nginx-ingress\",\n LocalChartOpts(\n path=\"./nginx-ingress\",\n ),\n)\n```\n```csharp\nusing System.Threading.Tasks;\nusing Pulumi;\nusing Pulumi.Kubernetes.Helm;\nusing Pulumi.Kubernetes.Helm.V3;\n\nclass HelmStack : Stack\n{\n public HelmStack()\n {\n var nginx = new Chart(\"nginx-ingress\", new LocalChartArgs\n {\n Path = \"./nginx-ingress\",\n });\n\n }\n}\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/helm/v3\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := helm.NewChart(ctx, \"nginx-ingress\", helm.ChartArgs{\n\t\t\tPath: pulumi.String(\"./nginx-ingress\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n}\n```\n{{% /example %}}\n{{% example %}}\n### Remote Chart\n\n```typescript\nimport * as k8s from \"@pulumi/kubernetes\";\n\nconst nginxIngress = new k8s.helm.v3.Chart(\"nginx-ingress\", {\n chart: \"nginx-ingress\",\n version: \"1.24.4\",\n fetchOpts:{\n repo: \"https://charts.helm.sh/stable\",\n },\n});\n```\n```python\nfrom pulumi_kubernetes.helm.v3 import Chart, ChartOpts, FetchOpts\n\nnginx_ingress = Chart(\n \"nginx-ingress\",\n ChartOpts(\n chart=\"nginx-ingress\",\n version=\"1.24.4\",\n fetch_opts=FetchOpts(\n repo=\"https://charts.helm.sh/stable\",\n ),\n ),\n)\n```\n```csharp\nusing System.Threading.Tasks;\nusing Pulumi;\nusing Pulumi.Kubernetes.Helm;\nusing Pulumi.Kubernetes.Helm.V3;\n\nclass HelmStack : Stack\n{\n public HelmStack()\n {\n var nginx = new Chart(\"nginx-ingress\", new ChartArgs\n {\n Chart = \"nginx-ingress\",\n Version = \"1.24.4\",\n FetchOptions = new ChartFetchArgs\n {\n Repo = \"https://charts.helm.sh/stable\"\n }\n });\n\n }\n}\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/helm/v3\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := helm.NewChart(ctx, \"nginx-ingress\", helm.ChartArgs{\n\t\t\tChart: pulumi.String(\"nginx-ingress\"),\n\t\t\tVersion: pulumi.String(\"1.24.4\"),\n\t\t\tFetchArgs: helm.FetchArgs{\n\t\t\t\tRepo: pulumi.String(\"https://charts.helm.sh/stable\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n}\n```\n\n{{% /example %}}\n{{% example %}}\n### Set Chart Values\n\n```typescript\nimport * as k8s from \"@pulumi/kubernetes\";\n\nconst nginxIngress = new k8s.helm.v3.Chart(\"nginx-ingress\", {\n chart: \"nginx-ingress\",\n version: \"1.24.4\",\n fetchOpts:{\n repo: \"https://charts.helm.sh/stable\",\n },\n values: {\n controller: {\n metrics: {\n enabled: true,\n }\n }\n },\n});\n```\n```python\nfrom pulumi_kubernetes.helm.v3 import Chart, ChartOpts, FetchOpts\n\nnginx_ingress = 
Chart(\n \"nginx-ingress\",\n ChartOpts(\n chart=\"nginx-ingress\",\n version=\"1.24.4\",\n fetch_opts=FetchOpts(\n repo=\"https://charts.helm.sh/stable\",\n ),\n values={\n \"controller\": {\n \"metrics\": {\n \"enabled\": True,\n },\n },\n },\n ),\n)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Threading.Tasks;\nusing Pulumi;\nusing Pulumi.Kubernetes.Helm;\nusing Pulumi.Kubernetes.Helm.V3;\n\nclass HelmStack : Stack\n{\n public HelmStack()\n {\n var values = new Dictionary\n {\n [\"controller\"] = new Dictionary\n {\n [\"metrics\"] = new Dictionary\n {\n [\"enabled\"] = true\n }\n },\n };\n\n var nginx = new Chart(\"nginx-ingress\", new ChartArgs\n {\n Chart = \"nginx-ingress\",\n Version = \"1.24.4\",\n FetchOptions = new ChartFetchArgs\n {\n Repo = \"https://charts.helm.sh/stable\"\n },\n Values = values,\n });\n\n }\n}\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/helm/v3\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := helm.NewChart(ctx, \"nginx-ingress\", helm.ChartArgs{\n\t\t\tChart: pulumi.String(\"nginx-ingress\"),\n\t\t\tVersion: pulumi.String(\"1.24.4\"),\n\t\t\tFetchArgs: helm.FetchArgs{\n\t\t\t\tRepo: pulumi.String(\"https://charts.helm.sh/stable\"),\n\t\t\t},\n\t\t\tValues: pulumi.Map{\n\t\t\t\t\"controller\": pulumi.Map{\n\t\t\t\t\t\"metrics\": pulumi.Map{\n\t\t\t\t\t\t\"enabled\": pulumi.Bool(true),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n}\n```\n{{% /example %}}\n{{% example %}}\n### Deploy Chart into Namespace\n\n```typescript\nimport * as k8s from \"@pulumi/kubernetes\";\n\nconst nginxIngress = new k8s.helm.v3.Chart(\"nginx-ingress\", {\n chart: \"nginx-ingress\",\n version: \"1.24.4\",\n namespace: \"test-namespace\",\n fetchOpts:{\n repo: \"https://charts.helm.sh/stable\",\n },\n});\n```\n```python\nfrom pulumi_kubernetes.helm.v3 import Chart, ChartOpts, FetchOpts\n\nnginx_ingress = Chart(\n \"nginx-ingress\",\n ChartOpts(\n chart=\"nginx-ingress\",\n version=\"1.24.4\",\n namespace=\"test-namespace\",\n fetch_opts=FetchOpts(\n repo=\"https://charts.helm.sh/stable\",\n ),\n ),\n)\n```\n```csharp\nusing System.Threading.Tasks;\nusing Pulumi;\nusing Pulumi.Kubernetes.Helm;\nusing Pulumi.Kubernetes.Helm.V3;\n\nclass HelmStack : Stack\n{\n public HelmStack()\n {\n var nginx = new Chart(\"nginx-ingress\", new ChartArgs\n {\n Chart = \"nginx-ingress\",\n Version = \"1.24.4\",\n Namespace = \"test-namespace\",\n FetchOptions = new ChartFetchArgs\n {\n Repo = \"https://charts.helm.sh/stable\"\n },\n });\n\n }\n}\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/helm/v3\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := helm.NewChart(ctx, \"nginx-ingress\", helm.ChartArgs{\n\t\t\tChart: pulumi.String(\"nginx-ingress\"),\n\t\t\tVersion: pulumi.String(\"1.24.4\"),\n\t\t\tNamespace: pulumi.String(\"test-namespace\"),\n\t\t\tFetchArgs: helm.FetchArgs{\n\t\t\t\tRepo: pulumi.String(\"https://charts.helm.sh/stable\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n}\n```\n{{% /example %}}\n{{% example %}}\n### Depend on a Chart resource\n\n```typescript\nimport * as k8s from \"@pulumi/kubernetes\";\n\nconst nginxIngress = new k8s.helm.v3.Chart(\"nginx-ingress\", {\n chart: 
\"nginx-ingress\",\n version: \"1.24.4\",\n namespace: \"test-namespace\",\n fetchOpts:{\n repo: \"https://charts.helm.sh/stable\",\n },\n});\n\n// Create a ConfigMap depending on the Chart. The ConfigMap will not be created until after all of the Chart\n// resources are ready. Note the use of the `ready` attribute; depending on the Chart resource directly will not work.\nnew k8s.core.v1.ConfigMap(\"foo\", {\n metadata: { namespace: namespaceName },\n data: {foo: \"bar\"}\n}, {dependsOn: nginxIngress.ready})\n```\n```python\nimport pulumi\nfrom pulumi_kubernetes.core.v1 import ConfigMap, ConfigMapInitArgs\nfrom pulumi_kubernetes.helm.v3 import Chart, ChartOpts, FetchOpts\n\nnginx_ingress = Chart(\n \"nginx-ingress\",\n ChartOpts(\n chart=\"nginx-ingress\",\n version=\"1.24.4\",\n namespace=\"test-namespace\",\n fetch_opts=FetchOpts(\n repo=\"https://charts.helm.sh/stable\",\n ),\n ),\n)\n\n# Create a ConfigMap depending on the Chart. The ConfigMap will not be created until after all of the Chart\n# resources are ready. Note the use of the `ready` attribute; depending on the Chart resource directly will not work.\nConfigMap(\"foo\", ConfigMapInitArgs(data={\"foo\": \"bar\"}), opts=pulumi.ResourceOptions(depends_on=nginx_ingress.ready))\n```\n```csharp\nusing System.Threading.Tasks;\nusing Pulumi;\nusing Pulumi.Kubernetes.Core.V1;\nusing Pulumi.Kubernetes.Helm;\nusing Pulumi.Kubernetes.Helm.V3;\n\nclass HelmStack : Stack\n{\n public HelmStack()\n {\n var nginx = new Chart(\"nginx-ingress\", new ChartArgs\n {\n Chart = \"nginx-ingress\",\n Version = \"1.24.4\",\n Namespace = \"test-namespace\",\n FetchOptions = new ChartFetchArgs\n {\n Repo = \"https://charts.helm.sh/stable\"\n },\n });\n\n // Create a ConfigMap depending on the Chart. The ConfigMap will not be created until after all of the Chart\n // resources are ready. Note the use of the `Ready()` method; depending on the Chart resource directly will\n // not work.\n new ConfigMap(\"foo\", new Pulumi.Kubernetes.Types.Inputs.Core.V1.ConfigMapArgs\n {\n Data = new InputMap\n {\n {\"foo\", \"bar\"}\n },\n }, new CustomResourceOptions\n {\n DependsOn = nginx.Ready(),\n });\n\n }\n}\n```\n```go\npackage main\n\nimport (\n\tcorev1 \"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/core/v1\"\n\t\"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/helm/v3\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := helm.NewChart(ctx, \"nginx-ingress\", helm.ChartArgs{\n\t\t\tChart: pulumi.String(\"nginx-ingress\"),\n\t\t\tVersion: pulumi.String(\"1.24.4\"),\n\t\t\tNamespace: pulumi.String(\"test-namespace\"),\n\t\t\tFetchArgs: helm.FetchArgs{\n\t\t\t\tRepo: pulumi.String(\"https://charts.helm.sh/stable\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Create a ConfigMap depending on the Chart. The ConfigMap will not be created until after all of the Chart\n\t\t// resources are ready. Note the use of the `Ready` attribute, which is used with `DependsOnInputs` rather than\n\t\t// `DependsOn`. 
Depending on the Chart resource directly, or using `DependsOn` will not work.\n\t\t_, err = corev1.NewConfigMap(ctx, \"cm\", &corev1.ConfigMapArgs{\n\t\t\tData: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t}, pulumi.DependsOnInputs(chart.Ready))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n}\n```\n{{% /example %}}\n{{% example %}}\n### Chart with Transformations\n\n```typescript\nimport * as k8s from \"@pulumi/kubernetes\";\n\nconst nginxIngress = new k8s.helm.v3.Chart(\"nginx-ingress\", {\n chart: \"nginx-ingress\",\n version: \"1.24.4\",\n fetchOpts:{\n repo: \"https://charts.helm.sh/stable\",\n },\n transformations: [\n // Make every service private to the cluster, i.e., turn all services into ClusterIP instead of LoadBalancer.\n (obj: any, opts: pulumi.CustomResourceOptions) => {\n if (obj.kind === \"Service\" && obj.apiVersion === \"v1\") {\n if (obj.spec && obj.spec.type && obj.spec.type === \"LoadBalancer\") {\n obj.spec.type = \"ClusterIP\";\n }\n }\n },\n\n // Set a resource alias for a previous name.\n (obj: any, opts: pulumi.CustomResourceOptions) => {\n if (obj.kind === \"Deployment\") {\n opts.aliases = [{ name: \"oldName\" }]\n }\n },\n\n // Omit a resource from the Chart by transforming the specified resource definition to an empty List.\n (obj: any, opts: pulumi.CustomResourceOptions) => {\n if (obj.kind === \"Pod\" && obj.metadata.name === \"test\") {\n obj.apiVersion = \"v1\"\n obj.kind = \"List\"\n }\n },\n ],\n});\n```\n```python\nfrom pulumi_kubernetes.helm.v3 import Chart, ChartOpts, FetchOpts\n\n# Make every service private to the cluster, i.e., turn all services into ClusterIP instead of LoadBalancer.\ndef make_service_private(obj, opts):\n if obj[\"kind\"] == \"Service\" and obj[\"apiVersion\"] == \"v1\":\n try:\n t = obj[\"spec\"][\"type\"]\n if t == \"LoadBalancer\":\n obj[\"spec\"][\"type\"] = \"ClusterIP\"\n except KeyError:\n pass\n\n\n# Set a resource alias for a previous name.\ndef alias(obj, opts):\n if obj[\"kind\"] == \"Deployment\":\n opts.aliases = [\"oldName\"]\n\n\n# Omit a resource from the Chart by transforming the specified resource definition to an empty List.\ndef omit_resource(obj, opts):\n if obj[\"kind\"] == \"Pod\" and obj[\"metadata\"][\"name\"] == \"test\":\n obj[\"apiVersion\"] = \"v1\"\n obj[\"kind\"] = \"List\"\n\n\nnginx_ingress = Chart(\n \"nginx-ingress\",\n ChartOpts(\n chart=\"nginx-ingress\",\n version=\"1.24.4\",\n fetch_opts=FetchOpts(\n repo=\"https://charts.helm.sh/stable\",\n ),\n transformations=[make_service_private, alias, omit_resource],\n ),\n)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Collections.Immutable;\nusing System.Threading.Tasks;\nusing Pulumi;\nusing Pulumi.Kubernetes.Helm;\nusing Pulumi.Kubernetes.Helm.V3;\n\nclass HelmStack : Stack\n{\n public HelmStack()\n {\n var nginx = new Chart(\"nginx-ingress\", new ChartArgs\n {\n Chart = \"nginx-ingress\",\n Version = \"1.24.4\",\n FetchOptions = new ChartFetchArgs\n {\n Repo = \"https://charts.helm.sh/stable\"\n },\n Transformations =\n {\n LoadBalancerToClusterIP,\n ResourceAlias,\n OmitTestPod,\n }\n\n });\n\n // Make every service private to the cluster, i.e., turn all services into ClusterIP instead of LoadBalancer.\n ImmutableDictionary LoadBalancerToClusterIP(ImmutableDictionary obj, CustomResourceOptions opts)\n {\n if ((string)obj[\"kind\"] == \"Service\" && (string)obj[\"apiVersion\"] == \"v1\")\n {\n var spec = (ImmutableDictionary)obj[\"spec\"];\n if (spec != null && 
(string)spec[\"type\"] == \"LoadBalancer\")\n {\n return obj.SetItem(\"spec\", spec.SetItem(\"type\", \"ClusterIP\"));\n }\n }\n\n return obj;\n }\n\n // Set a resource alias for a previous name.\n ImmutableDictionary ResourceAlias(ImmutableDictionary obj, CustomResourceOptions opts)\n {\n if ((string)obj[\"kind\"] == \"Deployment\")\n {\n opts.Aliases.Add(new Alias { Name = \"oldName\" });\n }\n\n return obj;\n }\n\n // Omit a resource from the Chart by transforming the specified resource definition to an empty List.\n ImmutableDictionary OmitTestPod(ImmutableDictionary obj, CustomResourceOptions opts)\n {\n var metadata = (ImmutableDictionary)obj[\"metadata\"];\n if ((string)obj[\"kind\"] == \"Pod\" && (string)metadata[\"name\"] == \"test\")\n {\n return new Dictionary\n {\n [\"apiVersion\"] = \"v1\",\n [\"kind\"] = \"List\",\n [\"items\"] = new Dictionary(),\n }.ToImmutableDictionary();\n }\n\n return obj;\n }\n }\n}\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/helm/v3\"\n\t\"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/yaml\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := helm.NewChart(ctx, \"nginx-ingress\", helm.ChartArgs{\n\t\t\tChart: pulumi.String(\"nginx-ingress\"),\n\t\t\tVersion: pulumi.String(\"1.24.4\"),\n\t\t\tFetchArgs: helm.FetchArgs{\n\t\t\t\tRepo: pulumi.String(\"https://charts.helm.sh/stable\"),\n\t\t\t},\n\t\t\tTransformations: []yaml.Transformation{\n\t\t\t\t// Make every service private to the cluster, i.e., turn all services into ClusterIP\n\t\t\t\t// instead of LoadBalancer.\n\t\t\t\tfunc(state map[string]interface{}, opts ...pulumi.ResourceOption) {\n\t\t\t\t\tif state[\"kind\"] == \"Service\" {\n\t\t\t\t\t\tspec := state[\"spec\"].(map[string]interface{})\n\t\t\t\t\t\tspec[\"type\"] = \"ClusterIP\"\n\t\t\t\t\t}\n\t\t\t\t},\n\n\t\t\t\t// Set a resource alias for a previous name.\n\t\t\t\tfunc(state map[string]interface{}, opts ...pulumi.ResourceOption) {\n\t\t\t\t\tif state[\"kind\"] == \"Deployment\" {\n\t\t\t\t\t\taliases := pulumi.Aliases([]pulumi.Alias{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: pulumi.String(\"oldName\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t})\n\t\t\t\t\t\topts = append(opts, aliases)\n\t\t\t\t\t}\n\t\t\t\t},\n\n\t\t\t\t// Omit a resource from the Chart by transforming the specified resource definition\n\t\t\t\t// to an empty List.\n\t\t\t\tfunc(state map[string]interface{}, opts ...pulumi.ResourceOption) {\n\t\t\t\t\tname := state[\"metadata\"].(map[string]interface{})[\"name\"]\n\t\t\t\t\tif state[\"kind\"] == \"Pod\" && name == \"test\" {\n\t\t\t\t\t\tstate[\"apiVersion\"] = \"core/v1\"\n\t\t\t\t\t\tstate[\"kind\"] = \"List\"\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n}\n```\n{{% /example %}}\n{{% /examples %}}\n", + "description": "{{% notes type=\"info\" %}}\nA newer version of this resource is available as [kubernetes.helm.sh/v4.Chart](/registry/packages/kubernetes/api-docs/helm/v4/chart/).\nSee the corresponding [blog post](/blog/kubernetes-chart-v4/) for more information.\n{{% /notes %}}\n\nChart is a component representing a collection of resources described by an arbitrary Helm Chart.\n\nThe Helm Chart can be fetched from any source that is accessible to the `helm` command line. Values in the `values.yml` file can be overridden using `ChartOpts.values` (equivalent to `--set` or having multiple `values.yml` files). 
Objects can be transformed arbitrarily by supplying callbacks to `ChartOpts.transformations`.\n\nThe `Chart` resource renders the templates from your chart and then manages them directly with the Pulumi Kubernetes provider.\n\n`Chart` does not use Tiller. The Chart specified is copied and expanded locally; the semantics are equivalent to running `helm template` and then using Pulumi to manage the resulting YAML manifests. Any values that would be retrieved in-cluster are assigned fake values, and none of Tiller's server-side validity testing is executed.\n\nYou may also want to consider the `Release` resource as an alternative method for managing Helm charts. For more information about the trade-offs between these options see: [Choosing the right Helm resource for your use case](https://www.pulumi.com/registry/packages/kubernetes/how-to-guides/choosing-the-right-helm-resource-for-your-use-case)\n\nThis resource is provided for the following languages: Node.js (JavaScript, TypeScript), Python, Go, and .NET (C#, F#, VB).\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n### Local Chart Directory\n\n```typescript\nimport * as k8s from \"@pulumi/kubernetes\";\n\nconst nginxIngress = new k8s.helm.v3.Chart(\"nginx-ingress\", {\n path: \"./nginx-ingress\",\n});\n```\n```python\nfrom pulumi_kubernetes.helm.v3 import Chart, LocalChartOpts\n\nnginx_ingress = Chart(\n \"nginx-ingress\",\n LocalChartOpts(\n path=\"./nginx-ingress\",\n ),\n)\n```\n```csharp\nusing System.Threading.Tasks;\nusing Pulumi;\nusing Pulumi.Kubernetes.Helm;\nusing Pulumi.Kubernetes.Helm.V3;\n\nclass HelmStack : Stack\n{\n public HelmStack()\n {\n var nginx = new Chart(\"nginx-ingress\", new LocalChartArgs\n {\n Path = \"./nginx-ingress\",\n });\n\n }\n}\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/helm/v3\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := helm.NewChart(ctx, \"nginx-ingress\", helm.ChartArgs{\n\t\t\tPath: pulumi.String(\"./nginx-ingress\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n}\n```\n{{% /example %}}\n{{% example %}}\n### Remote Chart\n\n```typescript\nimport * as k8s from \"@pulumi/kubernetes\";\n\nconst nginxIngress = new k8s.helm.v3.Chart(\"nginx-ingress\", {\n chart: \"nginx-ingress\",\n version: \"1.24.4\",\n fetchOpts:{\n repo: \"https://charts.helm.sh/stable\",\n },\n});\n```\n```python\nfrom pulumi_kubernetes.helm.v3 import Chart, ChartOpts, FetchOpts\n\nnginx_ingress = Chart(\n \"nginx-ingress\",\n ChartOpts(\n chart=\"nginx-ingress\",\n version=\"1.24.4\",\n fetch_opts=FetchOpts(\n repo=\"https://charts.helm.sh/stable\",\n ),\n ),\n)\n```\n```csharp\nusing System.Threading.Tasks;\nusing Pulumi;\nusing Pulumi.Kubernetes.Helm;\nusing Pulumi.Kubernetes.Helm.V3;\n\nclass HelmStack : Stack\n{\n public HelmStack()\n {\n var nginx = new Chart(\"nginx-ingress\", new ChartArgs\n {\n Chart = \"nginx-ingress\",\n Version = \"1.24.4\",\n FetchOptions = new ChartFetchArgs\n {\n Repo = \"https://charts.helm.sh/stable\"\n }\n });\n\n }\n}\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/helm/v3\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := helm.NewChart(ctx, \"nginx-ingress\", helm.ChartArgs{\n\t\t\tChart: pulumi.String(\"nginx-ingress\"),\n\t\t\tVersion: 
pulumi.String(\"1.24.4\"),\n\t\t\tFetchArgs: helm.FetchArgs{\n\t\t\t\tRepo: pulumi.String(\"https://charts.helm.sh/stable\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n}\n```\n\n{{% /example %}}\n{{% example %}}\n### Set Chart Values\n\n```typescript\nimport * as k8s from \"@pulumi/kubernetes\";\n\nconst nginxIngress = new k8s.helm.v3.Chart(\"nginx-ingress\", {\n chart: \"nginx-ingress\",\n version: \"1.24.4\",\n fetchOpts:{\n repo: \"https://charts.helm.sh/stable\",\n },\n values: {\n controller: {\n metrics: {\n enabled: true,\n }\n }\n },\n});\n```\n```python\nfrom pulumi_kubernetes.helm.v3 import Chart, ChartOpts, FetchOpts\n\nnginx_ingress = Chart(\n \"nginx-ingress\",\n ChartOpts(\n chart=\"nginx-ingress\",\n version=\"1.24.4\",\n fetch_opts=FetchOpts(\n repo=\"https://charts.helm.sh/stable\",\n ),\n values={\n \"controller\": {\n \"metrics\": {\n \"enabled\": True,\n },\n },\n },\n ),\n)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Threading.Tasks;\nusing Pulumi;\nusing Pulumi.Kubernetes.Helm;\nusing Pulumi.Kubernetes.Helm.V3;\n\nclass HelmStack : Stack\n{\n public HelmStack()\n {\n var values = new Dictionary\n {\n [\"controller\"] = new Dictionary\n {\n [\"metrics\"] = new Dictionary\n {\n [\"enabled\"] = true\n }\n },\n };\n\n var nginx = new Chart(\"nginx-ingress\", new ChartArgs\n {\n Chart = \"nginx-ingress\",\n Version = \"1.24.4\",\n FetchOptions = new ChartFetchArgs\n {\n Repo = \"https://charts.helm.sh/stable\"\n },\n Values = values,\n });\n\n }\n}\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/helm/v3\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := helm.NewChart(ctx, \"nginx-ingress\", helm.ChartArgs{\n\t\t\tChart: pulumi.String(\"nginx-ingress\"),\n\t\t\tVersion: pulumi.String(\"1.24.4\"),\n\t\t\tFetchArgs: helm.FetchArgs{\n\t\t\t\tRepo: pulumi.String(\"https://charts.helm.sh/stable\"),\n\t\t\t},\n\t\t\tValues: pulumi.Map{\n\t\t\t\t\"controller\": pulumi.Map{\n\t\t\t\t\t\"metrics\": pulumi.Map{\n\t\t\t\t\t\t\"enabled\": pulumi.Bool(true),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n}\n```\n{{% /example %}}\n{{% example %}}\n### Deploy Chart into Namespace\n\n```typescript\nimport * as k8s from \"@pulumi/kubernetes\";\n\nconst nginxIngress = new k8s.helm.v3.Chart(\"nginx-ingress\", {\n chart: \"nginx-ingress\",\n version: \"1.24.4\",\n namespace: \"test-namespace\",\n fetchOpts:{\n repo: \"https://charts.helm.sh/stable\",\n },\n});\n```\n```python\nfrom pulumi_kubernetes.helm.v3 import Chart, ChartOpts, FetchOpts\n\nnginx_ingress = Chart(\n \"nginx-ingress\",\n ChartOpts(\n chart=\"nginx-ingress\",\n version=\"1.24.4\",\n namespace=\"test-namespace\",\n fetch_opts=FetchOpts(\n repo=\"https://charts.helm.sh/stable\",\n ),\n ),\n)\n```\n```csharp\nusing System.Threading.Tasks;\nusing Pulumi;\nusing Pulumi.Kubernetes.Helm;\nusing Pulumi.Kubernetes.Helm.V3;\n\nclass HelmStack : Stack\n{\n public HelmStack()\n {\n var nginx = new Chart(\"nginx-ingress\", new ChartArgs\n {\n Chart = \"nginx-ingress\",\n Version = \"1.24.4\",\n Namespace = \"test-namespace\",\n FetchOptions = new ChartFetchArgs\n {\n Repo = \"https://charts.helm.sh/stable\"\n },\n });\n\n }\n}\n```\n```go\npackage main\n\nimport 
(\n\t\"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/helm/v3\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := helm.NewChart(ctx, \"nginx-ingress\", helm.ChartArgs{\n\t\t\tChart: pulumi.String(\"nginx-ingress\"),\n\t\t\tVersion: pulumi.String(\"1.24.4\"),\n\t\t\tNamespace: pulumi.String(\"test-namespace\"),\n\t\t\tFetchArgs: helm.FetchArgs{\n\t\t\t\tRepo: pulumi.String(\"https://charts.helm.sh/stable\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n}\n```\n{{% /example %}}\n{{% example %}}\n### Depend on a Chart resource\n\n```typescript\nimport * as k8s from \"@pulumi/kubernetes\";\n\nconst nginxIngress = new k8s.helm.v3.Chart(\"nginx-ingress\", {\n chart: \"nginx-ingress\",\n version: \"1.24.4\",\n namespace: \"test-namespace\",\n fetchOpts:{\n repo: \"https://charts.helm.sh/stable\",\n },\n});\n\n// Create a ConfigMap depending on the Chart. The ConfigMap will not be created until after all of the Chart\n// resources are ready. Note the use of the `ready` attribute; depending on the Chart resource directly will not work.\nnew k8s.core.v1.ConfigMap(\"foo\", {\n metadata: { namespace: namespaceName },\n data: {foo: \"bar\"}\n}, {dependsOn: nginxIngress.ready})\n```\n```python\nimport pulumi\nfrom pulumi_kubernetes.core.v1 import ConfigMap, ConfigMapInitArgs\nfrom pulumi_kubernetes.helm.v3 import Chart, ChartOpts, FetchOpts\n\nnginx_ingress = Chart(\n \"nginx-ingress\",\n ChartOpts(\n chart=\"nginx-ingress\",\n version=\"1.24.4\",\n namespace=\"test-namespace\",\n fetch_opts=FetchOpts(\n repo=\"https://charts.helm.sh/stable\",\n ),\n ),\n)\n\n# Create a ConfigMap depending on the Chart. The ConfigMap will not be created until after all of the Chart\n# resources are ready. Note the use of the `ready` attribute; depending on the Chart resource directly will not work.\nConfigMap(\"foo\", ConfigMapInitArgs(data={\"foo\": \"bar\"}), opts=pulumi.ResourceOptions(depends_on=nginx_ingress.ready))\n```\n```csharp\nusing System.Threading.Tasks;\nusing Pulumi;\nusing Pulumi.Kubernetes.Core.V1;\nusing Pulumi.Kubernetes.Helm;\nusing Pulumi.Kubernetes.Helm.V3;\n\nclass HelmStack : Stack\n{\n public HelmStack()\n {\n var nginx = new Chart(\"nginx-ingress\", new ChartArgs\n {\n Chart = \"nginx-ingress\",\n Version = \"1.24.4\",\n Namespace = \"test-namespace\",\n FetchOptions = new ChartFetchArgs\n {\n Repo = \"https://charts.helm.sh/stable\"\n },\n });\n\n // Create a ConfigMap depending on the Chart. The ConfigMap will not be created until after all of the Chart\n // resources are ready. 
Note the use of the `Ready()` method; depending on the Chart resource directly will\n // not work.\n new ConfigMap(\"foo\", new Pulumi.Kubernetes.Types.Inputs.Core.V1.ConfigMapArgs\n {\n Data = new InputMap\n {\n {\"foo\", \"bar\"}\n },\n }, new CustomResourceOptions\n {\n DependsOn = nginx.Ready(),\n });\n\n }\n}\n```\n```go\npackage main\n\nimport (\n\tcorev1 \"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/core/v1\"\n\t\"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/helm/v3\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tchart, err := helm.NewChart(ctx, \"nginx-ingress\", helm.ChartArgs{\n\t\t\tChart: pulumi.String(\"nginx-ingress\"),\n\t\t\tVersion: pulumi.String(\"1.24.4\"),\n\t\t\tNamespace: pulumi.String(\"test-namespace\"),\n\t\t\tFetchArgs: helm.FetchArgs{\n\t\t\t\tRepo: pulumi.String(\"https://charts.helm.sh/stable\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Create a ConfigMap depending on the Chart. The ConfigMap will not be created until after all of the Chart\n\t\t// resources are ready. Note the use of the `Ready` attribute, which is used with `DependsOnInputs` rather than\n\t\t// `DependsOn`. Depending on the Chart resource directly, or using `DependsOn`, will not work.\n\t\t_, err = corev1.NewConfigMap(ctx, \"cm\", &corev1.ConfigMapArgs{\n\t\t\tData: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t}, pulumi.DependsOnInputs(chart.Ready))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n}\n```\n{{% /example %}}\n{{% example %}}\n### Chart with Transformations\n\n```typescript\nimport * as k8s from \"@pulumi/kubernetes\";\n\nconst nginxIngress = new k8s.helm.v3.Chart(\"nginx-ingress\", {\n chart: \"nginx-ingress\",\n version: \"1.24.4\",\n fetchOpts:{\n repo: \"https://charts.helm.sh/stable\",\n },\n transformations: [\n // Make every service private to the cluster, i.e., turn all services into ClusterIP instead of LoadBalancer.\n (obj: any, opts: pulumi.CustomResourceOptions) => {\n if (obj.kind === \"Service\" && obj.apiVersion === \"v1\") {\n if (obj.spec && obj.spec.type && obj.spec.type === \"LoadBalancer\") {\n obj.spec.type = \"ClusterIP\";\n }\n }\n },\n\n // Set a resource alias for a previous name.\n (obj: any, opts: pulumi.CustomResourceOptions) => {\n if (obj.kind === \"Deployment\") {\n opts.aliases = [{ name: \"oldName\" }]\n }\n },\n\n // Omit a resource from the Chart by transforming the specified resource definition to an empty List.\n (obj: any, opts: pulumi.CustomResourceOptions) => {\n if (obj.kind === \"Pod\" && obj.metadata.name === \"test\") {\n obj.apiVersion = \"v1\"\n obj.kind = \"List\"\n }\n },\n ],\n});\n```\n```python\nfrom pulumi_kubernetes.helm.v3 import Chart, ChartOpts, FetchOpts\n\n# Make every service private to the cluster, i.e., turn all services into ClusterIP instead of LoadBalancer.\ndef make_service_private(obj, opts):\n if obj[\"kind\"] == \"Service\" and obj[\"apiVersion\"] == \"v1\":\n try:\n t = obj[\"spec\"][\"type\"]\n if t == \"LoadBalancer\":\n obj[\"spec\"][\"type\"] = \"ClusterIP\"\n except KeyError:\n pass\n\n\n# Set a resource alias for a previous name.\ndef alias(obj, opts):\n if obj[\"kind\"] == \"Deployment\":\n opts.aliases = [\"oldName\"]\n\n\n# Omit a resource from the Chart by transforming the specified resource definition to an empty List.\ndef omit_resource(obj, opts):\n if obj[\"kind\"] == \"Pod\" and obj[\"metadata\"][\"name\"] == 
\"test\":\n obj[\"apiVersion\"] = \"v1\"\n obj[\"kind\"] = \"List\"\n\n\nnginx_ingress = Chart(\n \"nginx-ingress\",\n ChartOpts(\n chart=\"nginx-ingress\",\n version=\"1.24.4\",\n fetch_opts=FetchOpts(\n repo=\"https://charts.helm.sh/stable\",\n ),\n transformations=[make_service_private, alias, omit_resource],\n ),\n)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Collections.Immutable;\nusing System.Threading.Tasks;\nusing Pulumi;\nusing Pulumi.Kubernetes.Helm;\nusing Pulumi.Kubernetes.Helm.V3;\n\nclass HelmStack : Stack\n{\n public HelmStack()\n {\n var nginx = new Chart(\"nginx-ingress\", new ChartArgs\n {\n Chart = \"nginx-ingress\",\n Version = \"1.24.4\",\n FetchOptions = new ChartFetchArgs\n {\n Repo = \"https://charts.helm.sh/stable\"\n },\n Transformations =\n {\n LoadBalancerToClusterIP,\n ResourceAlias,\n OmitTestPod,\n }\n\n });\n\n // Make every service private to the cluster, i.e., turn all services into ClusterIP instead of LoadBalancer.\n ImmutableDictionary LoadBalancerToClusterIP(ImmutableDictionary obj, CustomResourceOptions opts)\n {\n if ((string)obj[\"kind\"] == \"Service\" && (string)obj[\"apiVersion\"] == \"v1\")\n {\n var spec = (ImmutableDictionary)obj[\"spec\"];\n if (spec != null && (string)spec[\"type\"] == \"LoadBalancer\")\n {\n return obj.SetItem(\"spec\", spec.SetItem(\"type\", \"ClusterIP\"));\n }\n }\n\n return obj;\n }\n\n // Set a resource alias for a previous name.\n ImmutableDictionary ResourceAlias(ImmutableDictionary obj, CustomResourceOptions opts)\n {\n if ((string)obj[\"kind\"] == \"Deployment\")\n {\n opts.Aliases.Add(new Alias { Name = \"oldName\" });\n }\n\n return obj;\n }\n\n // Omit a resource from the Chart by transforming the specified resource definition to an empty List.\n ImmutableDictionary OmitTestPod(ImmutableDictionary obj, CustomResourceOptions opts)\n {\n var metadata = (ImmutableDictionary)obj[\"metadata\"];\n if ((string)obj[\"kind\"] == \"Pod\" && (string)metadata[\"name\"] == \"test\")\n {\n return new Dictionary\n {\n [\"apiVersion\"] = \"v1\",\n [\"kind\"] = \"List\",\n [\"items\"] = new Dictionary(),\n }.ToImmutableDictionary();\n }\n\n return obj;\n }\n }\n}\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/helm/v3\"\n\t\"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/yaml\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := helm.NewChart(ctx, \"nginx-ingress\", helm.ChartArgs{\n\t\t\tChart: pulumi.String(\"nginx-ingress\"),\n\t\t\tVersion: pulumi.String(\"1.24.4\"),\n\t\t\tFetchArgs: helm.FetchArgs{\n\t\t\t\tRepo: pulumi.String(\"https://charts.helm.sh/stable\"),\n\t\t\t},\n\t\t\tTransformations: []yaml.Transformation{\n\t\t\t\t// Make every service private to the cluster, i.e., turn all services into ClusterIP\n\t\t\t\t// instead of LoadBalancer.\n\t\t\t\tfunc(state map[string]interface{}, opts ...pulumi.ResourceOption) {\n\t\t\t\t\tif state[\"kind\"] == \"Service\" {\n\t\t\t\t\t\tspec := state[\"spec\"].(map[string]interface{})\n\t\t\t\t\t\tspec[\"type\"] = \"ClusterIP\"\n\t\t\t\t\t}\n\t\t\t\t},\n\n\t\t\t\t// Set a resource alias for a previous name.\n\t\t\t\tfunc(state map[string]interface{}, opts ...pulumi.ResourceOption) {\n\t\t\t\t\tif state[\"kind\"] == \"Deployment\" {\n\t\t\t\t\t\taliases := pulumi.Aliases([]pulumi.Alias{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: pulumi.String(\"oldName\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t})\n\t\t\t\t\t\topts = append(opts, 
aliases)\n\t\t\t\t\t}\n\t\t\t\t},\n\n\t\t\t\t// Omit a resource from the Chart by transforming the specified resource definition\n\t\t\t\t// to an empty List.\n\t\t\t\tfunc(state map[string]interface{}, opts ...pulumi.ResourceOption) {\n\t\t\t\t\tname := state[\"metadata\"].(map[string]interface{})[\"name\"]\n\t\t\t\t\tif state[\"kind\"] == \"Pod\" && name == \"test\" {\n\t\t\t\t\t\tstate[\"apiVersion\"] = \"v1\"\n\t\t\t\t\t\tstate[\"kind\"] = \"List\"\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n}\n```\n{{% /example %}}\n{{% /examples %}}\n", "properties": { "resources": { "type": "string", diff --git a/provider/pkg/await/await.go b/provider/pkg/await/await.go index d0b643a1fe..e85ef3aba5 100644 --- a/provider/pkg/await/await.go +++ b/provider/pkg/await/await.go @@ -536,8 +536,12 @@ func csaUpdate(c *UpdateConfig, liveOldObj *unstructured.Unstructured, client dy return client.Patch(c.Context, liveOldObj.GetName(), patchType, patch, options) } +type patcher interface { + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, options metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error) +} + // ssaUpdate handles the logic for updating a resource using server-side apply. -func ssaUpdate(c *UpdateConfig, liveOldObj *unstructured.Unstructured, client dynamic.ResourceInterface) (*unstructured.Unstructured, error) { +func ssaUpdate(c *UpdateConfig, liveOldObj *unstructured.Unstructured, client patcher) (*unstructured.Unstructured, error) { liveOldObj, err := fixCSAFieldManagers(c, liveOldObj, client) if err != nil { return nil, err @@ -720,7 +724,7 @@ func ensureFieldsAreMembers(s *fieldpath.Set) *fieldpath.Set { // fixCSAFieldManagers patches the field managers for an existing resource that was managed using client-side apply. // The new server-side apply field manager takes ownership of all these fields to avoid conflicts. -func fixCSAFieldManagers(c *UpdateConfig, liveOldObj *unstructured.Unstructured, client dynamic.ResourceInterface) (*unstructured.Unstructured, error) { +func fixCSAFieldManagers(c *UpdateConfig, liveOldObj *unstructured.Unstructured, client patcher) (*unstructured.Unstructured, error) { if kinds.IsPatchResource(c.URN, c.Inputs.GetKind()) { // When dealing with a patch resource, there's no need to patch the field managers. // Doing so would inadvertently make us responsible for managing fields that are not relevant to us during updates, @@ -903,6 +907,13 @@ func patchForce(inputs, live *unstructured.Unstructured, preview bool) bool { } } } + // Legacy objects created before SSA don't record any managedFields, but the + // server still assigns them a default "before-first-apply" manager, which owns + // every field that existed before the first SSA apply. To work around this, we + // take control of the object by forcing the apply. + if live != nil && len(live.GetManagedFields()) == 0 { + return true + } return false } diff --git a/provider/pkg/await/await_test.go b/provider/pkg/await/await_test.go index 2586c9f3cc..ff56fb85bd 100644 --- a/provider/pkg/await/await_test.go +++ b/provider/pkg/await/await_test.go @@ -1,9 +1,22 @@ -// Copyright 2021, Pulumi Corporation. All rights reserved. +// Copyright 2021-2024, Pulumi Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package await import ( "context" + "encoding/json" "fmt" "testing" "time" @@ -31,9 +44,15 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/managedfields" + "k8s.io/apimachinery/pkg/util/managedfields/managedfieldstest" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/dynamic" + kfake "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/kubernetes/scheme" kubetesting "k8s.io/client-go/testing" + "k8s.io/kube-openapi/pkg/validation/spec" + "sigs.k8s.io/yaml" ) var ( @@ -1151,3 +1170,286 @@ func FailedRESTMapper(mapper meta.ResettableRESTMapper, err error) *fake.StubRes }, } } + +func fakeTypeConverter(t *testing.T) managedfields.TypeConverter { + t.Helper() + + openapi, err := fake.LoadOpenAPISchema() + require.NoError(t, err) + + swagger := spec.Swagger{} + raw, err := openapi.YAMLValue("") + require.NoError(t, err) + require.NoError(t, yaml.Unmarshal(raw, &swagger)) + + definitions := map[string]*spec.Schema{} + for k, v := range swagger.Definitions { + p := v + definitions[k] = &p + } + + tc, err := managedfields.NewTypeConverter(definitions, false) + require.NoError(t, err) + return tc +} + +// TestSSAWithOldObjects is a regression test for +// https://github.com/pulumi/customer-support/issues/1837. An object is created +// and manipulated such that it no longer has any .metadata.managedFields, as +// is the case with things created prior to 1.18. We confirm this reproduces +// the issue and that our SSA upgrade logic handles it. +func TestSSAWithOldObjects(t *testing.T) { + t.Parallel() + + tc := fakeTypeConverter(t) + fm := managedfieldstest.NewTestFieldManager(tc, schema.FromAPIVersionAndKind("v1", "Service")) + + inputs := &unstructured.Unstructured{} + in := `{ + "apiVersion": "v1", + "kind": "Service", + "metadata": { + "labels": { + "app.kubernetes.io/instance": "autoscaler", + "app.kubernetes.io/managed-by": "pulumi", + "app.kubernetes.io/name": "aws-cluster-autoscaler", + "app.kubernetes.io/version": "1.28.2", + "helm.sh/chart": "cluster-autoscaler-9.34.1" + }, + "name": "cluster-autoscaler", + "namespace": "kube-system" + }, + "spec": {} + }` + require.NoError(t, json.Unmarshal([]byte(in), inputs)) + // We need the last-applied-config annotation in order to trigger kubectl's + // graceful CSA->SSA upgrade path. + last, err := inputs.MarshalJSON() + require.NoError(t, err) + inputs.SetAnnotations(map[string]string{ + "kubectl.kubernetes.io/last-applied-configuration": string(last), + }) + + // Create the object. As of 1.18 all objects are created with + // managedFields -- even when using CSA. + obj := inputs.DeepCopy() + err = fm.Update(obj, "kubectl-create") + require.NoError(t, err) + require.NotEmpty(t, fm.ManagedFields()) + assert.Len(t, fm.ManagedFields(), 1) + + // However we can still disable managed fields after creating the object by + // explicitly setting it to `[]`. 
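+	// Setting managedFields to an explicitly empty list (rather than omitting the
+	// field) resets the tracked ownership, leaving the object in the same state as
+	// one created before Kubernetes 1.18, which predates managedFields.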
+ obj = inputs.DeepCopy() + obj.SetManagedFields([]metav1.ManagedFieldsEntry{}) + err = fm.Update(obj, "kubectl-update") + require.NoError(t, err) + assert.Empty(t, fm.ManagedFields()) + + // Try to update a label on the object using a naive apply. + obj = inputs.DeepCopy() + obj.SetLabels(map[string]string{ + "helm.sh/chart": "cluster-autoscaler-9.36.0", + }) + // Despite having no field managers, our apply still conflicts with the + // legacy "before-first-apply" manager. + err = fm.Apply(obj, "pulumi-kubernetes", false) + assert.ErrorContains(t, err, `Apply failed with 1 conflict: conflict with "before-first-apply" using v1: .metadata.labels.helm.sh/chart`) + + // Now try again using our SSA upgrade logic -- this should succeed. + cfg := &UpdateConfig{ + Inputs: obj, + Preview: false, + ProviderConfig: ProviderConfig{ + URN: resource.NewURN(tokens.QName("teststack"), tokens.PackageName("testproj"), tokens.Type(""), "v1/Service", "testresource"), + FieldManager: "pulumi-kubernetes", + ServerSideApply: true, + }, + } + _, err = ssaUpdate(cfg, obj, fieldManagerPatcher{fm}) + require.NoError(t, err) +} + +func TestSSAUpdate(t *testing.T) { + tests := []struct { + name string + obj string + preview bool + wantManagers []string + }{ + { + name: "we take ownership of kubectl CSA", + obj: `apiVersion: v1 +kind: Namespace +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"v1","kind":"Namespace","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"flux-system","app.kubernetes.io/part-of":"flux","pod-security.kubernetes.io/warn":"restricted","pod-security.kubernetes.io/warn-version":"latest"},"name":"flux-system"}} + creationTimestamp: "2024-09-24T19:27:32Z" + labels: + app.kubernetes.io/instance: flux-system + app.kubernetes.io/part-of: flux + kubernetes.io/metadata.name: flux-system + pod-security.kubernetes.io/warn: restricted + pod-security.kubernetes.io/warn-version: latest + managedFields: + - apiVersion: v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:kubectl.kubernetes.io/last-applied-configuration: {} + f:labels: + .: {} + f:app.kubernetes.io/instance: {} + f:app.kubernetes.io/part-of: {} + f:kubernetes.io/metadata.name: {} + f:pod-security.kubernetes.io/warn: {} + f:pod-security.kubernetes.io/warn-version: {} + manager: kubectl-client-side-apply + operation: Update + time: "2024-09-24T19:27:32Z" + name: flux-system + resourceVersion: "138234" + uid: c14c35d8-ae5d-4f53-8391-791d47efe337 +spec: + finalizers: + - kubernetes +status: + phase: Active`, + preview: false, + wantManagers: []string{"pulumi-kubernetes"}, + }, + { + name: "we take ownership of kubectl SSA", + obj: `apiVersion: v1 +kind: Namespace +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"v1","kind":"Namespace","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"flux-system","app.kubernetes.io/part-of":"flux","pod-security.kubernetes.io/warn":"restricted","pod-security.kubernetes.io/warn-version":"latest"},"name":"flux-system"}} + creationTimestamp: "2024-09-24T19:27:32Z" + labels: + app.kubernetes.io/instance: flux-system + app.kubernetes.io/part-of: flux + kubernetes.io/metadata.name: flux-system + pod-security.kubernetes.io/warn: restricted + pod-security.kubernetes.io/warn-version: latest + managedFields: + - apiVersion: v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:kubectl.kubernetes.io/last-applied-configuration: {} + f:labels: 
+ .: {} + f:app.kubernetes.io/instance: {} + f:app.kubernetes.io/part-of: {} + f:kubernetes.io/metadata.name: {} + f:pod-security.kubernetes.io/warn: {} + f:pod-security.kubernetes.io/warn-version: {} + manager: kubectl + operation: Apply + time: "2024-09-24T19:27:32Z" + name: flux-system + resourceVersion: "138234" + uid: c14c35d8-ae5d-4f53-8391-791d47efe337 +spec: + finalizers: + - kubernetes +status: + phase: Active`, + preview: false, + wantManagers: []string{"pulumi-kubernetes"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var obj unstructured.Unstructured + require.NoError(t, yaml.Unmarshal([]byte(tt.obj), &obj)) + + typed, err := scheme.Scheme.New(obj.GroupVersionKind()) + require.NoError(t, err) + + require.NoError(t, runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, typed)) + + client := kfake.NewClientset(typed) + c := client.CoreV1().Namespaces() + + inputs := obj.DeepCopy() + inputs.SetLabels(nil) + inputs.SetManagedFields(nil) + cfg := &UpdateConfig{ + Inputs: inputs, + Preview: tt.preview, + ProviderConfig: ProviderConfig{ + URN: resource.NewURN(tokens.QName("teststack"), tokens.PackageName("testproj"), tokens.Type(""), "v1/Service", "testresource"), + FieldManager: "pulumi-kubernetes", + ServerSideApply: true, + }, + } + live, err := ssaUpdate(cfg, &obj, untypedPatcher[*corev1.Namespace]{wrapped: c}) + require.NoError(t, err) + assert.Len(t, live.GetManagedFields(), 1) + for idx, want := range tt.wantManagers { + assert.Equal(t, want, live.GetManagedFields()[idx].Manager) + } + }) + } +} + +type typedPatcher[T runtime.Object] interface { + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (T, error) +} + +type untypedPatcher[T runtime.Object] struct { + wrapped typedPatcher[T] +} + +func (p untypedPatcher[T]) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, options metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error) { + typed, err := p.wrapped.Patch(ctx, name, pt, data, options, subresources...) + if err != nil { + return nil, err + } + obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(typed) + return &unstructured.Unstructured{Object: obj}, err +} + +type fieldManagerPatcher struct { + fm managedfieldstest.TestFieldManager +} + +func (p fieldManagerPatcher) Patch(_ context.Context, _ string, pt types.PatchType, data []byte, options metav1.PatchOptions, _ ...string) (*unstructured.Unstructured, error) { + if pt != types.ApplyPatchType { + return nil, fmt.Errorf("fieldManagerPatcher only handles Apply") + } + + force := false + if options.Force != nil { + force = *options.Force + } + + in, err := yaml.YAMLToJSON(data) + if err != nil { + return nil, err + } + + obj, _, err := unstructured.UnstructuredJSONScheme.Decode(in, nil, nil) + if err != nil { + return nil, err + } + + err = p.fm.Apply(obj, options.FieldManager, force) + if err != nil { + return nil, err + } + + live := p.fm.Live() + return live.(*unstructured.Unstructured), err +} diff --git a/tests/sdk/java/csa_to_ssa_test.go b/tests/sdk/java/csa_to_ssa_test.go new file mode 100644 index 0000000000..a72659517c --- /dev/null +++ b/tests/sdk/java/csa_to_ssa_test.go @@ -0,0 +1,92 @@ +// Copyright 2016-2024, Pulumi Corporation. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package test + +import ( + "context" + "testing" + + "github.com/pulumi/providertest/pulumitest" + "github.com/pulumi/providertest/pulumitest/opttest" + "github.com/pulumi/pulumi/sdk/v3/go/auto/optup" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/tools/clientcmd" +) + +// TestCSAToSSANoManagedFields tests that the conversion from CSA to SSA works as expected when the object +// being targeted does not have any managedFields entries. This occurs when the object was created before +// beta.2 of SSA (Kubernetes < 1.18). In this case, a conflict with the default `before-first-apply` occurs +// on the first apply with SSA. +// Note: interestingly this issue is not observed with ConfigMaps. +func TestCSAToSSANoManagedFields(t *testing.T) { + ctx := context.Background() + + // 1. Create the deployment using pulumi-kubernetes in CSA mode. + test := pulumitest.NewPulumiTest(t, "testdata/csa-to-ssa", opttest.SkipInstall()) + t.Logf("into %s", test.Source()) + t.Cleanup(func() { + test.Destroy() + }) + test.Preview() + test.Up() + + outputs, err := test.CurrentStack().Outputs(ctx) + require.NoError(t, err, "Failed to get outputs from stack") + namespace, ok := outputs["namespace"].Value.(string) + require.True(t, ok, "Failed to get namespace output as string") + require.NotEmpty(t, namespace, "Namespace output is empty") + depName, ok := outputs["deployment"].Value.(string) + require.True(t, ok, "Failed to get deployment name output as string") + require.NotEmpty(t, depName, "Deployment name output is empty") + + // 2. We need to nuke the .metadata.managedFields to simulate SSA takeover from an old CSA object. This has + // to be done after the first apply, as the object's lifecycle should be managed by Pulumi, and newer Kubernetes + // versions automatically populate this field. + config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile) + if err != nil { + t.Fatalf("Failed to build kubeconfig: %v", err) + } + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + t.Fatalf("Failed to create dynamic client: %v", err) + } + + depClientNamespaced := dynamicClient.Resource(schema.GroupVersionResource{ + Group: "apps", + Version: "v1", + Resource: "deployments", + }).Namespace(namespace) + + dep, err := depClientNamespaced.Get(ctx, depName, metav1.GetOptions{}) + require.NoError(t, err, "Failed to get deployment to purge managedFields") + + // Remove the managedFields from the object by setting it to an empty array. Deleting the field entirely will not + // remove it from the object. + dep.SetManagedFields([]metav1.ManagedFieldsEntry{}) + + // The update needs to be a PUT request, otherwise the server will just repopulate the managedFields. 
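+	// Update issues a PUT of the full object, so the empty managedFields slice set
+	// above replaces the server's copy instead of being merged away.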
+ dep, err = depClientNamespaced.Update(ctx, dep, metav1.UpdateOptions{}) + require.NoError(t, err, "Failed to update deployment to purge managedFields") + require.Empty(t, dep.GetManagedFields(), "Failed to remove managedFields from deployment object") + + // 3. Apply step 2 of testdata where SSA mode is enabled, with a change in the image spec field. + test.UpdateSource("testdata/csa-to-ssa/step2") + test.Preview() + test.Up() + test.Up(optup.ExpectNoChanges()) +} diff --git a/tests/sdk/java/testdata/csa-to-ssa/Pulumi.yaml b/tests/sdk/java/testdata/csa-to-ssa/Pulumi.yaml new file mode 100644 index 0000000000..dac91c201c --- /dev/null +++ b/tests/sdk/java/testdata/csa-to-ssa/Pulumi.yaml @@ -0,0 +1,40 @@ +name: yamlv2 +runtime: yaml +description: | + Creates a simple deployment object using CSA. +variables: {} +outputs: + namespace: ${ns.metadata.name} + deployment: ${deployment.metadata.name} +resources: + provider: + type: pulumi:providers:kubernetes + properties: + enableServerSideApply: false + ns: + type: kubernetes:core/v1:Namespace + options: + provider: ${provider} + # Deployment + deployment: + type: kubernetes:apps/v1:Deployment + properties: + metadata: + namespace: ${ns.metadata.name} + labels: + app: nginx + spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nginx:1.14.0 + name: nginx + options: + provider: ${provider} diff --git a/tests/sdk/java/testdata/csa-to-ssa/step2/Pulumi.yaml b/tests/sdk/java/testdata/csa-to-ssa/step2/Pulumi.yaml new file mode 100644 index 0000000000..37137419b3 --- /dev/null +++ b/tests/sdk/java/testdata/csa-to-ssa/step2/Pulumi.yaml @@ -0,0 +1,40 @@ +name: yamlv2 +runtime: yaml +description: | + Creates a simple deployment object using SSA and a field change. +variables: {} +outputs: + namespace: ${ns.metadata.name} + deployment: ${deployment.metadata.name} +resources: + provider: + type: pulumi:providers:kubernetes + properties: + enableServerSideApply: true + ns: + type: kubernetes:core/v1:Namespace + options: + provider: ${provider} + # Deployment + deployment: + type: kubernetes:apps/v1:Deployment + properties: + metadata: + namespace: ${ns.metadata.name} + labels: + app: nginx + spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nginx:1.14.1 + name: nginx + options: + provider: ${provider}
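+# Compared to the step-1 program, this flips enableServerSideApply to true and
+# bumps the image tag, forcing the provider's first server-side apply against
+# the deployment that was created with client-side apply.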