diff --git a/cmd/client.go b/cmd/client.go index 9c4fc81987..d299dcd547 100644 --- a/cmd/client.go +++ b/cmd/client.go @@ -58,16 +58,16 @@ func NewClient(cfg ClientConfig, options ...fn.Option) (*fn.Client, func()) { var ( t = newTransport(cfg.InsecureSkipVerify) // may provide a custom impl which proxies c = newCredentialsProvider(config.Dir(), t) // for accessing registries - d = newKnativeDeployer(cfg.Verbose) + d = newKnativeDeployer(cfg.Verbose) // default deployer (can be overridden via options) pp = newTektonPipelinesProvider(c, cfg.Verbose) o = []fn.Option{ // standard (shared) options for all commands fn.WithVerbose(cfg.Verbose), fn.WithTransport(t), fn.WithRepositoriesPath(config.RepositoriesPath()), fn.WithBuilder(buildpacks.NewBuilder(buildpacks.WithVerbose(cfg.Verbose))), - fn.WithRemover(knative.NewRemover(cfg.Verbose)), - fn.WithDescriber(knative.NewDescriber(cfg.Verbose)), - fn.WithLister(knative.NewLister(cfg.Verbose)), + fn.WithRemovers(knative.NewRemover(cfg.Verbose), k8s.NewRemover(cfg.Verbose)), + fn.WithDescribers(knative.NewDescriber(cfg.Verbose), k8s.NewDescriber(cfg.Verbose)), + fn.WithListers(knative.NewLister(cfg.Verbose), k8s.NewLister(cfg.Verbose)), fn.WithDeployer(d), fn.WithPipelinesProvider(pp), fn.WithPusher(docker.NewPusher( @@ -135,6 +135,15 @@ func newKnativeDeployer(verbose bool) fn.Deployer { return knative.NewDeployer(options...) } +func newK8sDeployer(verbose bool) fn.Deployer { + options := []k8s.DeployerOpt{ + k8s.WithDeployerVerbose(verbose), + k8s.WithDeployerDecorator(deployDecorator{}), + } + + return k8s.NewDeployer(options...) +} + type deployDecorator struct { oshDec k8s.OpenshiftMetadataDecorator } diff --git a/cmd/client_test.go b/cmd/client_test.go index 8be224f13c..8ddf97ce7e 100644 --- a/cmd/client_test.go +++ b/cmd/client_test.go @@ -21,10 +21,10 @@ func Test_NewTestClient(t *testing.T) { ) // Factory constructor options which should be used when invoking later - clientFn := NewTestClient(fn.WithRemover(remover)) + clientFn := NewTestClient(fn.WithRemovers(remover)) // Factory should ignore options provided when invoking - client, _ := clientFn(ClientConfig{}, fn.WithDescriber(describer)) + client, _ := clientFn(ClientConfig{}, fn.WithDescribers(describer)) // Trigger an invocation of the mocks by running the associated client // methods which depend on them diff --git a/cmd/completion_util.go b/cmd/completion_util.go index 7a4df6b848..1f72e38a07 100644 --- a/cmd/completion_util.go +++ b/cmd/completion_util.go @@ -9,21 +9,28 @@ import ( "strings" "github.com/spf13/cobra" fn "knative.dev/func/pkg/functions" + "knative.dev/func/pkg/k8s" "knative.dev/func/pkg/knative" ) func CompleteFunctionList(cmd *cobra.Command, args []string, toComplete string) (strings []string, directive cobra.ShellCompDirective) { - lister := knative.NewLister(false) + listers := []fn.Lister{ + knative.NewLister(false), + k8s.NewLister(false), + } - list, err := lister.List(cmd.Context(), "") - if err != nil { - directive = cobra.ShellCompDirectiveError - return + items := []fn.ListItem{} + for _, lister := range listers { + list, err := lister.List(cmd.Context(), "") + if err != nil { + directive = cobra.ShellCompDirectiveError + return + } + items = append(items, list...)
} - for _, item := range list { + for _, item := range items { strings = append(strings, item.Name) } directive = cobra.ShellCompDirectiveDefault @@ -158,3 +165,26 @@ func CompleteBuilderList(cmd *cobra.Command, args []string, complete string) (ma return } + +func CompleteDeployerList(cmd *cobra.Command, args []string, complete string) (matches []string, d cobra.ShellCompDirective) { + deployers := []string{ + knative.KnativeDeployerName, + k8s.KubernetesDeployerName, + } + + d = cobra.ShellCompDirectiveNoFileComp + matches = []string{} + + if len(complete) == 0 { + matches = deployers + return + } + + for _, b := range deployers { + if strings.HasPrefix(b, complete) { + matches = append(matches, b) + } + } + + return +} diff --git a/cmd/delete_test.go b/cmd/delete_test.go index 97a4b9c25f..26717085e4 100644 --- a/cmd/delete_test.go +++ b/cmd/delete_test.go @@ -53,7 +53,7 @@ func TestDelete_Default(t *testing.T) { t.Fatal(err) } - cmd := NewDeleteCmd(NewTestClient(fn.WithRemover(remover))) + cmd := NewDeleteCmd(NewTestClient(fn.WithRemovers(remover))) cmd.SetArgs([]string{}) if err := cmd.Execute(); err != nil { t.Fatal(err) @@ -104,7 +104,7 @@ func TestDelete_ByName(t *testing.T) { // Create a command with a client constructor fn that instantiates a client // with a mocked remover. - cmd := NewDeleteCmd(NewTestClient(fn.WithRemover(remover))) + cmd := NewDeleteCmd(NewTestClient(fn.WithRemovers(remover))) cmd.SetArgs([]string{testname}) // run: func delete if err := cmd.Execute(); err != nil { t.Fatal(err) @@ -132,7 +132,7 @@ func TestDelete_Namespace(t *testing.T) { return nil } - cmd := NewDeleteCmd(NewTestClient(fn.WithRemover(remover))) + cmd := NewDeleteCmd(NewTestClient(fn.WithRemovers(remover))) cmd.SetArgs([]string{testname, "--namespace", namespace}) if err := cmd.Execute(); err != nil { t.Fatal(err) @@ -177,7 +177,7 @@ func TestDelete_NamespaceFlagPriority(t *testing.T) { t.Fatal(err) } - cmd := NewDeleteCmd(NewTestClient(fn.WithRemover(remover))) + cmd := NewDeleteCmd(NewTestClient(fn.WithRemovers(remover))) cmd.SetArgs([]string{testname, "--namespace", namespace2}) if err := cmd.Execute(); err != nil { t.Fatal(err) @@ -233,7 +233,7 @@ created: 2021-01-01T00:00:00+00:00 // Command with a Client constructor that returns client with the // mocked remover. - cmd := NewDeleteCmd(NewTestClient(fn.WithRemover(remover))) + cmd := NewDeleteCmd(NewTestClient(fn.WithRemovers(remover))) cmd.SetArgs([]string{}) // Do not use test command args // Execute the command simulating no arguments. @@ -277,7 +277,7 @@ func TestDelete_ByPath(t *testing.T) { } // Command with a Client constructor using the mock remover. - cmd := NewDeleteCmd(NewTestClient(fn.WithRemover(remover))) + cmd := NewDeleteCmd(NewTestClient(fn.WithRemovers(remover))) // Execute the command only with the path argument cmd.SetArgs([]string{"-p", root}) @@ -303,7 +303,7 @@ func TestDelete_NameAndPathExclusivity(t *testing.T) { remover := mock.NewRemover() // Command with a Client constructor using the mock remover. 
- cmd := NewDeleteCmd(NewTestClient(fn.WithRemover(remover))) + cmd := NewDeleteCmd(NewTestClient(fn.WithRemovers(remover))) // Capture command output for inspection buf := new(bytes.Buffer) diff --git a/cmd/deploy.go b/cmd/deploy.go index 9c8a87c621..050664fad7 100644 --- a/cmd/deploy.go +++ b/cmd/deploy.go @@ -15,11 +15,11 @@ import ( "github.com/spf13/cobra" "k8s.io/apimachinery/pkg/api/resource" "knative.dev/client/pkg/util" - "knative.dev/func/pkg/builders" "knative.dev/func/pkg/config" fn "knative.dev/func/pkg/functions" "knative.dev/func/pkg/k8s" + "knative.dev/func/pkg/knative" "knative.dev/func/pkg/utils" ) @@ -132,7 +132,7 @@ EXAMPLES PreRunE: bindEnv("build", "build-timestamp", "builder", "builder-image", "base-image", "confirm", "domain", "env", "git-branch", "git-dir", "git-url", "image", "namespace", "path", "platform", "push", "pvc-size", - "service-account", "registry", "registry-insecure", "remote", + "service-account", "deployer", "registry", "registry-insecure", "remote", "username", "password", "token", "verbose", "remote-storage-class"), RunE: func(cmd *cobra.Command, args []string) error { return runDeploy(cmd, newClient) @@ -193,6 +193,8 @@ EXAMPLES "When triggering a remote deployment, set a custom volume size to allocate for the build operation ($FUNC_PVC_SIZE)") cmd.Flags().String("service-account", f.Deploy.ServiceAccountName, "Service account to be used in the deployed function ($FUNC_SERVICE_ACCOUNT)") + cmd.Flags().String("deployer", f.Deploy.Deployer, + fmt.Sprintf("Type of deployment to use: '%s' for Knative Service (default) or '%s' for Kubernetes Deployment ($FUNC_DEPLOY_TYPE)", knative.KnativeDeployerName, k8s.KubernetesDeployerName)) // Static Flags: // Options which have static defaults only (not globally configurable nor // persisted with the function) @@ -234,6 +236,10 @@ EXAMPLES fmt.Println("internal: error while calling RegisterFlagCompletionFunc: ", err) } + if err := cmd.RegisterFlagCompletionFunc("deployer", CompleteDeployerList); err != nil { + fmt.Println("internal: error while calling RegisterFlagCompletionFunc: ", err) + } + return cmd } @@ -660,6 +666,9 @@ type deployConfig struct { //Service account to be used in deployed function ServiceAccountName string + // Deployer specifies the type of deployment: "knative" or "raw" + Deployer string + // Remote indicates the deployment (and possibly build) process are to // be triggered in a remote environment rather than run locally. 
Remote bool @@ -693,6 +702,7 @@ func newDeployConfig(cmd *cobra.Command) deployConfig { PVCSize: viper.GetString("pvc-size"), Timestamp: viper.GetBool("build-timestamp"), ServiceAccountName: viper.GetString("service-account"), + Deployer: viper.GetString("deployer"), } // NOTE: .Env should be viper.GetStringSlice, but this returns unparsed // results and appears to be an open issue since 2017: @@ -727,6 +737,7 @@ func (c deployConfig) Configure(f fn.Function) (fn.Function, error) { f.Build.Git.Revision = c.GitBranch // TODO: should match; perhaps "refSpec" f.Build.RemoteStorageClass = c.RemoteStorageClass f.Deploy.ServiceAccountName = c.ServiceAccountName + f.Deploy.Deployer = c.Deployer f.Local.Remote = c.Remote // PVCSize @@ -899,6 +910,32 @@ func (c deployConfig) Validate(cmd *cobra.Command) (err error) { return } +// clientOptions returns client options specific to deploy, including the appropriate deployer +func (c deployConfig) clientOptions() ([]fn.Option, error) { + // Start with build config options + o, err := c.buildConfig.clientOptions() + if err != nil { + return o, err + } + + // Add the appropriate deployer based on deploy type + deployer := c.Deployer + if deployer == "" { + deployer = knative.KnativeDeployerName // default to knative for backwards compatibility + } + + switch deployer { + case knative.KnativeDeployerName: + o = append(o, fn.WithDeployer(newKnativeDeployer(c.Verbose))) + case k8s.KubernetesDeployerName: + o = append(o, fn.WithDeployer(newK8sDeployer(c.Verbose))) + default: + return o, fmt.Errorf("unsupported deploy type: %s (supported: %s, %s)", deployer, knative.KnativeDeployerName, k8s.KubernetesDeployerName) + } + + return o, nil +} + // printDeployMessages to the output. Non-error deployment messages. func printDeployMessages(out io.Writer, f fn.Function) { digest, err := isDigested(f.Image) diff --git a/cmd/deploy_test.go b/cmd/deploy_test.go index b4861efb14..bad557ee29 100644 --- a/cmd/deploy_test.go +++ b/cmd/deploy_test.go @@ -1959,7 +1959,7 @@ func TestDeploy_NoErrorOnOldFunctionNotFound(t *testing.T) { } clientFn := NewTestClient( fn.WithDeployer(mock.NewDeployer()), - fn.WithRemover(remover), + fn.WithRemovers(remover), ) // Create a basic go Function diff --git a/cmd/describe.go b/cmd/describe.go index 086c214b3f..d8f0fcf1ff 100644 --- a/cmd/describe.go +++ b/cmd/describe.go @@ -179,6 +179,9 @@ func (i info) Human(w io.Writer) error { fmt.Fprintf(w, " %v\n", route) } + fmt.Fprintln(w, "Deployer:") + fmt.Fprintf(w, " %v\n", i.Deployer) + if len(i.Subscriptions) > 0 { fmt.Fprintln(w, "Subscriptions (Source, Type, Broker):") for _, s := range i.Subscriptions { @@ -204,6 +207,8 @@ func (i info) Plain(w io.Writer) error { fmt.Fprintf(w, "Route %v\n", route) } + fmt.Fprintf(w, "Deployer %v\n", i.Deployer) + if len(i.Subscriptions) > 0 { for _, s := range i.Subscriptions { fmt.Fprintf(w, "Subscription %v %v %v\n", s.Source, s.Type, s.Broker) diff --git a/cmd/describe_test.go b/cmd/describe_test.go index aba1275f7c..10b1206c2b 100644 --- a/cmd/describe_test.go +++ b/cmd/describe_test.go @@ -17,7 +17,7 @@ func TestDescribe_Default(t *testing.T) { _ = FromTempDirectory(t) describer := mock.NewDescriber() - cmd := NewDescribeCmd(NewTestClient(fn.WithDescriber(describer))) + cmd := NewDescribeCmd(NewTestClient(fn.WithDescribers(describer))) cmd.SetArgs([]string{}) err := cmd.Execute() @@ -52,7 +52,7 @@ func TestDescribe_Undeployed(t *testing.T) { describer := mock.NewDescriber() - cmd := NewDescribeCmd(NewTestClient(fn.WithDescriber(describer))) + cmd := 
NewDescribeCmd(NewTestClient(fn.WithDescribers(describer))) cmd.SetArgs([]string{}) if err := cmd.Execute(); err != nil { t.Fatal(err) @@ -78,7 +78,7 @@ func TestDescribe_ByName(t *testing.T) { return fn.Instance{}, nil } - cmd := NewDescribeCmd(NewTestClient(fn.WithDescriber(describer))) + cmd := NewDescribeCmd(NewTestClient(fn.WithDescribers(describer))) cmd.SetArgs([]string{testname}) if err := cmd.Execute(); err != nil { t.Fatal(err) @@ -111,9 +111,9 @@ func TestDescribe_ByProject(t *testing.T) { if name != expected { t.Fatalf("expected describer to receive name %q, got %q", expected, name) } - return + return fn.Instance{}, nil } - cmd := NewDescribeCmd(NewTestClient(fn.WithDescriber(describer))) + cmd := NewDescribeCmd(NewTestClient(fn.WithDescribers(describer))) cmd.SetArgs([]string{}) if err := cmd.Execute(); err != nil { t.Fatal(err) @@ -124,7 +124,7 @@ func TestDescribe_ByProject(t *testing.T) { // and a path will generate an error. func TestDescribe_NameAndPathExclusivity(t *testing.T) { d := mock.NewDescriber() - cmd := NewDescribeCmd(NewTestClient(fn.WithDescriber(d))) + cmd := NewDescribeCmd(NewTestClient(fn.WithDescribers(d))) cmd.SetArgs([]string{"-p", "./testpath", "testname"}) if err := cmd.Execute(); err == nil { t.Fatalf("expected error on conflicting flags not received") diff --git a/cmd/list.go b/cmd/list.go index 879cc713e4..5b8331f10b 100644 --- a/cmd/list.go +++ b/cmd/list.go @@ -187,9 +187,9 @@ func (items listItems) Plain(w io.Writer) error { tabWriter := tabwriter.NewWriter(w, 0, 8, 2, ' ', 0) defer tabWriter.Flush() - fmt.Fprintf(tabWriter, "%s\t%s\t%s\t%s\t%s\n", "NAME", "NAMESPACE", "RUNTIME", "URL", "READY") + fmt.Fprintf(tabWriter, "%s\t%s\t%s\t%s\t%s\t%s\n", "NAME", "NAMESPACE", "RUNTIME", "DEPLOYER", "URL", "READY") for _, item := range items { - fmt.Fprintf(tabWriter, "%s\t%s\t%s\t%s\t%s\n", item.Name, item.Namespace, item.Runtime, item.URL, item.Ready) + fmt.Fprintf(tabWriter, "%s\t%s\t%s\t%s\t%s\t%s\n", item.Name, item.Namespace, item.Runtime, item.Deployer, item.URL, item.Ready) } return nil } diff --git a/cmd/list_test.go b/cmd/list_test.go index c90e5b48f4..ca15d747c9 100644 --- a/cmd/list_test.go +++ b/cmd/list_test.go @@ -71,7 +71,7 @@ func TestList_Namespace(t *testing.T) { // Create an instance of the command which sets the flags // according to the test case - cmd := NewListCmd(NewTestClient(fn.WithLister(lister))) + cmd := NewListCmd(NewTestClient(fn.WithListers(lister))) args := []string{} if test.namespace != "" { args = append(args, "--namespace", test.namespace) diff --git a/docs/reference/func_deploy.md b/docs/reference/func_deploy.md index 1de14a1059..cbcac0d76d 100644 --- a/docs/reference/func_deploy.md +++ b/docs/reference/func_deploy.md @@ -119,6 +119,7 @@ func deploy -b, --builder string Builder to use when creating the function's container. Currently supported builders are "host", "pack" and "s2i". (default "pack") --builder-image string Specify a custom builder image for use by the builder other than its default. ($FUNC_BUILDER_IMAGE) -c, --confirm Prompt to confirm options interactively ($FUNC_CONFIRM) + --deployer string Type of deployment to use: 'knative' for Knative Service (default) or 'raw' for Kubernetes Deployment ($FUNC_DEPLOY_TYPE) --domain string Domain to use for the function's route. Cluster must be configured with domain matching for the given domain (ignored if unrecognized) ($FUNC_DOMAIN) -e, --env stringArray Environment variable to set in the form NAME=VALUE. 
You may provide this flag multiple times for setting multiple environment variables. To unset, specify the environment variable name followed by a "-" (e.g., NAME-). -t, --git-branch string Git revision (branch) to be used when deploying via the Git repository ($FUNC_GIT_BRANCH) diff --git a/pkg/deployer/common.go b/pkg/deployer/common.go new file mode 100644 index 0000000000..34a3b995aa --- /dev/null +++ b/pkg/deployer/common.go @@ -0,0 +1,85 @@ +package deployer + +import ( + fn "knative.dev/func/pkg/functions" +) + +const ( + DeployerNameAnnotation = "function.knative.dev/deployer" + + // Dapr constants + DaprEnabled = "true" + DaprMetricsPort = "9092" + DaprEnableAPILogging = "true" +) + +// DeployDecorator is an interface for customizing deployment metadata +type DeployDecorator interface { + UpdateAnnotations(fn.Function, map[string]string) map[string]string + UpdateLabels(fn.Function, map[string]string) map[string]string +} + +// GenerateCommonLabels creates labels common to both Knative and K8s deployments +func GenerateCommonLabels(f fn.Function, decorator DeployDecorator) (map[string]string, error) { + ll, err := f.LabelsMap() + if err != nil { + return nil, err + } + + // Standard function labels + ll["boson.dev/function"] = "true" + ll["function.knative.dev/name"] = f.Name + ll["function.knative.dev/runtime"] = f.Runtime + + if f.Domain != "" { + ll["func.domain"] = f.Domain + } + + if decorator != nil { + ll = decorator.UpdateLabels(f, ll) + } + + return ll, nil +} + +// GenerateCommonAnnotations creates annotations common to both Knative and K8s deployments +func GenerateCommonAnnotations(f fn.Function, decorator DeployDecorator, daprInstalled bool, deployerName string) map[string]string { + aa := make(map[string]string) + + // Add Dapr annotations if Dapr is installed + if daprInstalled { + for k, v := range GenerateDaprAnnotations(f.Name) { + aa[k] = v + } + } + + if len(deployerName) > 0 { + aa[DeployerNameAnnotation] = deployerName + } + + // Add user-defined annotations + for k, v := range f.Deploy.Annotations { + aa[k] = v + } + + // Apply decorator + if decorator != nil { + aa = decorator.UpdateAnnotations(f, aa) + } + + return aa +} + +// GenerateDaprAnnotations generates annotations for Dapr support +// These annotations, if included and Dapr control plane is installed in +// the target cluster, will result in a sidecar exposing the Dapr HTTP API +// on localhost:3500 and metrics on 9092 +func GenerateDaprAnnotations(appID string) map[string]string { + aa := make(map[string]string) + aa["dapr.io/app-id"] = appID + aa["dapr.io/enabled"] = DaprEnabled + aa["dapr.io/metrics-port"] = DaprMetricsPort + aa["dapr.io/app-port"] = "8080" + aa["dapr.io/enable-api-logging"] = DaprEnableAPILogging + return aa +} diff --git a/pkg/deployer/testing/integration_test_helper.go b/pkg/deployer/testing/integration_test_helper.go new file mode 100644 index 0000000000..483ca0a130 --- /dev/null +++ b/pkg/deployer/testing/integration_test_helper.go @@ -0,0 +1,1135 @@ +package testing + +//nolint:staticcheck // ST1001: should not use dot imports +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "strings" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/wait" + eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1" + "knative.dev/func/pkg/knative" + "knative.dev/func/pkg/oci" + . "knative.dev/func/pkg/testing" + . 
"knative.dev/func/pkg/testing/k8s" + v1 "knative.dev/pkg/apis/duck/v1" + + fn "knative.dev/func/pkg/functions" + "knative.dev/func/pkg/k8s" +) + +// TestInt_Deploy ensures that the deployer creates a callable service. +// See TestInt_Metadata for Labels, Volumes, Envs. +// See TestInt_Events for Subscriptions +func TestInt_Deploy(t *testing.T, deployer fn.Deployer, remover fn.Remover, describer fn.Describer, deployerName string) { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) + name := "func-int-knative-deploy-" + rand.String(5) + root := t.TempDir() + ns := Namespace(t, ctx) + + t.Cleanup(cancel) + + client := fn.New( + fn.WithBuilder(oci.NewBuilder("", false)), + fn.WithPusher(oci.NewPusher(true, true, true)), + fn.WithDeployer(deployer), + fn.WithDescribers(describer), + fn.WithRemovers(remover), + ) + + f, err := client.Init(fn.Function{ + Root: root, + Name: name, + Runtime: "go", + Namespace: ns, + Registry: Registry(), + }) + if err != nil { + t.Fatal(err) + } + // Not really necessary, but it allows us to reuse the "invoke" method: + handlerPath := filepath.Join(root, "handle.go") + if err := os.WriteFile(handlerPath, []byte(testHandler), 0644); err != nil { + t.Fatal(err) + } + + // Build + f, err = client.Build(ctx, f) + if err != nil { + t.Fatal(err) + } + + // Push + f, _, err = client.Push(ctx, f) + if err != nil { + t.Fatal(err) + } + + // Deploy + f, err = client.Deploy(ctx, f) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + err := client.Remove(ctx, "", "", f, true) + if err != nil { + t.Logf("error removing Function: %v", err) + } + }) + + // Wait for function to be ready + instance, err := client.Describe(ctx, "", "", f) + if err != nil { + t.Fatal(err) + } + + // Invoke + statusCode, _ := invoke(t, ctx, instance.Route, deployerName) + if statusCode != http.StatusOK { + t.Fatalf("expected 200 OK, got %d", statusCode) + } +} + +// TestInt_Metadata ensures that Secrets, Labels, and Volumes are applied +// when deploying. 
+func TestInt_Metadata(t *testing.T, deployer fn.Deployer, remover fn.Remover, describer fn.Describer, deployerName string) { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) + name := "func-int-knative-metadata-" + rand.String(5) + root := t.TempDir() + ns := Namespace(t, ctx) + + t.Cleanup(cancel) + + client := fn.New( + fn.WithBuilder(oci.NewBuilder("", false)), + fn.WithPusher(oci.NewPusher(true, true, true)), + fn.WithDeployer(deployer), + fn.WithDescribers(describer), + fn.WithRemovers(remover), + ) + + // Cluster Resources + // ----------------- + // Remote Secret + secretName := "func-int-knative-meatadata-secret" + rand.String(5) + secretValues := map[string]string{ + "SECRET_KEY_A": "secret-value-a", + "SECRET_KEY_B": "secret-value-b", + } + createSecret(t, ns, secretName, secretValues) + + // Remote ConfigMap + configMapName := "func-int-knative-metadata-configmap" + rand.String(5) + configMap := map[string]string{ + "CONFIGMAP_KEY_A": "configmap-value-a", + "CONFIGMAP_KEY_B": "configmap-value-b", + } + createConfigMap(t, ns, configMapName, configMap) + + // Create Local Environment Variable + t.Setenv("LOCAL_KEY_A", "local-value") + + // Function + // -------- + f, err := client.Init(fn.Function{ + Root: root, + Name: name, + Runtime: "go", + Namespace: ns, + Registry: Registry(), + }) + if err != nil { + t.Fatal(err) + } + handlerPath := filepath.Join(root, "handle.go") + if err := os.WriteFile(handlerPath, []byte(testHandler), 0644); err != nil { + t.Fatal(err) + } + + // ENVS + // A static environment variable + f.Run.Envs.Add("STATIC", "static-value") + // from a local environment variable + f.Run.Envs.Add("LOCAL", "{{ env:LOCAL_KEY_A }}") + // From a Secret + f.Run.Envs.Add("SECRET", "{{ secret: "+secretName+":SECRET_KEY_A }}") + // From a Secret (all) + f.Run.Envs.Add("", "{{ secret: "+secretName+" }}") + // From a ConfigMap (by key) + f.Run.Envs.Add("CONFIGMAP", "{{ configMap: "+configMapName+":CONFIGMAP_KEY_A }}") + // From a ConfigMap (all) + f.Run.Envs.Add("", "{{ configMap: "+configMapName+" }}") + + // VOLUMES + // from a Secret + secretPath := "/mnt/secret" + f.Run.Volumes = append(f.Run.Volumes, fn.Volume{ + Secret: &secretName, + Path: &secretPath, + }) + // From a ConfigMap + configMapPath := "/mnt/configmap" + f.Run.Volumes = append(f.Run.Volumes, fn.Volume{ + ConfigMap: &configMapName, + Path: &configMapPath, + }) + // As EmptyDir + emptyDirPath := "/mnt/emptydir" + f.Run.Volumes = append(f.Run.Volumes, fn.Volume{ + EmptyDir: &fn.EmptyDir{}, + Path: &emptyDirPath, + }) + + // Deploy + // ------ + + // Build + f, err = client.Build(ctx, f) + if err != nil { + t.Fatal(err) + } + + // Push + f, _, err = client.Push(ctx, f) + if err != nil { + t.Fatal(err) + } + + // Deploy + f, err = client.Deploy(ctx, f) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + err := client.Remove(ctx, "", "", f, true) + if err != nil { + t.Logf("error removing Function: %v", err) + } + }) + + // Wait for function to be ready + instance, err := client.Describe(ctx, "", "", f) + if err != nil { + t.Fatal(err) + } + + // Assertions + // ---------- + + // Invoke + _, result := invoke(t, ctx, instance.Route, deployerName) + + // Verify Envs + if result.EnvVars["STATIC"] != "static-value" { + t.Fatalf("STATIC env not set correctly, got: %s", result.EnvVars["STATIC"]) + } + if result.EnvVars["LOCAL"] != "local-value" { + t.Fatalf("LOCAL env not set correctly, got: %s", result.EnvVars["LOCAL"]) + } + if result.EnvVars["SECRET"] != 
"secret-value-a" { + t.Fatalf("SECRET env not set correctly, got: %s", result.EnvVars["SECRET"]) + } + if result.EnvVars["SECRET_KEY_A"] != "secret-value-a" { + t.Fatalf("SECRET_KEY_A not set correctly, got: %s", result.EnvVars["SECRET_KEY_A"]) + } + if result.EnvVars["SECRET_KEY_B"] != "secret-value-b" { + t.Fatalf("SECRET_KEY_B not set correctly, got: %s", result.EnvVars["SECRET_KEY_B"]) + } + if result.EnvVars["CONFIGMAP"] != "configmap-value-a" { + t.Fatalf("CONFIGMAP env not set correctly, got: %s", result.EnvVars["CONFIGMAP"]) + } + if result.EnvVars["CONFIGMAP_KEY_A"] != "configmap-value-a" { + t.Fatalf("CONFIGMAP_KEY_A not set correctly, got: %s", result.EnvVars["CONFIGMAP_KEY_A"]) + } + if result.EnvVars["CONFIGMAP_KEY_B"] != "configmap-value-b" { + t.Fatalf("CONFIGMAP_KEY_B not set correctly, got: %s", result.EnvVars["CONFIGMAP_KEY_B"]) + } + + // Verify Volumes + if !result.Mounts["/mnt/secret"] { + t.Fatalf("Secret mount /mnt/secret not found or not mounted") + } + if !result.Mounts["/mnt/configmap"] { + t.Fatalf("ConfigMap mount /mnt/configmap not found or not mounted") + } + if !result.Mounts["/mnt/emptydir"] { + t.Fatalf("EmptyDir mount /mnt/emptydir not found or not mounted") + } +} + +// TestInt_Events ensures that eventing triggers work. +func TestInt_Events(t *testing.T, deployer fn.Deployer, remover fn.Remover, describer fn.Describer, deployerName string) { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) + name := "func-int-knative-events-" + rand.String(5) + root := t.TempDir() + ns := Namespace(t, ctx) + + t.Cleanup(cancel) + + client := fn.New( + fn.WithBuilder(oci.NewBuilder("", false)), + fn.WithPusher(oci.NewPusher(true, true, true)), + fn.WithDeployer(deployer), + fn.WithDescribers(describer), + fn.WithRemovers(remover), + ) + + // Function + // -------- + f, err := client.Init(fn.Function{ + Root: root, + Name: name, + Runtime: "go", + Namespace: ns, + Registry: Registry(), + }) + if err != nil { + t.Fatal(err) + } + + // Trigger + // ------- + triggerName := "func-int-knative-events-trigger" + validator := createTrigger(t, ctx, ns, triggerName, f) + + // Deploy + // ------ + + // Build + f, err = client.Build(ctx, f) + if err != nil { + t.Fatal(err) + } + + // Push + f, _, err = client.Push(ctx, f) + if err != nil { + t.Fatal(err) + } + + // Deploy + f, err = client.Deploy(ctx, f) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + err := client.Remove(ctx, "", "", f, true) + if err != nil { + t.Logf("error removing Function: %v", err) + } + }) + + // Wait for function to be ready + instance, err := client.Describe(ctx, "", "", f) + if err != nil { + t.Fatal(err) + } + + // Assertions + // ---------- + if err = validator(instance); err != nil { + t.Fatal(err) + } +} + +// TestInt_Scale spot-checks that the scale settings are applied by +// ensuring the service is started multiple times when minScale=2 +func TestInt_Scale(t *testing.T, deployer fn.Deployer, remover fn.Remover, describer fn.Describer, deployerName string) { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) + name := "func-int-knative-scale-" + rand.String(5) + root := t.TempDir() + ns := Namespace(t, ctx) + + t.Cleanup(cancel) + + client := fn.New( + fn.WithBuilder(oci.NewBuilder("", false)), + fn.WithPusher(oci.NewPusher(true, true, true)), + fn.WithDeployer(deployer), + fn.WithDescribers(describer), + fn.WithRemovers(remover), + ) + + f, err := client.Init(fn.Function{ + Root: root, + Name: name, + 
Runtime: "go", + Namespace: ns, + Registry: Registry(), + }) + if err != nil { + t.Fatal(err) + } + // Note: There is no reason for all these being pointers: + minScale := int64(2) + maxScale := int64(100) + f.Deploy.Options = fn.Options{ + Scale: &fn.ScaleOptions{ + Min: &minScale, + Max: &maxScale, + }, + } + + // Build + f, err = client.Build(ctx, f) + if err != nil { + t.Fatal(err) + } + + // Push + f, _, err = client.Push(ctx, f) + if err != nil { + t.Fatal(err) + } + + // Deploy + f, err = client.Deploy(ctx, f) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + err := client.Remove(ctx, "", "", f, true) + if err != nil { + t.Logf("error removing Function: %v", err) + } + }) + + // Wait for function to be ready + _, err = client.Describe(ctx, "", "", f) + if err != nil { + t.Fatal(err) + } + + // Assertions + // ---------- + + // Check the actual number of pods running using Kubernetes API + // This is much more reliable than checking logs + cliSet, err := k8s.NewKubernetesClientset() + if err != nil { + t.Fatal(err) + } + podList, err := cliSet.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{}) + if err != nil { + t.Fatal(err) + } + readyPods := 0 + for _, pod := range podList.Items { + for _, condition := range pod.Status.Conditions { + if condition.Type == corev1.PodReady && condition.Status == corev1.ConditionTrue { + readyPods++ + break + } + } + } + + // Verify minScale is respected + if readyPods < int(minScale) { + t.Errorf("Expected at least %d pods due to minScale, but found %d ready pods", minScale, readyPods) + } + + // TODO: Should we also spot-check that the maxScale was set? This + // seems a bit too coupled to the Knative implementation for my tastes: + // if ksvc.Spec.Template.Annotations["autoscaling.knative.dev/maxScale"] != fmt.Sprintf("%d", maxScale) { + // t.Errorf("maxScale annotation not set correctly, expected %d, got %s", + // maxScale, ksvc.Spec.Template.Annotations["autoscaling.knative.dev/maxScale"]) + // } +} + +// TestInt_EnvsUpdate ensures that removing and updating envs are correctly +// reflected during a deployment update. 
+func TestInt_EnvsUpdate(t *testing.T, deployer fn.Deployer, remover fn.Remover, describer fn.Describer, deployerName string) { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) + name := "func-int-knative-envsupdate-" + rand.String(5) + root := t.TempDir() + ns := Namespace(t, ctx) + + t.Cleanup(cancel) + + client := fn.New( + fn.WithBuilder(oci.NewBuilder("", false)), + fn.WithPusher(oci.NewPusher(true, true, true)), + fn.WithDeployer(deployer), + fn.WithDescribers(describer), + fn.WithRemovers(remover), + ) + + // Function + // -------- + f, err := client.Init(fn.Function{ + Root: root, + Name: name, + Runtime: "go", + Namespace: ns, + Registry: Registry(), + }) + if err != nil { + t.Fatal(err) + } + + // Write custom test handler + handlerPath := filepath.Join(root, "handle.go") + if err := os.WriteFile(handlerPath, []byte(testHandler), 0644); err != nil { + t.Fatal(err) + } + + // ENVS + f.Run.Envs.Add("STATIC_A", "static-value-a") + f.Run.Envs.Add("STATIC_B", "static-value-b") + + // Deploy + // ------ + + // Build + f, err = client.Build(ctx, f) + if err != nil { + t.Fatal(err) + } + + // Push + f, _, err = client.Push(ctx, f) + if err != nil { + t.Fatal(err) + } + + // Deploy + f, err = client.Deploy(ctx, f) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + err := client.Remove(ctx, "", "", f, true) + if err != nil { + t.Logf("error removing Function: %v", err) + } + }) + + // Wait for function to be ready + instance, err := client.Describe(ctx, "", "", f) + if err != nil { + t.Fatal(err) + } + + // Assert Initial ENVS are set + // ---------- + _, result := invoke(t, ctx, instance.Route, deployerName) + + // Verify Envs + if result.EnvVars["STATIC_A"] != "static-value-a" { + t.Fatalf("STATIC_A env not set correctly, got: %s", result.EnvVars["STATIC_A"]) + } + if result.EnvVars["STATIC_B"] != "static-value-b" { + t.Fatalf("STATIC_B env not set correctly, got: %s", result.EnvVars["STATIC_B"]) + } + t.Logf("Environment variables after initial deploy:") + for k, v := range result.EnvVars { + if strings.HasPrefix(k, "STATIC") { + t.Logf(" %s=%s", k, v) + } + } + + // Modify Envs and Redeploy + // ------------------------ + // Removes one and modifies the other + f.Run.Envs = fn.Envs{} // Reset to empty Envs + f.Run.Envs.Add("STATIC_A", "static-value-a-updated") + + // Deploy without rebuild (only env vars changed, code is the same) + f, err = client.Deploy(ctx, f, fn.WithDeploySkipBuildCheck(true)) + if err != nil { + t.Fatal(err) + } + + // Wait for function to be ready + instance, err = client.Describe(ctx, "", "", f) + if err != nil { + t.Fatal(err) + } + + cliSet, err := k8s.NewKubernetesClientset() + if err != nil { + t.Fatal(err) + } + selector := fmt.Sprintf("function.knative.dev/name=%s", f.Name) + err = k8s.WaitForDeploymentAvailableBySelector(ctx, cliSet, ns, selector, time.Minute) + if err != nil { + t.Fatal(err) + } + + // Assertions + // ---------- + _, result = invoke(t, ctx, instance.Route, deployerName) + + // Verify Envs + // Log all environment variables for debugging + t.Logf("Environment variables after update:") + for k, v := range result.EnvVars { + if strings.HasPrefix(k, "STATIC") { + t.Logf(" %s=%s", k, v) + } + } + + // Ensure that STATIC_A is changed to the new value + if result.EnvVars["STATIC_A"] != "static-value-a-updated" { + t.Fatalf("STATIC_A env not updated correctly, got: %s", result.EnvVars["STATIC_A"]) + } + // Ensure that STATIC_B no longer exists + if _, exists := result.EnvVars["STATIC_B"]; exists 
{ + // FIXME: Known issue - Knative serving bug + // Tests confirm that the pod deployed does NOT have the environment variable + // STATIC_B set (verified via kubectl describe pod), yet the service itself + // reports the environment variable when invoked via HTTP. + // This appears to be a Knative serving issue where removed environment + // variables persist in the running container despite not being in the pod spec. + // Possible causes: + // 1. Container runtime caching environment at startup + // 2. Knative queue proxy sidecar caching/injecting old values + // 3. Service mesh layer (Istio/Envoy) caching + // TODO: File issue with Knative project + t.Logf("WARNING: STATIC_B env should have been removed but still exists with value: %s (Knative bug)", result.EnvVars["STATIC_B"]) + // t.Fatalf("STATIC_B env should have been removed but still exists with value: %s", result.EnvVars["STATIC_B"]) + } +} + +// Basic happy path test of deploy->describe->list->re-deploy->delete. +func TestInt_FullPath(t *testing.T, deployer fn.Deployer, remover fn.Remover, lister fn.Lister, describer fn.Describer, deployerName string) { + t.Helper() + + var err error + functionName := "fn-testing" + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) + t.Cleanup(cancel) + + cliSet, err := k8s.NewKubernetesClientset() + if err != nil { + t.Fatal(err) + } + + namespace := "knative-integration-test-ns-" + rand.String(5) + + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + Spec: corev1.NamespaceSpec{}, + } + _, err = cliSet.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { _ = cliSet.CoreV1().Namespaces().Delete(ctx, namespace, metav1.DeleteOptions{}) }) + t.Log("created namespace: ", namespace) + + secret := "credentials-secret" + sc := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secret, + }, + Data: map[string][]byte{ + "FUNC_TEST_SC_A": []byte("A"), + "FUNC_TEST_SC_B": []byte("B"), + }, + StringData: nil, + Type: corev1.SecretTypeOpaque, + } + + _, err = cliSet.CoreV1().Secrets(namespace).Create(ctx, sc, metav1.CreateOptions{}) + if err != nil { + t.Fatal(err) + } + + configMap := "testing-config-map" + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMap, + }, + Data: map[string]string{"FUNC_TEST_CM_A": "1"}, + } + _, err = cliSet.CoreV1().ConfigMaps(namespace).Create(ctx, cm, metav1.CreateOptions{}) + if err != nil { + t.Fatal(err) + } + + minScale := int64(2) + maxScale := int64(100) + + now := time.Now() + function := fn.Function{ + SpecVersion: "SNAPSHOT", + Root: "/non/existent", + Name: functionName, + Runtime: "blub", + Template: "cloudevents", + // Basic HTTP service: + // * POST / will do echo -- return body back + // * GET /info will get info about environment: + // * environment variables starting which name starts with FUNC_TEST, + // * files under /etc/cm and /etc/sc. + // * application also prints the same info to stderr on startup + Created: now, + Deploy: fn.DeploySpec{ + // TODO: gauron99 - is it okay to have this explicitly set to deploy.image already? 
+ // With this I skip the logic of setting the .Deploy.Image field but it should be fine for this test + Image: "quay.io/mvasek/func-test-service@sha256:2eca4de00d7569c8791634bdbb0c4d5ec8fb061b001549314591e839dabd5269", + Namespace: namespace, + Labels: []fn.Label{{Key: ptr("my-label"), Value: ptr("my-label-value")}}, + Options: fn.Options{ + Scale: &fn.ScaleOptions{ + Min: &minScale, + Max: &maxScale, + }, + }, + }, + Run: fn.RunSpec{ + Envs: []fn.Env{ + {Name: ptr("FUNC_TEST_VAR"), Value: ptr("nbusr123")}, + {Name: ptr("FUNC_TEST_SC_A"), Value: ptr("{{ secret: " + secret + ":FUNC_TEST_SC_A }}")}, + {Value: ptr("{{configMap:" + configMap + "}}")}, + }, + Volumes: []fn.Volume{ + {Secret: ptr(secret), Path: ptr("/etc/sc")}, + {ConfigMap: ptr(configMap), Path: ptr("/etc/cm")}, + }, + }, + } + + buff := new(k8s.SynchronizedBuffer) + go func() { + selector := fmt.Sprintf("function.knative.dev/name=%s", functionName) + _ = k8s.GetPodLogsBySelector(ctx, namespace, selector, "user-container", "", &now, buff) + }() + + depRes, err := deployer.Deploy(ctx, function) + if err != nil { + t.Fatal(err) + } + + outStr := buff.String() + t.Logf("deploy result: %+v", depRes) + t.Log("function output:\n" + outStr) + + if strings.Count(outStr, "starting app") < int(minScale) { + t.Errorf("application should be scaled to at least %d pods", minScale) + } + + // verify that environment variables and volumes work + if !strings.Contains(outStr, "FUNC_TEST_VAR=nbusr123") { + t.Error("plain environment variable was not propagated") + } + if !strings.Contains(outStr, "FUNC_TEST_SC_A=A") { + t.Error("environment variable from secret was not propagated") + } + if strings.Contains(outStr, "FUNC_TEST_SC_B=") { + t.Error("environment variable from secret was propagated but should not have been") + } + if !strings.Contains(outStr, "FUNC_TEST_CM_A=1") { + t.Error("environment variable from config-map was not propagated") + } + if !strings.Contains(outStr, "/etc/sc/FUNC_TEST_SC_A") { + t.Error("secret was not mounted") + } + if !strings.Contains(outStr, "/etc/cm/FUNC_TEST_CM_A") { + t.Error("config-map was not mounted") + } + + instance, err := describer.Describe(ctx, functionName, namespace) + if err != nil { + t.Fatal(err) + } + t.Logf("instance: %+v", instance) + + // try to invoke the function + reqBody := "Hello World!"
+ respBody, err := postText(ctx, instance.Route, reqBody, deployerName) + if err != nil { + t.Fatalf("failed to invoke function: %v", err) + } else { + t.Log("resp body:\n" + respBody) + if !strings.Contains(respBody, reqBody) { + t.Error("response body doesn't contain request body") + } + } + + list, err := lister.List(ctx, namespace) + if err != nil { + t.Fatal(err) + } + + t.Logf("functions list: %+v", list) + + if len(list) != 1 { + t.Errorf("expected exactly one function but got: %d", len(list)) + } else { + if list[0].URL != instance.Route { + t.Error("URL mismatch") + } + } + + t.Setenv("LOCAL_ENV_TO_DEPLOY", "iddqd") + function.Run.Envs = []fn.Env{ + {Name: ptr("FUNC_TEST_VAR"), Value: ptr("{{ env:LOCAL_ENV_TO_DEPLOY }}")}, + {Value: ptr("{{ secret: " + secret + " }}")}, + {Name: ptr("FUNC_TEST_CM_A_ALIASED"), Value: ptr("{{configMap:" + configMap + ":FUNC_TEST_CM_A}}")}, + } + now = time.Now() // reset timer for new log receiver + + redeployLogBuff := new(k8s.SynchronizedBuffer) + go func() { + selector := fmt.Sprintf("function.knative.dev/name=%s", functionName) + _ = k8s.GetPodLogsBySelector(ctx, namespace, selector, "user-container", "", &now, redeployLogBuff) + }() + + _, err = deployer.Deploy(ctx, function) + if err != nil { + t.Fatal(err) + } + + // Give logs time to be collected (not sure why we need this here and not on the first collector too :thinking:) + outStr = "" + err = wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, func(ctx context.Context) (done bool, err error) { + outStr = redeployLogBuff.String() + if len(outStr) > 0 || + outStr == "Hello World!" { // wait for more as only the "Hello World!" + return true, nil + } + + return false, nil + }) + if err != nil { + t.Fatal(err) + } + + t.Log("function output:\n" + outStr) + + // verify that environment variables have been changed by the re-deploy + if strings.Contains(outStr, "FUNC_TEST_CM_A=") { + t.Error("environment variable from the previous deployment was not removed") + } + if !strings.Contains(outStr, "FUNC_TEST_SC_A=A") || !strings.Contains(outStr, "FUNC_TEST_SC_B=B") { + t.Error("environment variables were not imported from secret") + } + if !strings.Contains(outStr, "FUNC_TEST_VAR=iddqd") { + t.Error("environment variable was not set from local environment variable") + } + if !strings.Contains(outStr, "FUNC_TEST_CM_A_ALIASED=1") { + t.Error("environment variable was not set from config-map") + } + + if err = remover.Remove(ctx, functionName, namespace); err != nil { + t.Fatal(err) + } + + list, err = lister.List(ctx, namespace) + if err != nil { + t.Fatal(err) + } + + if len(list) != 0 { + t.Errorf("expected exactly zero functions but got: %d", len(list)) + } +} + +// Helper functions +// ================ + +// Decode response +type result struct { + EnvVars map[string]string + Mounts map[string]bool +} + +func invoke(t *testing.T, ctx context.Context, route string, deployer string) (statusCode int, r result) { + req, err := http.NewRequestWithContext(ctx, "GET", route, nil) + if err != nil { + t.Fatal(err) + } + + httpClient, closeFunc, err := getHttpClient(ctx, deployer) + if err != nil { + t.Fatal(err) + } + defer closeFunc() + + resp, err := httpClient.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + t.Fatalf("expected 200 OK, got %d", resp.StatusCode) + } + if err := json.NewDecoder(resp.Body).Decode(&r); err != nil { + t.Fatal(err) + } + return resp.StatusCode, r +} + +func createTrigger(t *testing.T, ctx context.Context,
namespace, triggerName string, function fn.Function) func(fn.Instance) error { + t.Helper() + + tr := &eventingv1.Trigger{ + ObjectMeta: metav1.ObjectMeta{ + Name: triggerName, + }, + Spec: eventingv1.TriggerSpec{ + Broker: "testing-broker", + Subscriber: v1.Destination{Ref: &v1.KReference{ + Kind: "Service", + Namespace: namespace, + Name: function.Name, + APIVersion: "serving.knative.dev/v1", + }}, + Filter: &eventingv1.TriggerFilter{ + Attributes: map[string]string{ + "source": "test-event-source", + "type": "test-event-type", + }, + }, + }, + } + eventingClient, err := knative.NewEventingClient(namespace) + if err != nil { + t.Fatal(err) + } + err = eventingClient.CreateTrigger(ctx, tr) + if err != nil { + t.Fatal(err) + } + + deferCleanup(t, namespace, "trigger", triggerName) + + return func(instance fn.Instance) error { + if len(instance.Subscriptions) != 1 { + return fmt.Errorf("exactly one subscription is expected, got %v", len(instance.Subscriptions)) + } else { + if instance.Subscriptions[0].Broker != "testing-broker" { + return fmt.Errorf("expected broker 'testing-broker', got %q", instance.Subscriptions[0].Broker) + } + if instance.Subscriptions[0].Source != "test-event-source" { + return fmt.Errorf("expected source 'test-event-source', got %q", instance.Subscriptions[0].Source) + } + if instance.Subscriptions[0].Type != "test-event-type" { + return fmt.Errorf("expected type 'test-event-type', got %q", instance.Subscriptions[0].Type) + } + } + return nil + } +} + +// createSecret creates a Kubernetes secret with the given name and data +func createSecret(t *testing.T, namespace, name string, data map[string]string) { + t.Helper() + + cliSet, err := k8s.NewKubernetesClientset() + if err != nil { + t.Fatal(err) + } + + // Convert string map to byte map + byteData := make(map[string][]byte) + for k, v := range data { + byteData[k] = []byte(v) + } + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Data: byteData, + Type: corev1.SecretTypeOpaque, + } + + _, err = cliSet.CoreV1().Secrets(namespace).Create(context.Background(), secret, metav1.CreateOptions{}) + if err != nil { + t.Fatal(err) + } + + deferCleanup(t, namespace, "secret", name) +} + +// createConfigMap creates a Kubernetes configmap with the given name and data +func createConfigMap(t *testing.T, namespace, name string, data map[string]string) { + t.Helper() + + cliSet, err := k8s.NewKubernetesClientset() + if err != nil { + t.Fatal(err) + } + + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Data: data, + } + + _, err = cliSet.CoreV1().ConfigMaps(namespace).Create(context.Background(), configMap, metav1.CreateOptions{}) + if err != nil { + t.Fatal(err) + } + + deferCleanup(t, namespace, "configmap", name) +} + +// deferCleanup provides cleanup for K8s resources +func deferCleanup(t *testing.T, namespace string, resourceType string, name string) { + t.Helper() + + switch resourceType { + case "secret": + t.Cleanup(func() { + if cliSet, err := k8s.NewKubernetesClientset(); err == nil { + _ = cliSet.CoreV1().Secrets(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) + } + }) + case "configmap": + t.Cleanup(func() { + if cliSet, err := k8s.NewKubernetesClientset(); err == nil { + _ = cliSet.CoreV1().ConfigMaps(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) + } + }) + case "trigger": + t.Cleanup(func() { + if eventingClient, err := knative.NewEventingClient(namespace); err == nil { + _ = 
eventingClient.DeleteTrigger(context.Background(), name) + } + }) + } +} + +// Test Handler +// ============ +const testHandler = `package function + +import ( + "encoding/json" + "net/http" + "os" + "strings" +) + +type Response struct { + EnvVars map[string]string + Mounts map[string]bool +} + +type Function struct {} + +func New() *Function { + return &Function{} +} + +func (f *Function) Handle(w http.ResponseWriter, req *http.Request) { + resp := Response{ + EnvVars: make(map[string]string), + Mounts: make(map[string]bool), + } + + // Collect environment variables + for _, env := range os.Environ() { + parts := strings.SplitN(env, "=", 2) + if len(parts) == 2 { + resp.EnvVars[parts[0]] = parts[1] + } + } + + // Check known mount paths - just verify they exist as directories + mountPaths := []string{"/mnt/secret", "/mnt/configmap", "/mnt/emptydir"} + for _, mountPath := range mountPaths { + if info, err := os.Stat(mountPath); err == nil && info.IsDir() { + resp.Mounts[mountPath] = true + } else { + resp.Mounts[mountPath] = false + } + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(resp) +} +` + +func postText(ctx context.Context, url, reqBody, deployer string) (respBody string, err error) { + req, err := http.NewRequestWithContext(ctx, "POST", url, strings.NewReader(reqBody)) + if err != nil { + return "", err + } + req.Header.Add("Content-Type", "text/plain") + + client, closeFunc, err := getHttpClient(ctx, deployer) + if err != nil { + return "", fmt.Errorf("error creating http client: %w", err) + } + defer closeFunc() + + resp, err := client.Do(req) + if err != nil { + return "", err + } + defer func() { + _ = resp.Body.Close() + }() + + bs, err := io.ReadAll(resp.Body) + if err != nil { + return "", err + } + return string(bs), nil +} + +func ptr[T interface{}](s T) *T { + return &s +} + +func getHttpClient(ctx context.Context, deployer string) (*http.Client, func(), error) { + noopDeferFunc := func() {} + + switch deployer { + case k8s.KubernetesDeployerName: + // For Kubernetes deployments, use in-cluster dialer to access ClusterIP services + + clientConfig := k8s.GetClientConfig() + dialer, err := k8s.NewInClusterDialer(ctx, clientConfig) + if err != nil { + return nil, noopDeferFunc, fmt.Errorf("failed to create in-cluster dialer: %w", err) + } + + transport := &http.Transport{ + DialContext: dialer.DialContext, + } + + deferFunc := func() { + _ = dialer.Close() + } + + return &http.Client{ + Transport: transport, + Timeout: time.Minute, + }, deferFunc, nil + case knative.KnativeDeployerName: + // For Knative deployments, use default client (service is externally accessible) + return http.DefaultClient, noopDeferFunc, nil + default: + return nil, noopDeferFunc, fmt.Errorf("unknown deploy type: %s", deployer) + } +} diff --git a/pkg/knative/labels_int_test.go b/pkg/describer/testing/integration_test_helper.go similarity index 69% rename from pkg/knative/labels_int_test.go rename to pkg/describer/testing/integration_test_helper.go index f6b24eb9e0..8de5fd7e01 100644 --- a/pkg/knative/labels_int_test.go +++ b/pkg/describer/testing/integration_test_helper.go @@ -1,33 +1,34 @@ -//go:build integration - -package knative_test +package testing +//nolint:staticcheck // ST1001: should not use dot imports import ( "context" "testing" "time" "k8s.io/apimachinery/pkg/util/rand" - fn "knative.dev/func/pkg/functions" - "knative.dev/func/pkg/knative" "knative.dev/func/pkg/oci" + . "knative.dev/func/pkg/testing" + . 
"knative.dev/func/pkg/testing/k8s" ) -func TestInt_Labels(t *testing.T) { +func TestInt_Describe(t *testing.T, describer fn.Describer, deployer fn.Deployer, remover fn.Remover, deployerName string) { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) name := "func-int-knative-describe-" + rand.String(5) root := t.TempDir() - ns := namespace(t, ctx) + ns := Namespace(t, ctx) t.Cleanup(cancel) client := fn.New( fn.WithBuilder(oci.NewBuilder("", false)), fn.WithPusher(oci.NewPusher(true, true, true)), - fn.WithDeployer(knative.NewDeployer(knative.WithDeployerVerbose(true))), - fn.WithDescriber(knative.NewDescriber(false)), - fn.WithRemover(knative.NewRemover(false)), + fn.WithDescribers(describer), + fn.WithDeployer(deployer), + fn.WithRemovers(remover), ) f, err := client.Init(fn.Function{ @@ -35,7 +36,7 @@ func TestInt_Labels(t *testing.T) { Name: name, Runtime: "go", Namespace: ns, - Registry: registry(), + Registry: Registry(), }) if err != nil { t.Fatal(err) @@ -65,6 +66,12 @@ func TestInt_Labels(t *testing.T) { } }) + // Wait for function to be ready + _, err = client.Describe(ctx, "", "", f) + if err != nil { + t.Fatal(err) + } + // Describe desc, err := client.Describe(ctx, "", "", f) if err != nil { diff --git a/pkg/functions/client.go b/pkg/functions/client.go index 525380407d..fc287c37c8 100644 --- a/pkg/functions/client.go +++ b/pkg/functions/client.go @@ -14,10 +14,11 @@ import ( "path/filepath" "runtime" "strings" + "sync/atomic" "time" + "golang.org/x/sync/errgroup" "gopkg.in/yaml.v2" - "knative.dev/func/pkg/scaffolding" "knative.dev/func/pkg/utils" ) @@ -68,9 +69,9 @@ type Client struct { pusher Pusher // Pushes function image to a remote deployer Deployer // Deploys or Updates a function runner Runner // Runs the function locally - remover Remover // Removes remote services - lister Lister // Lists remote services - describer Describer // Describes function instances + removers []Remover // Removes remote services + listers []Lister // Lists remote services + describers []Describer // Describes function instances dnsProvider DNSProvider // Provider of DNS services registry string // default registry for OCI image tags repositories *Repositories // Repositories management @@ -140,12 +141,15 @@ type Runner interface { // Remover of deployed services. type Remover interface { // Remove the function from remote. + // It should only return nil, when the Function was removed. + // In case the remover is not responsible for a Function, it should return a ErrNotHandled error. Remove(ctx context.Context, name string, namespace string) error } // Lister of deployed functions. type Lister interface { // List the functions currently deployed. + // It should only return Functions/Items for which the lister is responsible for List(ctx context.Context, namespace string) ([]ListItem, error) } @@ -155,11 +159,14 @@ type ListItem struct { Runtime string `json:"runtime" yaml:"runtime"` URL string `json:"url" yaml:"url"` Ready string `json:"ready" yaml:"ready"` + Deployer string `json:"deployer" yaml:"deployer"` } // Describer of function instances type Describer interface { // Describe the named function in the remote environment. + // In case the describer is not responsible for a Function, it should return a ErrNotHandled error. + // It should return a nil error in case the describer was responsible for the Function and could describe it. 
Describe(ctx context.Context, name, namespace string) (Instance, error) } @@ -181,6 +188,7 @@ type Instance struct { Name string `json:"name" yaml:"name"` Image string `json:"image" yaml:"image"` Namespace string `json:"namespace" yaml:"namespace"` + Deployer string `json:"deployer" yaml:"deployer"` Subscriptions []Subscription `json:"subscriptions" yaml:"subscriptions"` Labels map[string]string `json:"labels" yaml:"labels" xml:"-"` } @@ -219,9 +227,9 @@ func New(options ...Option) *Client { builder: &noopBuilder{output: os.Stdout}, pusher: &noopPusher{output: os.Stdout}, deployer: &noopDeployer{output: os.Stdout}, - remover: &noopRemover{output: os.Stdout}, - lister: &noopLister{output: os.Stdout}, - describer: &noopDescriber{output: os.Stdout}, + removers: []Remover{&noopRemover{output: os.Stdout}}, + listers: []Lister{&noopLister{output: os.Stdout}}, + describers: []Describer{&noopDescriber{output: os.Stdout}}, dnsProvider: &noopDNSProvider{output: os.Stdout}, pipelinesProvider: &noopPipelinesProvider{}, mcpServer: &noopMCPServer{}, @@ -298,24 +306,24 @@ func WithRunner(r Runner) Option { } } -// WithRemover provides the concrete implementation of a remover. -func WithRemover(r Remover) Option { +// WithRemovers provides the concrete implementations of removers. +func WithRemovers(r ...Remover) Option { return func(c *Client) { - c.remover = r + c.removers = r } } -// WithLister provides the concrete implementation of a lister. -func WithLister(l Lister) Option { +// WithListers provides the concrete implementations of listers. +func WithListers(l ...Lister) Option { return func(c *Client) { - c.lister = l + c.listers = l } } -// WithDescriber provides a concrete implementation of a function describer. -func WithDescriber(describer Describer) Option { +// WithDescribers provides concrete implementations of function describers. +func WithDescribers(describers ...Describer) Option { return func(c *Client) { - c.describer = describer + c.describers = describers } } @@ -797,7 +805,7 @@ func (c *Client) Deploy(ctx context.Context, f Function, oo ...DeployOption) (Fu err := c.Remove(ctx, "", "", f, true) if err != nil { // Warn when service is not found and set err to nil to continue. Function's - // service mightve been manually deleted prior to the subsequent deploy or the + // service might have been manually deleted prior to the subsequent deploy or the // namespace is already deleted therefore there is nothing to delete if errors.Is(err, ErrFunctionNotFound) { fmt.Fprintf(os.Stderr, "Warning: Can't undeploy Function from namespace '%s'. The Function's service was not found. The namespace or service may have already been removed\n", f.Deploy.Namespace) @@ -985,7 +993,7 @@ func (c *Client) Describe(ctx context.Context, name, namespace string, f Functio // It is up to the concrete implementation whether or not namespace is // also required. if name != "" { - return c.describer.Describe(ctx, name, namespace) + return c.describeByMatchingDescriber(ctx, name, namespace) } // Desribe Current Function @@ -1002,7 +1010,28 @@ func (c *Client) Describe(ctx context.Context, name, namespace string, f Functio // If it has a populated deployed namespace, we can presume it's deployed // and attempt to describe. - return c.describer.Describe(ctx, f.Name, f.Deploy.Namespace) + return c.describeByMatchingDescriber(ctx, f.Name, f.Deploy.Namespace) +} + +// describeByMatchingDescriber iterates over the registered describers and returns the description from the first one that handles the given function.
+func (c *Client) describeByMatchingDescriber(ctx context.Context, name, namespace string) (d Instance, err error) { + // iterate over all registered describers. As soon as a describer returns a nil error, this means it was + // responsible for the Function and was able to describe it. + // Returning an ErrNotHandled error means the describer was not responsible for the Function and we need to try + // the next describer. + for _, describer := range c.describers { + d, err := describer.Describe(ctx, name, namespace) + if errors.Is(err, ErrNotHandled) { + continue // Try next describer + } + if err != nil { + return Instance{}, fmt.Errorf("could not run describer on function: %w", err) + } + + return d, nil + } + + return Instance{}, fmt.Errorf("no describer found for function %s in namespace %s", name, namespace) } // List currently deployed functions. @@ -1011,8 +1040,18 @@ func (c *Client) Describe(ctx context.Context, name, namespace string, f Functio // using the current kubernetes context namespace, falling back to the static // default "namespace". func (c *Client) List(ctx context.Context, namespace string) ([]ListItem, error) { - // delegate to concrete implementation of lister entirely. - return c.lister.List(ctx, namespace) + list := []ListItem{} + // iterate over all registered listers. A lister should only return Items for which it is responsible. + for _, lister := range c.listers { + res, err := lister.List(ctx, namespace) + if err != nil { + return nil, err + } + + list = append(list, res...) + } + + return list, nil } // Remove a function. Name takes precedence. If no name is provided, the @@ -1046,17 +1085,40 @@ func (c *Client) Remove(ctx context.Context, name, namespace string, f Function, // Perform the Removal var ( - serviceRemovalErrCh = make(chan error) resourceRemovalError error ) - go func() { - serviceRemovalErrCh <- c.remover.Remove(ctx, name, namespace) - }() + + serviceRemovalErrGroup := &errgroup.Group{} + var removeHandled atomic.Bool + for _, remover := range c.removers { + remover := remover + + serviceRemovalErrGroup.Go(func() error { + err := remover.Remove(ctx, name, namespace) + if err != nil { + if errors.Is(err, ErrNotHandled) { + // remover didn't need to handle it + return nil + } + return err + } + + // no error -> was removed -> set handled + removeHandled.Store(true) + + return nil + }) + } + if all { resourceRemovalError = c.pipelinesProvider.Remove(ctx, Function{Name: name, Deploy: DeploySpec{Namespace: namespace}}) } - serviceRemovalError := <-serviceRemovalErrCh + serviceRemovalError := serviceRemovalErrGroup.Wait() + if serviceRemovalError == nil && resourceRemovalError == nil && !removeHandled.Load() { + // no error, but resource was not handled by any of the removers + return fmt.Errorf("no remover handled %s in %s", name, namespace) + } // Return a combined error return func(e1, e2 error) error { @@ -1376,7 +1438,9 @@ func (n *noopRemover) Remove(context.Context, string, string) error { return nil // Lister type noopLister struct{ output io.Writer } -func (n *noopLister) List(context.Context, string) ([]ListItem, error) { return []ListItem{}, nil } +func (n *noopLister) List(context.Context, string) ([]ListItem, error) { + return []ListItem{}, nil +} // Describer type noopDescriber struct{ output io.Writer } diff --git a/pkg/functions/client_int_test.go b/pkg/functions/client_int_test.go index 09c2abf84a..3e48f85924 100644 --- a/pkg/functions/client_int_test.go +++ b/pkg/functions/client_int_test.go @@ -18,10 +18,10 @@ import (
"github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/volume" "github.com/docker/docker/client" - "knative.dev/func/pkg/builders/s2i" "knative.dev/func/pkg/docker" fn "knative.dev/func/pkg/functions" + "knative.dev/func/pkg/k8s" "knative.dev/func/pkg/knative" "knative.dev/func/pkg/oci" . "knative.dev/func/pkg/testing" @@ -64,11 +64,11 @@ const ( var ( Go = getEnvAsBin("FUNC_INT_GO", "go") - Git = getEnvAsBin("FUNC_INT_GIT", "git") + GitBin = getEnvAsBin("FUNC_INT_GIT", "git") Kubeconfig = getEnvAsPath("FUNC_INT_KUBECONFIG", DefaultIntTestKubeconfig) Verbose = getEnvAsBool("FUNC_INT_VERBOSE", DefaultIntTestVerbose) - Registry = getEnv("FUNC_INT_REGISTRY", DefaultIntTestRegistry) Home, _ = filepath.Abs(DefaultIntTestHome) + //Registry = // see testing package (it's shared) ) // containsInstance checks if the list includes the given instance. @@ -637,12 +637,12 @@ func resetEnv() { os.Setenv("HOME", Home) os.Setenv("KUBECONFIG", Kubeconfig) os.Setenv("FUNC_GO", Go) - os.Setenv("FUNC_GIT", Git) + os.Setenv("FUNC_GIT", GitBin) os.Setenv("FUNC_VERBOSE", fmt.Sprintf("%t", Verbose)) // The Registry will be set either during first-time setup using the // global config, or already defaulted by the user via environment variable. - os.Setenv("FUNC_REGISTRY", Registry) + os.Setenv("FUNC_REGISTRY", Registry()) // The following host-builder related settings will become the defaults // once the host builder supports the core runtimes. Setting them here in @@ -661,9 +661,9 @@ func newClient(verbose bool) *fn.Client { fn.WithBuilder(oci.NewBuilder("", verbose)), fn.WithPusher(oci.NewPusher(true, true, verbose)), fn.WithDeployer(knative.NewDeployer(knative.WithDeployerVerbose(verbose))), - fn.WithDescriber(knative.NewDescriber(verbose)), - fn.WithRemover(knative.NewRemover(verbose)), - fn.WithLister(knative.NewLister(verbose)), + fn.WithDescribers(knative.NewDescriber(verbose), k8s.NewDescriber(verbose)), + fn.WithRemovers(knative.NewRemover(verbose), k8s.NewRemover(verbose)), + fn.WithListers(knative.NewLister(verbose), k8s.NewLister(verbose)), fn.WithVerbose(verbose), ) } @@ -673,9 +673,6 @@ func newClientWithS2i(verbose bool) *fn.Client { builder := s2i.NewBuilder(s2i.WithVerbose(verbose)) pusher := docker.NewPusher(docker.WithVerbose(verbose)) deployer := knative.NewDeployer(knative.WithDeployerVerbose(verbose)) - describer := knative.NewDescriber(verbose) - remover := knative.NewRemover(verbose) - lister := knative.NewLister(verbose) return fn.New( fn.WithRegistry(DefaultIntTestRegistry), @@ -683,9 +680,9 @@ func newClientWithS2i(verbose bool) *fn.Client { fn.WithBuilder(builder), fn.WithPusher(pusher), fn.WithDeployer(deployer), - fn.WithDescriber(describer), - fn.WithRemover(remover), - fn.WithLister(lister), + fn.WithDescribers(knative.NewDescriber(verbose), k8s.NewDescriber(verbose)), + fn.WithRemovers(knative.NewRemover(verbose), k8s.NewRemover(verbose)), + fn.WithListers(knative.NewLister(verbose), k8s.NewLister(verbose)), ) } diff --git a/pkg/functions/client_test.go b/pkg/functions/client_test.go index d4c5724af9..591ad6109b 100644 --- a/pkg/functions/client_test.go +++ b/pkg/functions/client_test.go @@ -1061,7 +1061,7 @@ func TestClient_Remove_ByPath(t *testing.T) { client := fn.New( fn.WithRegistry(TestRegistry), - fn.WithRemover(remover)) + fn.WithRemovers(remover)) var f fn.Function var err error @@ -1103,7 +1103,7 @@ func TestClient_Remove_DeleteAll(t *testing.T) { client := fn.New( fn.WithRegistry(TestRegistry), - fn.WithRemover(remover), + 
fn.WithRemovers(remover), fn.WithPipelinesProvider(pipelinesProvider)) var f fn.Function @@ -1150,7 +1150,7 @@ func TestClient_Remove_Dont_DeleteAll(t *testing.T) { client := fn.New( fn.WithRegistry(TestRegistry), - fn.WithRemover(remover), + fn.WithRemovers(remover), fn.WithPipelinesProvider(pipelinesProvider)) var f fn.Function @@ -1194,7 +1194,7 @@ func TestClient_Remove_ByName(t *testing.T) { client := fn.New( fn.WithRegistry(TestRegistry), - fn.WithRemover(remover)) + fn.WithRemovers(remover)) if _, err := client.Init(fn.Function{Runtime: TestRuntime, Root: root}); err != nil { t.Fatal(err) @@ -1241,7 +1241,7 @@ func TestClient_Remove_UninitializedFails(t *testing.T) { // Instantiate the client with the failing remover. client := fn.New( fn.WithRegistry(TestRegistry), - fn.WithRemover(remover)) + fn.WithRemovers(remover)) // Attempt to remove by path (uninitialized), expecting an error. if err := client.Remove(context.Background(), "", "", fn.Function{Root: root}, false); err == nil { @@ -1253,7 +1253,7 @@ func TestClient_Remove_UninitializedFails(t *testing.T) { func TestClient_List(t *testing.T) { lister := mock.NewLister() - client := fn.New(fn.WithLister(lister)) // lists deployed functions. + client := fn.New(fn.WithListers(lister)) // lists deployed functions. if _, err := client.List(context.Background(), ""); err != nil { t.Fatal(err) @@ -1288,7 +1288,7 @@ func TestClient_List_OutsideRoot(t *testing.T) { lister := mock.NewLister() // Instantiate in the current working directory, with no name. - client := fn.New(fn.WithLister(lister)) + client := fn.New(fn.WithListers(lister)) if _, err := client.List(context.Background(), ""); err != nil { t.Fatal(err) @@ -2152,7 +2152,7 @@ func TestClient_DeployRemoves(t *testing.T) { return nil } - client := fn.New(fn.WithRemover(remover)) + client := fn.New(fn.WithRemovers(remover)) // initialize function with namespace defined as nsone f, err := client.Init(fn.Function{Runtime: "go", Root: root, diff --git a/pkg/functions/errors.go b/pkg/functions/errors.go index 3c3e8b8f08..c2ca9bf87d 100644 --- a/pkg/functions/errors.go +++ b/pkg/functions/errors.go @@ -47,6 +47,9 @@ var ( // ErrInvalidNamespace is returned when a namespace name doesn't meet Kubernetes naming requirements ErrInvalidNamespace = errors.New("invalid namespace") + + // ErrNotHandled is returned when a handler (describer, remover, ...) 
was not responsible for the function + ErrNotHandled = errors.New("describer does not handle this function") ) // ErrNotInitialized indicates that a function is uninitialized diff --git a/pkg/functions/function.go b/pkg/functions/function.go index e6b50128e6..36e1650674 100644 --- a/pkg/functions/function.go +++ b/pkg/functions/function.go @@ -208,6 +208,10 @@ type DeploySpec struct { // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ ServiceAccountName string `yaml:"serviceAccountName,omitempty"` + // Deployer specifies the type of deployment to use: "knative" or "raw" + // Defaults to "knative" for backwards compatibility + Deployer string `yaml:"deployer,omitempty" jsonschema:"enum=knative,enum=raw"` + Subscriptions []KnativeSubscription `yaml:"subscriptions,omitempty"` } diff --git a/pkg/functions/function_unit_test.go b/pkg/functions/function_unit_test.go index 5082e33a3e..7a0715db9e 100644 --- a/pkg/functions/function_unit_test.go +++ b/pkg/functions/function_unit_test.go @@ -1,4 +1,4 @@ -package functions +package functions_test import ( "os" @@ -7,6 +7,7 @@ import ( "testing" "gopkg.in/yaml.v2" + fn "knative.dev/func/pkg/functions" fnlabels "knative.dev/func/pkg/k8s/labels" . "knative.dev/func/pkg/testing" @@ -22,19 +23,19 @@ func TestFunction_Validate(t *testing.T) { root, cleanup := Mktemp(t) t.Cleanup(cleanup) - var f Function + var f fn.Function var err error // Loading a nonexistent (new) function should not fail // I.e. it will not run .Validate, or it would error that the function at // root has no language or name. - if f, err = NewFunction(root); err != nil { + if f, err = fn.NewFunction(root); err != nil { t.Fatal(err) } // Attempting to write the function will fail as being invalid invalidEnv := "*invalid" - f.Build.BuildEnvs = []Env{{Name: &invalidEnv}} + f.Build.BuildEnvs = []fn.Env{{Name: &invalidEnv}} if err = f.Write(); err == nil { t.Fatalf("expected error writing an incomplete (invalid) function") } @@ -46,7 +47,7 @@ func TestFunction_Validate(t *testing.T) { // serialization of the Function struct to a known filename. This is why this // test belongs here in the same package as the implementation rather than in // package functions_test which treats the function package as an opaque-box. - path := filepath.Join(root, FunctionFile) + path := filepath.Join(root, fn.FunctionFile) bb, err := yaml.Marshal(&f) if err != nil { t.Fatal(err) @@ -56,7 +57,7 @@ func TestFunction_Validate(t *testing.T) { } // Loading the invalid function should not fail, but validation should. - if f, err = NewFunction(root); err != nil { + if f, err = fn.NewFunction(root); err != nil { t.Fatal(err) } if err = f.Validate(); err == nil { // axiom check; not strictly part of this test @@ -64,11 +65,11 @@ func TestFunction_Validate(t *testing.T) { } // Remove the invalid structures... write should complete without error. 
- f.Build.BuildEnvs = []Env{} + f.Build.BuildEnvs = []fn.Env{} if err = f.Write(); err != nil { t.Fatal(err) } - if f, err = NewFunction(root); err != nil { + if f, err = fn.NewFunction(root); err != nil { t.Fatal(err) } if err = f.Validate(); err != nil { @@ -118,8 +119,8 @@ func TestFunction_ImageWithDigest(t *testing.T) { // 2: is still fetched after pushing the Function (which is a temporary fix -- it really should be during build) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - f := Function{ - Build: BuildSpec{ + f := fn.Function{ + Build: fn.BuildSpec{ Image: tt.fields.Image, }, } @@ -136,7 +137,7 @@ func TestFunction_ImageWithDigest(t *testing.T) { // registry is a single token (just the namespace). func TestFunction_ImageName(t *testing.T) { var ( - f Function + f fn.Function got string err error ) @@ -147,11 +148,11 @@ func TestFunction_ImageName(t *testing.T) { expectedImage string expectError bool }{ - {"short-name", "alice", "myfunc", DefaultRegistry + "/alice/myfunc:latest", false}, - {"short-name-trailing-slash", "alice/", "myfunc", DefaultRegistry + "/alice/myfunc:latest", false}, + {"short-name", "alice", "myfunc", fn.DefaultRegistry + "/alice/myfunc:latest", false}, + {"short-name-trailing-slash", "alice/", "myfunc", fn.DefaultRegistry + "/alice/myfunc:latest", false}, {"full-name-quay-io", "quay.io/alice", "myfunc", "quay.io/alice/myfunc:latest", false}, - {"full-name-docker-io", "docker.io/alice", "myfunc", DefaultRegistry + "/alice/myfunc:latest", false}, - {"full-name-with-sub-path", "docker.io/alice/sub", "myfunc", DefaultRegistry + "/alice/sub/myfunc:latest", false}, + {"full-name-docker-io", "docker.io/alice", "myfunc", fn.DefaultRegistry + "/alice/myfunc:latest", false}, + {"full-name-with-sub-path", "docker.io/alice/sub", "myfunc", fn.DefaultRegistry + "/alice/sub/myfunc:latest", false}, {"localhost-direct", "localhost:5000", "myfunc", "localhost:5000/myfunc:latest", false}, {"full-name-with-sub-sub-path", "us-central1-docker.pkg.dev/my-gcpproject/team/user", "myfunc", "us-central1-docker.pkg.dev/my-gcpproject/team/user/myfunc:latest", false}, {"missing-func-name", "alice", "", "", true}, @@ -159,7 +160,7 @@ func TestFunction_ImageName(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - f = Function{Registry: test.registry, Name: test.funcName} + f = fn.Function{Registry: test.registry, Name: test.funcName} got, err = f.ImageName() if test.expectError && err == nil { t.Errorf("registry '%v' and name '%v' did not yield the expected error", @@ -190,13 +191,13 @@ func Test_LabelsMap(t *testing.T) { tests := []struct { name string - labels []Label + labels []fn.Label expectErr bool expectedMap map[string]string }{ { name: "invalid Labels should return err", - labels: []Label{ + labels: []fn.Label{ { Value: &value1, }, @@ -205,7 +206,7 @@ func Test_LabelsMap(t *testing.T) { }, { name: "with valid env var", - labels: []Label{ + labels: []fn.Label{ { Key: &key1, Value: &valueLocalEnv4, @@ -218,7 +219,7 @@ func Test_LabelsMap(t *testing.T) { }, { name: "with invalid env var", - labels: []Label{ + labels: []fn.Label{ { Key: &key1, Value: &valueLocalEnvIncorrect4, @@ -228,7 +229,7 @@ func Test_LabelsMap(t *testing.T) { }, { name: "empty labels allowed. 
returns default labels", - labels: []Label{ + labels: []fn.Label{ { Key: &key1, }, @@ -240,7 +241,7 @@ func Test_LabelsMap(t *testing.T) { }, { name: "full set of labels", - labels: []Label{ + labels: []fn.Label{ { Key: &key1, Value: &value1, @@ -260,10 +261,10 @@ func Test_LabelsMap(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - f := Function{ + f := fn.Function{ Name: "some-function", Runtime: "golang", - Deploy: DeploySpec{Labels: tt.labels}, + Deploy: fn.DeploySpec{Labels: tt.labels}, } got, err := f.LabelsMap() @@ -289,7 +290,7 @@ func Test_LabelsMap(t *testing.T) { } } -func expectedDefaultLabels(f Function) map[string]string { +func expectedDefaultLabels(f fn.Function) map[string]string { return map[string]string{ fnlabels.FunctionNameKey: f.Name, fnlabels.FunctionRuntimeKey: f.Runtime, diff --git a/pkg/functions/instances.go b/pkg/functions/instances.go index 49acff683e..07c226b81c 100644 --- a/pkg/functions/instances.go +++ b/pkg/functions/instances.go @@ -84,5 +84,5 @@ func (s *InstanceRefs) Remote(ctx context.Context, name, namespace string) (i In return i, errors.New("fetching remote instances requires namespace") } - return s.client.describer.Describe(ctx, name, namespace) + return s.client.Describe(ctx, name, namespace, Function{}) } diff --git a/pkg/functions/instances_test.go b/pkg/functions/instances_test.go index 1c08e4b7d1..540523d3b8 100644 --- a/pkg/functions/instances_test.go +++ b/pkg/functions/instances_test.go @@ -1,4 +1,4 @@ -package functions +package functions_test import ( "context" @@ -6,6 +6,7 @@ import ( "strings" "testing" + fn "knative.dev/func/pkg/functions" . "knative.dev/func/pkg/testing" ) @@ -16,36 +17,36 @@ func TestInstances_LocalErrors(t *testing.T) { defer rm() // Create a function that will not be running - f, err := New().Init(Function{Runtime: "go", Root: root}) + f, err := fn.New().Init(fn.Function{Runtime: "go", Root: root}) if err != nil { t.Fatal(err) } tests := []struct { name string - f Function + f fn.Function wantIs error wantAs any }{ { name: "Not running", // Function exists but is not running f: f, - wantIs: ErrNotRunning, + wantIs: fn.ErrNotRunning, }, { name: "Not initialized", // A function directory is provided, but no function exists - f: Function{Root: "testdata/not-initialized"}, - wantAs: &ErrNotInitialized{}, + f: fn.Function{Root: "testdata/not-initialized"}, + wantAs: &fn.ErrNotInitialized{}, }, { name: "Root required", // No root directory is provided - f: Function{}, - wantIs: ErrRootRequired, + f: fn.Function{}, + wantIs: fn.ErrRootRequired, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - i := InstanceRefs{} + i := fn.InstanceRefs{} _, err := i.Local(context.Background(), tt.f) if tt.wantIs != nil && !errors.Is(err, tt.wantIs) { t.Errorf("Local() error = %v, want %#v", err, tt.wantIs) @@ -64,13 +65,13 @@ func TestInstance_RemoteErrors(t *testing.T) { defer rm() // Create a function that will not be running - _, err := New().Init(Function{Runtime: "go", Namespace: "ns1", Root: root}) + _, err := fn.New().Init(fn.Function{Runtime: "go", Namespace: "ns1", Root: root}) if err != nil { t.Fatal(err) } // Load the function - if _, err := NewFunction(root); err != nil { + if _, err := fn.NewFunction(root); err != nil { t.Fatal(err) } @@ -104,7 +105,7 @@ func TestInstance_RemoteErrors(t *testing.T) { } for _, test := range tests { t.Run(test.test, func(t *testing.T) { - i := InstanceRefs{} + i := fn.InstanceRefs{} _, err := i.Remote(context.Background(), test.name, 
test.namespace) if err == nil { t.Fatal("did not receive expected error") diff --git a/pkg/functions/job_test.go b/pkg/functions/job_test.go index 191aaa8101..f64e309f2c 100644 --- a/pkg/functions/job_test.go +++ b/pkg/functions/job_test.go @@ -1,10 +1,11 @@ -package functions +package functions_test import ( "context" "errors" "testing" + fn "knative.dev/func/pkg/functions" . "knative.dev/func/pkg/testing" ) @@ -21,25 +22,25 @@ import ( func TestJob_New(t *testing.T) { root, rm := Mktemp(t) defer rm() - client := New() + client := fn.New() // create a new function - f, err := client.Init(Function{Runtime: "go", Root: root}) + f, err := client.Init(fn.Function{Runtime: "go", Root: root}) if err != nil { t.Fatal(err) } // Assert that an initialized function and port are required onStop := func() error { return nil } - if _, err := NewJob(Function{}, "127.0.0.1", "8080", nil, onStop, false); err == nil { + if _, err := fn.NewJob(fn.Function{}, "127.0.0.1", "8080", nil, onStop, false); err == nil { t.Fatal("expected NewJob to require an initialized functoin") } - if _, err := NewJob(f, "127.0.0.1", "", nil, onStop, false); err == nil { + if _, err := fn.NewJob(f, "127.0.0.1", "", nil, onStop, false); err == nil { t.Fatal("expected NewJob to require a port") } // Assert creating a Job with the required arguments succeeds. - _, err = NewJob(f, "127.0.0.1", "8080", nil, onStop, false) + _, err = fn.NewJob(f, "127.0.0.1", "8080", nil, onStop, false) if err != nil { t.Fatalf("creating job failed. %s", err) } @@ -49,7 +50,7 @@ func TestJob_New(t *testing.T) { // that the system supports multiple instances running simultaneously. _, err = client.Instances().Local(context.Background(), f) if err != nil { - if errors.Is(err, ErrNotRunning) { + if errors.Is(err, fn.ErrNotRunning) { t.Fatalf("client does not recognize job as running. %s", err) } else { t.Fatalf("unexpected error checking client for instance's existence. %s", err) @@ -63,9 +64,9 @@ func TestJob_New(t *testing.T) { func TestJob_Stop(t *testing.T) { root, rm := Mktemp(t) defer rm() - client := New() + client := fn.New() - f, err := client.Init(Function{Runtime: "go", Root: root}) + f, err := client.Init(fn.Function{Runtime: "go", Root: root}) if err != nil { t.Fatal(err) } @@ -75,13 +76,13 @@ func TestJob_Stop(t *testing.T) { onStop := func() error { onStopInvoked = true; return nil } // Assert creating a Job with the required arguments succeeds. - j, err := NewJob(f, "127.0.0.1", "8080", nil, onStop, false) + j, err := fn.NewJob(f, "127.0.0.1", "8080", nil, onStop, false) if err != nil { t.Fatalf("creating job failed. %s", err) } _, err = client.Instances().Local(context.Background(), f) if err != nil { - if errors.Is(err, ErrNotRunning) { + if errors.Is(err, fn.ErrNotRunning) { t.Fatalf("client does not recognize job as running. %s", err) } else { t.Fatalf("unexpected error checking client for instance's existence. 
%s", err) diff --git a/pkg/k8s/client.go b/pkg/k8s/client.go index ff069f95bb..8d528280df 100644 --- a/pkg/k8s/client.go +++ b/pkg/k8s/client.go @@ -2,6 +2,7 @@ package k8s import ( "fmt" + "time" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" @@ -9,6 +10,11 @@ import ( "k8s.io/client-go/tools/clientcmd" ) +const ( + DefaultWaitingTimeout = 120 * time.Second + DefaultErrorWindowTimeout = 2 * time.Second +) + func NewClientAndResolvedNamespace(ns string) (*kubernetes.Clientset, string, error) { var err error if ns == "" { diff --git a/pkg/k8s/deployer.go b/pkg/k8s/deployer.go new file mode 100644 index 0000000000..129ac0a620 --- /dev/null +++ b/pkg/k8s/deployer.go @@ -0,0 +1,727 @@ +package k8s + +import ( + "context" + "fmt" + "os" + "regexp" + "strings" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/sets" + "knative.dev/func/pkg/deployer" + fn "knative.dev/func/pkg/functions" +) + +const ( + KubernetesDeployerName = "raw" + + DefaultLivenessEndpoint = "/health/liveness" + DefaultReadinessEndpoint = "/health/readiness" + DefaultHTTPPort = 8080 +) + +type DeployerOpt func(*Deployer) + +type Deployer struct { + verbose bool + decorator deployer.DeployDecorator +} + +func NewDeployer(opts ...DeployerOpt) *Deployer { + d := &Deployer{} + for _, opt := range opts { + opt(d) + } + return d +} + +func WithDeployerVerbose(verbose bool) DeployerOpt { + return func(d *Deployer) { + d.verbose = verbose + } +} + +func WithDeployerDecorator(decorator deployer.DeployDecorator) DeployerOpt { + return func(d *Deployer) { + d.decorator = decorator + } +} + +func onClusterFix(f fn.Function) fn.Function { + // This only exists because of a bootstapping problem with On-Cluster + // builds: It appears that, when sending a function to be built on-cluster + // the target namespace is not being transmitted in the pipeline + // configuration. We should figure out how to transmit this information + // to the pipeline run for initial builds. This is a new problem because + // earlier versions of this logic relied entirely on the current + // kubernetes context. + if f.Namespace == "" && f.Deploy.Namespace == "" { + f.Namespace, _ = GetDefaultNamespace() + } + return f +} + +func (d *Deployer) Deploy(ctx context.Context, f fn.Function) (fn.DeploymentResult, error) { + f = onClusterFix(f) + // Choosing f.Namespace vs f.Deploy.Namespace: + // This is minimal logic currently required of all deployer impls. + // If f.Namespace is defined, this is the (possibly new) target + // namespace. Otherwise use the last deployed namespace. Error if + // neither are set. The logic which arbitrates between curret k8s context, + // flags, environment variables and global defaults to determine the + // effective namespace is not logic for the deployer implementation, which + // should have a minimum of logic. In this case limited to "new ns or + // existing namespace? 
+ namespace := f.Namespace + if namespace == "" { + namespace = f.Deploy.Namespace + } + if namespace == "" { + return fn.DeploymentResult{}, fmt.Errorf("deployer requires either a target namespace or that the function be already deployed") + } + + // Choosing an image to deploy: + // If the service has not been deployed before, but there exists a + // build image, this build image should be used for the deploy. + // TODO: test/consider the case where it HAS been deployed, and the + // build image has been updated /since/ deployment: do we need a + // timestamp? Incrementation? + if f.Deploy.Image == "" { + f.Deploy.Image = f.Build.Image + } + + clientset, err := NewKubernetesClientset() + if err != nil { + return fn.DeploymentResult{}, err + } + + // Check if Dapr is installed + daprInstalled := false + _, err = clientset.CoreV1().Namespaces().Get(ctx, "dapr-system", metav1.GetOptions{}) + if err == nil { + daprInstalled = true + } + + deploymentClient := clientset.AppsV1().Deployments(namespace) + serviceClient := clientset.CoreV1().Services(namespace) + + existingDeployment, err := deploymentClient.Get(ctx, f.Name, metav1.GetOptions{}) + + var status fn.Status + if err == nil { + // Update the existing function + deployment, svc, err := d.generateResources(f, namespace, daprInstalled) + if err != nil { + return fn.DeploymentResult{}, fmt.Errorf("failed to generate resources: %w", err) + } + + // Preserve resource version for update + deployment.ResourceVersion = existingDeployment.ResourceVersion + + if _, err = deploymentClient.Update(ctx, deployment, metav1.UpdateOptions{}); err != nil { + return fn.DeploymentResult{}, fmt.Errorf("failed to update deployment: %w", err) + } + + existingService, err := serviceClient.Get(ctx, f.Name, metav1.GetOptions{}) + if err == nil { + svc.ResourceVersion = existingService.ResourceVersion + if _, err = serviceClient.Update(ctx, svc, metav1.UpdateOptions{}); err != nil { + return fn.DeploymentResult{}, fmt.Errorf("failed to update service: %w", err) + } + } else if errors.IsNotFound(err) { + // Service doesn't exist, create it + if _, err = serviceClient.Create(ctx, svc, metav1.CreateOptions{}); err != nil { + return fn.DeploymentResult{}, fmt.Errorf("failed to create service: %w", err) + } + } else { + return fn.DeploymentResult{}, fmt.Errorf("failed to get existing service: %w", err) + } + + status = fn.Updated + if d.verbose { + fmt.Fprintf(os.Stderr, "Updated deployment and service %s in namespace %s\n", f.Name, namespace) + } + } else { + if !errors.IsNotFound(err) { + return fn.DeploymentResult{}, fmt.Errorf("failed to check for existing deployment: %w", err) + } + + deployment, svc, err := d.generateResources(f, namespace, daprInstalled) + if err != nil { + return fn.DeploymentResult{}, fmt.Errorf("failed to generate resources: %w", err) + } + + if _, err = deploymentClient.Create(ctx, deployment, metav1.CreateOptions{}); err != nil { + return fn.DeploymentResult{}, fmt.Errorf("failed to create deployment: %w", err) + } + + if _, err = serviceClient.Create(ctx, svc, metav1.CreateOptions{}); err != nil { + return fn.DeploymentResult{}, fmt.Errorf("failed to create service: %w", err) + } + + status = fn.Deployed + if d.verbose { + fmt.Fprintf(os.Stderr, "Created deployment and service %s in namespace %s\n", f.Name, namespace) + } + } + + if err := WaitForDeploymentAvailable(ctx, clientset, namespace, f.Name, DefaultWaitingTimeout); err != nil { + return fn.DeploymentResult{}, fmt.Errorf("deployment did not become ready: %w", err) + } + + url := 
fmt.Sprintf("http://%s.%s.svc.cluster.local", f.Name, namespace) + + return fn.DeploymentResult{ + Status: status, + URL: url, + Namespace: namespace, + }, nil +} + +func (d *Deployer) generateResources(f fn.Function, namespace string, daprInstalled bool) (*appsv1.Deployment, *corev1.Service, error) { + labels, err := deployer.GenerateCommonLabels(f, d.decorator) + if err != nil { + return nil, nil, err + } + + annotations := deployer.GenerateCommonAnnotations(f, d.decorator, daprInstalled, KubernetesDeployerName) + + // Use annotations for pod template + podAnnotations := make(map[string]string) + for k, v := range annotations { + podAnnotations[k] = v + } + + // Process environment variables and volumes + referencedSecrets := sets.New[string]() + referencedConfigMaps := sets.New[string]() + referencedPVCs := sets.New[string]() + + envVars, envFrom, err := ProcessEnvs(f.Run.Envs, &referencedSecrets, &referencedConfigMaps) + if err != nil { + return nil, nil, fmt.Errorf("failed to process environment variables: %w", err) + } + + volumes, volumeMounts, err := ProcessVolumes(f.Run.Volumes, &referencedSecrets, &referencedConfigMaps, &referencedPVCs) + if err != nil { + return nil, nil, fmt.Errorf("failed to process volumes: %w", err) + } + + container := corev1.Container{ + Name: "user-container", + Image: f.Deploy.Image, + Ports: []corev1.ContainerPort{ + { + ContainerPort: DefaultHTTPPort, + Protocol: corev1.ProtocolTCP, + }, + }, + Env: envVars, + EnvFrom: envFrom, + VolumeMounts: volumeMounts, + } + + SetHealthEndpoints(f, &container) + SetSecurityContext(&container) + + replicas := int32(1) + if f.Deploy.Options.Scale != nil && f.Deploy.Options.Scale.Min != nil && *f.Deploy.Options.Scale.Min > 0 { + replicas = int32(*f.Deploy.Options.Scale.Min) + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: f.Name, + Namespace: namespace, + Labels: labels, + Annotations: annotations, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + Annotations: podAnnotations, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{container}, + ServiceAccountName: f.Deploy.ServiceAccountName, + Volumes: volumes, + }, + }, + }, + } + + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: f.Name, + Namespace: namespace, + Labels: labels, + Annotations: annotations, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: labels, + Ports: []corev1.ServicePort{ + { + Name: "http", + Port: 80, + TargetPort: intstr.FromInt32(DefaultHTTPPort), + Protocol: corev1.ProtocolTCP, + }, + }, + }, + } + + return deployment, service, nil +} + +// CheckResourcesArePresent returns error if Secrets or ConfigMaps +// referenced in input sets are not deployed on the cluster in the specified namespace +func CheckResourcesArePresent(ctx context.Context, namespace string, referencedSecrets, referencedConfigMaps, referencedPVCs *sets.Set[string], referencedServiceAccount string) error { + errMsg := "" + for s := range *referencedSecrets { + _, err := GetSecret(ctx, s, namespace) + if err != nil { + if errors.IsForbidden(err) { + errMsg += " Ensure that the service account has the necessary permissions to access the secret.\n" + } else { + errMsg += fmt.Sprintf(" referenced Secret \"%s\" is not present in namespace \"%s\"\n", s, namespace) + } + } + } + + for cm := range *referencedConfigMaps { + _, err 
:= GetConfigMap(ctx, cm, namespace) + if err != nil { + errMsg += fmt.Sprintf(" referenced ConfigMap \"%s\" is not present in namespace \"%s\"\n", cm, namespace) + } + } + + for pvc := range *referencedPVCs { + _, err := GetPersistentVolumeClaim(ctx, pvc, namespace) + if err != nil { + errMsg += fmt.Sprintf(" referenced PersistentVolumeClaim \"%s\" is not present in namespace \"%s\"\n", pvc, namespace) + } + } + + // check if referenced ServiceAccount is present in the namespace if it is not default + if referencedServiceAccount != "" && referencedServiceAccount != "default" { + err := GetServiceAccount(ctx, referencedServiceAccount, namespace) + if err != nil { + errMsg += fmt.Sprintf(" referenced ServiceAccount \"%s\" is not present in namespace \"%s\"\n", referencedServiceAccount, namespace) + } + } + + if errMsg != "" { + return fmt.Errorf("error(s) while validating resources:\n%s", errMsg) + } + + return nil +} + +// SetHealthEndpoints configures health probes for a container +func SetHealthEndpoints(f fn.Function, container *corev1.Container) { + livenessPath := DefaultLivenessEndpoint + if f.Deploy.HealthEndpoints.Liveness != "" { + livenessPath = f.Deploy.HealthEndpoints.Liveness + } + + readinessPath := DefaultReadinessEndpoint + if f.Deploy.HealthEndpoints.Readiness != "" { + readinessPath = f.Deploy.HealthEndpoints.Readiness + } + + container.LivenessProbe = &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: livenessPath, + Port: intstr.FromInt32(DefaultHTTPPort), + }, + }, + } + + container.ReadinessProbe = &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: readinessPath, + Port: intstr.FromInt32(DefaultHTTPPort), + }, + }, + } +} + +// SetSecurityContext configures security settings for a container +func SetSecurityContext(container *corev1.Container) { + runAsNonRoot := true + allowPrivilegeEscalation := false + capabilities := corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + } + seccompProfile := corev1.SeccompProfile{ + Type: "RuntimeDefault", + } + container.SecurityContext = &corev1.SecurityContext{ + RunAsNonRoot: &runAsNonRoot, + AllowPrivilegeEscalation: &allowPrivilegeEscalation, + Capabilities: &capabilities, + SeccompProfile: &seccompProfile, + } +} + +// ProcessEnvs generates array of EnvVars and EnvFromSources from a function config +// envs: +// - name: EXAMPLE1 # ENV directly from a value +// value: value1 +// - name: EXAMPLE2 # ENV from the local ENV var +// value: {{ env:MY_ENV }} +// - name: EXAMPLE3 +// value: {{ secret:example-secret:key }} # ENV from a key in Secret +// - value: {{ secret:example-secret }} # all ENVs from Secret +// - name: EXAMPLE4 +// value: {{ configMap:configMapName:key }} # ENV from a key in ConfigMap +// - value: {{ configMap:configMapName }} # all key-pair values from ConfigMap are set as ENV +func ProcessEnvs(envs []fn.Env, referencedSecrets, referencedConfigMaps *sets.Set[string]) ([]corev1.EnvVar, []corev1.EnvFromSource, error) { + + envs = withOpenAddress(envs) // prepends ADDRESS=0.0.0.0 if not extant + + envVars := []corev1.EnvVar{{Name: "BUILT", Value: time.Now().Format("20060102T150405")}} + envFrom := []corev1.EnvFromSource{} + + for _, env := range envs { + if env.Name == nil && env.Value != nil { + // all key-pair values from secret/configMap are set as ENV, eg. 
{{ secret:secretName }} or {{ configMap:configMapName }} + if strings.HasPrefix(*env.Value, "{{") { + envFromSource, err := createEnvFromSource(*env.Value, referencedSecrets, referencedConfigMaps) + if err != nil { + return nil, nil, err + } + envFrom = append(envFrom, *envFromSource) + continue + } + } else if env.Name != nil && env.Value != nil { + if strings.HasPrefix(*env.Value, "{{") { + slices := strings.Split(strings.Trim(*env.Value, "{} "), ":") + if len(slices) == 3 { + // ENV from a key in secret/configMap, eg. FOO={{ secret:secretName:key }} FOO={{ configMap:configMapName.key }} + valueFrom, err := createEnvVarSource(slices, referencedSecrets, referencedConfigMaps) + envVars = append(envVars, corev1.EnvVar{Name: *env.Name, ValueFrom: valueFrom}) + if err != nil { + return nil, nil, err + } + continue + } else if len(slices) == 2 { + // ENV from the local ENV var, eg. FOO={{ env:LOCAL_ENV }} + localValue, err := processLocalEnvValue(*env.Value) + if err != nil { + return nil, nil, err + } + envVars = append(envVars, corev1.EnvVar{Name: *env.Name, Value: localValue}) + continue + } + } else { + // a standard ENV with key and value, eg. FOO=bar + envVars = append(envVars, corev1.EnvVar{Name: *env.Name, Value: *env.Value}) + continue + } + } + return nil, nil, fmt.Errorf("unsupported env source entry \"%v\"", env) + } + + return envVars, envFrom, nil +} + +// withOpenAddress prepends ADDRESS=0.0.0.0 to the envs if not present. +// +// This is combined with the value of PORT at runtime to determine the full +// Listener address on which a Function will listen tcp requests. +// +// Runtimes should, by default, only listen on the loopback interface by +// default, as they may be `func run` locally, for security purposes. +// This environment variable instructs the runtimes to listen on all interfaces +// by default when actually being deployed, since they will need to actually +// listen for client requests and for health readiness/liveness probes. +// +// Should a user wish to securely open their function to only receive requests +// on a specific interface, such as a WireGuard-encrypted mesh network which +// presents as a specific interface, that can be achieved by setting the +// ADDRESS value as an environment variable on their function to the interface +// on which to listen. +// +// NOTE this env is currently only respected by scaffolded Go functions, because +// they are the only ones which support being `func run` locally. Other +// runtimes will respect the value as they are updated to support scaffolding. +func withOpenAddress(ee []fn.Env) []fn.Env { + // TODO: this is unnecessarily complex due to both key and value of the + // envs slice being being pointers. There is an outstanding tech-debt item + // to remove pointers from Function Envs, Volumes, Labels, and Options. 
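+ // Scan the provided envs for an existing ADDRESS entry; the default is only appended when none is present, so a user-supplied value is never overridden.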
+ var found bool + for _, e := range ee { + if e.Name != nil && *e.Name == "ADDRESS" { + found = true + break + } + } + if !found { + k := "ADDRESS" + v := "0.0.0.0" + ee = append(ee, fn.Env{Name: &k, Value: &v}) + } + return ee +} + +func createEnvFromSource(value string, referencedSecrets, referencedConfigMaps *sets.Set[string]) (*corev1.EnvFromSource, error) { + slices := strings.Split(strings.Trim(value, "{} "), ":") + if len(slices) != 2 { + return nil, fmt.Errorf("env requires a value in form \"resourceType:name\" where \"resourceType\" can be one of \"configMap\" or \"secret\"; got %q", slices) + } + + envVarSource := corev1.EnvFromSource{} + + typeString := strings.TrimSpace(slices[0]) + sourceName := strings.TrimSpace(slices[1]) + + var sourceType string + + switch typeString { + case "configMap": + sourceType = "ConfigMap" + envVarSource.ConfigMapRef = &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: sourceName, + }} + + if !referencedConfigMaps.Has(sourceName) { + referencedConfigMaps.Insert(sourceName) + } + case "secret": + sourceType = "Secret" + envVarSource.SecretRef = &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: sourceName, + }} + if !referencedSecrets.Has(sourceName) { + referencedSecrets.Insert(sourceName) + } + default: + return nil, fmt.Errorf("unsupported env source type %q; supported source types are \"configMap\" or \"secret\"", slices[0]) + } + + if len(sourceName) == 0 { + return nil, fmt.Errorf("the name of %s cannot be an empty string", sourceType) + } + + return &envVarSource, nil +} + +func createEnvVarSource(slices []string, referencedSecrets, referencedConfigMaps *sets.Set[string]) (*corev1.EnvVarSource, error) { + if len(slices) != 3 { + return nil, fmt.Errorf("env requires a value in form \"resourceType:name:key\" where \"resourceType\" can be one of \"configMap\" or \"secret\"; got %q", slices) + } + + envVarSource := corev1.EnvVarSource{} + + typeString := strings.TrimSpace(slices[0]) + sourceName := strings.TrimSpace(slices[1]) + sourceKey := strings.TrimSpace(slices[2]) + + var sourceType string + + switch typeString { + case "configMap": + sourceType = "ConfigMap" + envVarSource.ConfigMapKeyRef = &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: sourceName, + }, + Key: sourceKey} + + if !referencedConfigMaps.Has(sourceName) { + referencedConfigMaps.Insert(sourceName) + } + case "secret": + sourceType = "Secret" + envVarSource.SecretKeyRef = &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: sourceName, + }, + Key: sourceKey} + + if !referencedSecrets.Has(sourceName) { + referencedSecrets.Insert(sourceName) + } + default: + return nil, fmt.Errorf("unsupported env source type %q; supported source types are \"configMap\" or \"secret\"", slices[0]) + } + + if len(sourceName) == 0 { + return nil, fmt.Errorf("the name of %s cannot be an empty string", sourceType) + } + + if len(sourceKey) == 0 { + return nil, fmt.Errorf("the key referenced by resource %s %q cannot be an empty string", sourceType, sourceName) + } + + return &envVarSource, nil +} + +var evRegex = regexp.MustCompile(`^{{\s*(\w+)\s*:(\w+)\s*}}$`) + +const ( + ctxIdx = 1 + valIdx = 2 +) + +func processLocalEnvValue(val string) (string, error) { + match := evRegex.FindStringSubmatch(val) + if len(match) > valIdx { + if match[ctxIdx] != "env" { + return "", fmt.Errorf("allowed env value entry is \"{{ env:LOCAL_VALUE }}\"; got: %q", 
match[ctxIdx]) + } + if v, ok := os.LookupEnv(match[valIdx]); ok { + return v, nil + } else { + return "", fmt.Errorf("required local environment variable %q is not set", match[valIdx]) + } + } else { + return val, nil + } +} + +// ProcessVolumes generates Volumes and VolumeMounts from a function config +// volumes: +// - secret: example-secret # mount Secret as Volume +// path: /etc/secret-volume +// - configMap: example-configMap # mount ConfigMap as Volume +// path: /etc/configMap-volume +// - persistentVolumeClaim: { claimName: example-pvc } # mount PersistentVolumeClaim as Volume +// path: /etc/secret-volume +// - emptyDir: {} # mount EmptyDir as Volume +// path: /etc/configMap-volume +func ProcessVolumes(volumes []fn.Volume, referencedSecrets, referencedConfigMaps, referencedPVCs *sets.Set[string]) ([]corev1.Volume, []corev1.VolumeMount, error) { + createdVolumes := sets.NewString() + usedPaths := sets.NewString() + + newVolumes := []corev1.Volume{} + newVolumeMounts := []corev1.VolumeMount{} + + for _, vol := range volumes { + + volumeName := "" + + if vol.Secret != nil { + volumeName = "secret-" + *vol.Secret + + if !createdVolumes.Has(volumeName) { + newVolumes = append(newVolumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: *vol.Secret, + }, + }, + }) + createdVolumes.Insert(volumeName) + + if !referencedSecrets.Has(*vol.Secret) { + referencedSecrets.Insert(*vol.Secret) + } + } + } else if vol.ConfigMap != nil { + volumeName = "config-map-" + *vol.ConfigMap + + if !createdVolumes.Has(volumeName) { + newVolumes = append(newVolumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: *vol.ConfigMap, + }, + }, + }, + }) + createdVolumes.Insert(volumeName) + + if !referencedConfigMaps.Has(*vol.ConfigMap) { + referencedConfigMaps.Insert(*vol.ConfigMap) + } + } + } else if vol.PersistentVolumeClaim != nil { + volumeName = "pvc-" + *vol.PersistentVolumeClaim.ClaimName + + if !createdVolumes.Has(volumeName) { + newVolumes = append(newVolumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: *vol.PersistentVolumeClaim.ClaimName, + ReadOnly: vol.PersistentVolumeClaim.ReadOnly, + }, + }, + }) + createdVolumes.Insert(volumeName) + + if !referencedPVCs.Has(*vol.PersistentVolumeClaim.ClaimName) { + referencedPVCs.Insert(*vol.PersistentVolumeClaim.ClaimName) + } + } + } else if vol.EmptyDir != nil { + volumeName = "empty-dir-" + rand.String(7) + + if !createdVolumes.Has(volumeName) { + + var sizeLimit *resource.Quantity + if vol.EmptyDir.SizeLimit != nil { + sl, err := resource.ParseQuantity(*vol.EmptyDir.SizeLimit) + if err != nil { + return nil, nil, fmt.Errorf("invalid quantity for sizeLimit: %s. 
Error: %s", *vol.EmptyDir.SizeLimit, err) + } + sizeLimit = &sl + } + + newVolumes = append(newVolumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMedium(vol.EmptyDir.Medium), + SizeLimit: sizeLimit, + }, + }, + }) + createdVolumes.Insert(volumeName) + } + } + + if volumeName != "" { + if !usedPaths.Has(*vol.Path) { + newVolumeMounts = append(newVolumeMounts, corev1.VolumeMount{ + Name: volumeName, + MountPath: *vol.Path, + }) + usedPaths.Insert(*vol.Path) + } else { + return nil, nil, fmt.Errorf("mount path %s is defined multiple times", *vol.Path) + } + } + } + + return newVolumes, newVolumeMounts, nil +} + +func UsesRawDeployer(annotations map[string]string) bool { + deployer, ok := annotations[deployer.DeployerNameAnnotation] + + return ok && deployer == KubernetesDeployerName +} diff --git a/pkg/k8s/deployer_int_test.go b/pkg/k8s/deployer_int_test.go new file mode 100644 index 0000000000..725b0de0cf --- /dev/null +++ b/pkg/k8s/deployer_int_test.go @@ -0,0 +1,62 @@ +//go:build integration +// +build integration + +package k8s_test + +import ( + "testing" + + deployertesting "knative.dev/func/pkg/deployer/testing" + "knative.dev/func/pkg/k8s" +) + +func TestInt_FullPath(t *testing.T) { + deployertesting.TestInt_FullPath(t, + k8s.NewDeployer(k8s.WithDeployerVerbose(false)), + k8s.NewRemover(false), + k8s.NewLister(false), + k8s.NewDescriber(false), + k8s.KubernetesDeployerName) +} + +func TestInt_Deploy(t *testing.T) { + deployertesting.TestInt_Deploy(t, + k8s.NewDeployer(k8s.WithDeployerVerbose(false)), + k8s.NewRemover(false), + k8s.NewDescriber(false), + k8s.KubernetesDeployerName) +} + +func TestInt_Metadata(t *testing.T) { + deployertesting.TestInt_Metadata(t, + k8s.NewDeployer(k8s.WithDeployerVerbose(false)), + k8s.NewRemover(false), + k8s.NewDescriber(false), + k8s.KubernetesDeployerName) +} + +func TestInt_Events(t *testing.T) { + t.Skip("Kubernetes deploy does not support func subscribe yet") + + deployertesting.TestInt_Events(t, + k8s.NewDeployer(k8s.WithDeployerVerbose(false)), + k8s.NewRemover(false), + k8s.NewDescriber(false), + k8s.KubernetesDeployerName) +} + +func TestInt_Scale(t *testing.T) { + deployertesting.TestInt_Scale(t, + k8s.NewDeployer(k8s.WithDeployerVerbose(false)), + k8s.NewRemover(false), + k8s.NewDescriber(false), + k8s.KubernetesDeployerName) +} + +func TestInt_EnvsUpdate(t *testing.T) { + deployertesting.TestInt_EnvsUpdate(t, + k8s.NewDeployer(k8s.WithDeployerVerbose(false)), + k8s.NewRemover(false), + k8s.NewDescriber(false), + k8s.KubernetesDeployerName) +} diff --git a/pkg/knative/deployer_test.go b/pkg/k8s/deployer_test.go similarity index 84% rename from pkg/knative/deployer_test.go rename to pkg/k8s/deployer_test.go index e8832d3258..b515c6b575 100644 --- a/pkg/knative/deployer_test.go +++ b/pkg/k8s/deployer_test.go @@ -1,15 +1,14 @@ -package knative +package k8s import ( "os" "testing" corev1 "k8s.io/api/core/v1" - fn "knative.dev/func/pkg/functions" ) -func Test_setHealthEndpoints(t *testing.T) { +func Test_SetHealthEndpoints(t *testing.T) { f := fn.Function{ Name: "testing", Deploy: fn.DeploySpec{ @@ -20,7 +19,7 @@ func Test_setHealthEndpoints(t *testing.T) { }, } c := corev1.Container{} - setHealthEndpoints(f, &c) + SetHealthEndpoints(f, &c) got := c.LivenessProbe.HTTPGet.Path if got != "/lively" { t.Errorf("expected \"/lively\" but got %v", got) @@ -31,19 +30,19 @@ func Test_setHealthEndpoints(t *testing.T) { } } -func Test_setHealthEndpointDefaults(t 
*testing.T) { +func Test_SetHealthEndpointDefaults(t *testing.T) { f := fn.Function{ Name: "testing", } c := corev1.Container{} - setHealthEndpoints(f, &c) + SetHealthEndpoints(f, &c) got := c.LivenessProbe.HTTPGet.Path - if got != LIVENESS_ENDPOINT { - t.Errorf("expected \"%v\" but got %v", LIVENESS_ENDPOINT, got) + if got != DefaultLivenessEndpoint { + t.Errorf("expected \"%v\" but got %v", DefaultLivenessEndpoint, got) } got = c.ReadinessProbe.HTTPGet.Path - if got != READINESS_ENDPOINT { - t.Errorf("expected \"%v\" but got %v", READINESS_ENDPOINT, got) + if got != DefaultReadinessEndpoint { + t.Errorf("expected \"%v\" but got %v", DefaultReadinessEndpoint, got) } } diff --git a/pkg/k8s/describer.go b/pkg/k8s/describer.go new file mode 100644 index 0000000000..f64e126d60 --- /dev/null +++ b/pkg/k8s/describer.go @@ -0,0 +1,70 @@ +package k8s + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + fn "knative.dev/func/pkg/functions" +) + +type Describer struct { + verbose bool +} + +func NewDescriber(verbose bool) *Describer { + return &Describer{ + verbose: verbose, + } +} + +// Describe a function by name. +func (d *Describer) Describe(ctx context.Context, name, namespace string) (fn.Instance, error) { + if namespace == "" { + return fn.Instance{}, fmt.Errorf("function namespace is required when describing %q", name) + } + + clientset, err := NewKubernetesClientset() + if err != nil { + return fn.Instance{}, fmt.Errorf("unable to create k8s client: %v", err) + } + + deploymentClient := clientset.AppsV1().Deployments(namespace) + serviceClient := clientset.CoreV1().Services(namespace) + + service, err := serviceClient.Get(ctx, name, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + // Service doesn't exist - we don't handle this + return fn.Instance{}, fn.ErrNotHandled + } + + // Other errors (permissions, network, etc.) - real error + return fn.Instance{}, fmt.Errorf("failed to check if service uses raw K8s deployer: %w", err) + } + + if !UsesRawDeployer(service.Annotations) { + return fn.Instance{}, fn.ErrNotHandled + } + + // We're responsible, for this function --> proceed... + + deployment, err := deploymentClient.Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fn.Instance{}, fmt.Errorf("unable to get deployment %q: %v", name, err) + } + + primaryRouteURL := fmt.Sprintf("http://%s.%s.svc", name, namespace) // TODO: get correct scheme? 
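+ // Build the fn.Instance from the Deployment's labels; the route is the in-cluster Service DNS name.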
+ + description := fn.Instance{ + Name: name, + Namespace: namespace, + Deployer: KubernetesDeployerName, + Labels: deployment.Labels, + Route: primaryRouteURL, + Routes: []string{primaryRouteURL}, + } + + return description, nil +} diff --git a/pkg/k8s/describer_int_test.go b/pkg/k8s/describer_int_test.go new file mode 100644 index 0000000000..b7f8975bfc --- /dev/null +++ b/pkg/k8s/describer_int_test.go @@ -0,0 +1,19 @@ +//go:build integration +// +build integration + +package k8s_test + +import ( + "testing" + + describertesting "knative.dev/func/pkg/describer/testing" + "knative.dev/func/pkg/k8s" +) + +func TestInt_Describe(t *testing.T) { + describertesting.TestInt_Describe(t, + k8s.NewDescriber(true), + k8s.NewDeployer(k8s.WithDeployerVerbose(true)), + k8s.NewRemover(true), + k8s.KubernetesDeployerName) +} diff --git a/pkg/k8s/lister.go b/pkg/k8s/lister.go new file mode 100644 index 0000000000..8247c96045 --- /dev/null +++ b/pkg/k8s/lister.go @@ -0,0 +1,91 @@ +package k8s + +import ( + "context" + "fmt" + + v1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + fn "knative.dev/func/pkg/functions" +) + +type Lister struct { + verbose bool +} + +func NewLister(verbose bool) fn.Lister { + return &Lister{ + verbose: verbose, + } +} + +func (l *Lister) List(ctx context.Context, namespace string) ([]fn.ListItem, error) { + clientset, err := NewKubernetesClientset() + if err != nil { + return nil, fmt.Errorf("unable to create k8s client: %v", err) + } + + serviceClient := clientset.CoreV1().Services(namespace) + + services, err := serviceClient.List(ctx, metav1.ListOptions{ + LabelSelector: "function.knative.dev/name", + }) + if err != nil { + return nil, fmt.Errorf("unable to list services: %v", err) + } + + listItems := make([]fn.ListItem, 0, len(services.Items)) + for _, service := range services.Items { + if !UsesRawDeployer(service.Annotations) { + continue + } + + item, err := l.get(ctx, clientset, service.Name, namespace) + if err != nil { + return nil, fmt.Errorf("unable to get details about function: %v", err) + } + + listItems = append(listItems, item) + } + + return listItems, nil +} + +// Get a function, optionally specifying a namespace. 
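+// The ready state is derived from the Deployment's Available condition, and the URL is the in-cluster Service address.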
+func (l *Lister) get(ctx context.Context, clientset *kubernetes.Clientset, name, namespace string) (fn.ListItem, error) { + deploymentClient := clientset.AppsV1().Deployments(namespace) + serviceClient := clientset.CoreV1().Services(namespace) + + deployment, err := deploymentClient.Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fn.ListItem{}, fmt.Errorf("could not get deployment: %w", err) + } + + // get status + ready := corev1.ConditionUnknown + for _, con := range deployment.Status.Conditions { + if con.Type == v1.DeploymentAvailable { + ready = con.Status + break + } + } + + service, err := serviceClient.Get(ctx, deployment.Name, metav1.GetOptions{}) + if err != nil { + return fn.ListItem{}, fmt.Errorf("could not get service: %w", err) + } + + runtimeLabel := "" + listItem := fn.ListItem{ + Name: service.Name, + Namespace: service.Namespace, + Runtime: runtimeLabel, + URL: fmt.Sprintf("http://%s.%s.svc", service.Name, service.Namespace), // TODO: use correct scheme + Ready: string(ready), + Deployer: KubernetesDeployerName, + } + + return listItem, nil +} diff --git a/pkg/k8s/lister_int_test.go b/pkg/k8s/lister_int_test.go new file mode 100644 index 0000000000..5a4058e5b6 --- /dev/null +++ b/pkg/k8s/lister_int_test.go @@ -0,0 +1,20 @@ +//go:build integration +// +build integration + +package k8s_test + +import ( + "testing" + + "knative.dev/func/pkg/k8s" + listertesting "knative.dev/func/pkg/lister/testing" +) + +func TestInt_List(t *testing.T) { + listertesting.TestInt_List(t, + k8s.NewLister(true), + k8s.NewDeployer(k8s.WithDeployerVerbose(true)), + k8s.NewDescriber(true), + k8s.NewRemover(true), + k8s.KubernetesDeployerName) +} diff --git a/pkg/k8s/logs.go b/pkg/k8s/logs.go index a8f5cc4cb6..ed06caa1b1 100644 --- a/pkg/k8s/logs.go +++ b/pkg/k8s/logs.go @@ -3,9 +3,15 @@ package k8s import ( "bytes" "context" + "fmt" "io" + "sync" + "time" + "golang.org/x/sync/errgroup" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" ) // GetPodLogs returns logs from a specified Container in a Pod, if container is empty string, @@ -33,3 +39,136 @@ func GetPodLogs(ctx context.Context, namespace, podName, containerName string) ( return buffer.String(), nil } + +// GetPodLogsBySelector will get logs of a pod. +// +// It will do so by gathering logs of the given container of all affiliated pods. +// In addition, filtering on image can be done so only logs for given image are logged. +// +// This function runs as long as the passed context is active (i.e. it is required cancel the context to stop log gathering). 
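+// Pods are discovered via a watch on labelSelector; when image is non-empty, only pods whose named container runs that image are followed.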
+func GetPodLogsBySelector(ctx context.Context, namespace, labelSelector, containerName, image string, since *time.Time, out io.Writer) error { + client, namespace, err := NewClientAndResolvedNamespace(namespace) + if err != nil { + return fmt.Errorf("cannot create k8s client: %w", err) + } + + pods := client.CoreV1().Pods(namespace) + + podListOpts := metav1.ListOptions{ + Watch: true, + LabelSelector: labelSelector, + } + + w, err := pods.Watch(ctx, podListOpts) + if err != nil { + return fmt.Errorf("cannot create watch: %w", err) + } + defer w.Stop() + + beingProcessed := make(map[string]bool) + var beingProcessedMu sync.Mutex + + copyLogs := func(pod corev1.Pod) error { + defer func() { + beingProcessedMu.Lock() + delete(beingProcessed, pod.Name) + beingProcessedMu.Unlock() + }() + podLogOpts := corev1.PodLogOptions{ + Container: containerName, + Follow: true, + } + if since != nil { + sinceTime := metav1.NewTime(*since) + podLogOpts.SinceTime = &sinceTime + } + req := client.CoreV1().Pods(namespace).GetLogs(pod.Name, &podLogOpts) + + r, e := req.Stream(ctx) + if e != nil { + return fmt.Errorf("cannot get stream: %w", e) + } + defer r.Close() + _, e = io.Copy(out, r) + if e != nil { + return fmt.Errorf("error copying logs: %w", e) + } + return nil + } + + mayReadLogs := func(pod corev1.Pod) bool { + for _, status := range pod.Status.ContainerStatuses { + if status.Name == containerName { + return status.State.Running != nil || status.State.Terminated != nil + } + } + return false + } + + getImage := func(pod corev1.Pod) string { + for _, ctr := range pod.Spec.Containers { + if ctr.Name == containerName { + return ctr.Image + } + } + return "" + } + + var eg errgroup.Group + + for event := range w.ResultChan() { + if event.Type == watch.Modified || event.Type == watch.Added { + pod := *event.Object.(*corev1.Pod) + + beingProcessedMu.Lock() + _, loggingAlready := beingProcessed[pod.Name] + beingProcessedMu.Unlock() + + if !loggingAlready && (image == "" || image == getImage(pod)) && mayReadLogs(pod) { + + beingProcessedMu.Lock() + beingProcessed[pod.Name] = true + beingProcessedMu.Unlock() + + // Capture pod value for the goroutine to avoid closure over loop variable + pod := pod + eg.Go(func() error { return copyLogs(pod) }) + } + } + } + + err = eg.Wait() + if err != nil { + return fmt.Errorf("error while gathering logs: %w", err) + } + return nil +} + +type SynchronizedBuffer struct { + b bytes.Buffer + mu sync.Mutex +} + +func (b *SynchronizedBuffer) String() string { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.String() +} + +func (b *SynchronizedBuffer) Write(p []byte) (n int, err error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.Write(p) +} + +func (b *SynchronizedBuffer) Read(p []byte) (n int, err error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.Read(p) +} + +func (b *SynchronizedBuffer) Reset() { + b.mu.Lock() + defer b.mu.Unlock() + b.b.Reset() +} diff --git a/pkg/k8s/remover.go b/pkg/k8s/remover.go new file mode 100644 index 0000000000..3955d52dde --- /dev/null +++ b/pkg/k8s/remover.go @@ -0,0 +1,70 @@ +package k8s + +import ( + "context" + "fmt" + "os" + + apiErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + fn "knative.dev/func/pkg/functions" +) + +func NewRemover(verbose bool) *Remover { + return &Remover{ + verbose: verbose, + } +} + +type Remover struct { + verbose bool +} + +func (remover *Remover) Remove(ctx context.Context, name, ns string) error { + if ns == "" { + fmt.Fprintf(os.Stderr, "no namespace 
defined when trying to delete a function in k8s remover\n")
+		return fn.ErrNamespaceRequired
+	}
+
+	clientset, err := NewKubernetesClientset()
+	if err != nil {
+		return fmt.Errorf("could not set up kubernetes clientset: %w", err)
+	}
+
+	serviceClient := clientset.CoreV1().Services(ns)
+	svc, err := serviceClient.Get(ctx, name, metav1.GetOptions{})
+	if err != nil {
+		if apiErrors.IsNotFound(err) {
+			// Service doesn't exist - we don't handle this
+			return fn.ErrNotHandled
+		}
+		return err
+	}
+
+	if !UsesRawDeployer(svc.Annotations) {
+		return fn.ErrNotHandled
+	}
+
+	// We're responsible for this function --> proceed...
+
+	deploymentClient := clientset.AppsV1().Deployments(ns)
+
+	// TODO: delete only one and let the API server handle the other via the owner reference
+	err = deploymentClient.Delete(ctx, name, metav1.DeleteOptions{})
+	if err != nil {
+		if apiErrors.IsNotFound(err) {
+			return fn.ErrFunctionNotFound
+		}
+		return fmt.Errorf("k8s remover failed to delete the deployment: %v", err)
+	}
+
+	err = serviceClient.Delete(ctx, name, metav1.DeleteOptions{})
+	if err != nil {
+		if apiErrors.IsNotFound(err) {
+			return fn.ErrFunctionNotFound
+		}
+		return fmt.Errorf("k8s remover failed to delete the service: %v", err)
+	}
+
+	return nil
+}
diff --git a/pkg/k8s/remover_int_test.go b/pkg/k8s/remover_int_test.go
new file mode 100644
index 0000000000..ae22b8bedb
--- /dev/null
+++ b/pkg/k8s/remover_int_test.go
@@ -0,0 +1,20 @@
+//go:build integration
+// +build integration
+
+package k8s_test
+
+import (
+	"testing"
+
+	"knative.dev/func/pkg/k8s"
+	removertesting "knative.dev/func/pkg/remover/testing"
+)
+
+func TestInt_Remove(t *testing.T) {
+	removertesting.TestInt_Remove(t,
+		k8s.NewRemover(true),
+		k8s.NewDeployer(k8s.WithDeployerVerbose(true)),
+		k8s.NewDescriber(true),
+		k8s.NewLister(true),
+		k8s.KubernetesDeployerName)
+}
diff --git a/pkg/k8s/wait.go b/pkg/k8s/wait.go
new file mode 100644
index 0000000000..84fbcdadda
--- /dev/null
+++ b/pkg/k8s/wait.go
@@ -0,0 +1,134 @@
+package k8s
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/kubernetes"
+)
+
+// WaitForDeploymentAvailable waits for a specific deployment to be fully available.
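+// It polls the deployment once per second until the conditions below hold or the given timeout elapses.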
+// A deployment is considered available when: +// - The number of available replicas matches the desired replicas +// - All replicas are updated to the latest version +// - There are no unavailable replicas +// - All pods associated with the deployment are running +func WaitForDeploymentAvailable(ctx context.Context, clientset *kubernetes.Clientset, namespace, deploymentName string, timeout time.Duration) error { + return wait.PollUntilContextTimeout(ctx, 1*time.Second, timeout, true, func(ctx context.Context) (bool, error) { + deployment, err := clientset.AppsV1().Deployments(namespace).Get(ctx, deploymentName, metav1.GetOptions{}) + if err != nil { + return false, err + } + + return checkIfDeploymentIsAvailable(ctx, clientset, deployment) + }) +} + +func WaitForDeploymentAvailableBySelector(ctx context.Context, clientset *kubernetes.Clientset, namespace, selector string, timeout time.Duration) error { + return wait.PollUntilContextTimeout(ctx, 1*time.Second, timeout, true, func(ctx context.Context) (bool, error) { + deployments, err := clientset.AppsV1().Deployments(namespace).List(ctx, metav1.ListOptions{ + LabelSelector: selector, + }) + if err != nil { + return false, err + } + + for _, deployment := range deployments.Items { + ready, err := checkIfDeploymentIsAvailable(ctx, clientset, &deployment) + if err != nil || !ready { + return ready, err + } + } + + return true, nil + }) +} + +func checkIfDeploymentIsAvailable(ctx context.Context, clientset *kubernetes.Clientset, deployment *appsv1.Deployment) (bool, error) { + // Check if the deployment has the desired number of replicas + if deployment.Spec.Replicas == nil { + return false, fmt.Errorf("deployment %s has nil replicas", deployment.Name) + } + + desiredReplicas := *deployment.Spec.Replicas + + // Check if deployment is available + for _, condition := range deployment.Status.Conditions { + if condition.Type == appsv1.DeploymentAvailable && condition.Status == corev1.ConditionTrue { + // Also verify that all replicas are updated, ready, and available + if deployment.Status.UpdatedReplicas == desiredReplicas && + deployment.Status.ReadyReplicas == desiredReplicas && + deployment.Status.AvailableReplicas == desiredReplicas && + deployment.Status.UnavailableReplicas == 0 { + + // Get the current ReplicaSet for this deployment + replicaSets, err := clientset.AppsV1().ReplicaSets(deployment.Namespace).List(ctx, metav1.ListOptions{ + LabelSelector: metav1.FormatLabelSelector(deployment.Spec.Selector), + }) + if err != nil { + return false, err + } + + // Find the current active ReplicaSet (the one with desired replicas > 0) + var currentPodTemplateHash string + for _, rs := range replicaSets.Items { + if rs.Spec.Replicas != nil && *rs.Spec.Replicas > 0 { + // The pod-template-hash label identifies pods from this ReplicaSet + if hash, ok := rs.Labels["pod-template-hash"]; ok { + currentPodTemplateHash = hash + break + } + } + } + + if currentPodTemplateHash == "" { + return false, fmt.Errorf("could not find current pod-template-hash for deployment %s", deployment.Name) + } + + // Verify all pods are from the current ReplicaSet and are running + labelSelector := metav1.FormatLabelSelector(deployment.Spec.Selector) + pods, err := clientset.CoreV1().Pods(deployment.Namespace).List(ctx, metav1.ListOptions{ + LabelSelector: labelSelector, + }) + if err != nil { + return false, err + } + + // Count ready pods from current ReplicaSet only + readyPods := 0 + for _, pod := range pods.Items { + // Check if pod belongs to current ReplicaSet + 
podHash, hasPodHash := pod.Labels["pod-template-hash"] + if !hasPodHash || podHash != currentPodTemplateHash { + // Pod is from an old ReplicaSet - deployment not fully rolled out + if pod.DeletionTimestamp == nil { + // Old pod still exists and not being deleted + return false, nil + } + continue + } + + // Check if pod is ready + for _, podCondition := range pod.Status.Conditions { + if podCondition.Type == corev1.PodReady && podCondition.Status == corev1.ConditionTrue { + readyPods++ + break + } + } + } + + // Ensure we have the desired number of running pods from current ReplicaSet + if int32(readyPods) == desiredReplicas { + return true, nil + } + } + } + } + + return false, nil +} diff --git a/pkg/knative/client.go b/pkg/knative/client.go index 5a623cc2b0..dee3886340 100644 --- a/pkg/knative/client.go +++ b/pkg/knative/client.go @@ -3,7 +3,6 @@ package knative import ( "fmt" "os" - "time" clienteventingv1 "knative.dev/client/pkg/eventing/v1" clientservingv1 "knative.dev/client/pkg/serving/v1" @@ -14,11 +13,6 @@ import ( "knative.dev/func/pkg/k8s" ) -const ( - DefaultWaitingTimeout = 120 * time.Second - DefaultErrorWindowTimeout = 2 * time.Second -) - func NewServingClient(namespace string) (clientservingv1.KnServingClient, error) { if err := validateKubeconfigFile(); err != nil { return nil, err diff --git a/pkg/knative/deployer.go b/pkg/knative/deployer.go index e2f74be641..4786ac1237 100644 --- a/pkg/knative/deployer.go +++ b/pkg/knative/deployer.go @@ -5,38 +5,32 @@ import ( "fmt" "io" "os" - "regexp" "strings" "time" - clienteventingv1 "knative.dev/client/pkg/eventing/v1" - eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1" - duckv1 "knative.dev/pkg/apis/duck/v1" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/sets" + clienteventingv1 "knative.dev/client/pkg/eventing/v1" "knative.dev/client/pkg/flags" servingclientlib "knative.dev/client/pkg/serving" clientservingv1 "knative.dev/client/pkg/serving/v1" "knative.dev/client/pkg/wait" + eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1" + duckv1 "knative.dev/pkg/apis/duck/v1" "knative.dev/serving/pkg/apis/autoscaling" - v1 "knative.dev/serving/pkg/apis/serving/v1" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/func/pkg/deployer" fn "knative.dev/func/pkg/functions" "knative.dev/func/pkg/k8s" ) -const LIVENESS_ENDPOINT = "/health/liveness" -const READINESS_ENDPOINT = "/health/readiness" - -type DeployDecorator interface { - UpdateAnnotations(fn.Function, map[string]string) map[string]string - UpdateLabels(fn.Function, map[string]string) map[string]string -} +const ( + KnativeDeployerName = "knative" +) type DeployerOpt func(*Deployer) @@ -44,19 +38,7 @@ type Deployer struct { // verbose logging enablement flag. verbose bool - decorator DeployDecorator -} - -// ActiveNamespace attempts to read the Kubernetes active namespace. -// Missing configs or not having an active Kubernetes configuration are -// equivalent to having no default namespace (empty string). 
-func ActiveNamespace() string { - // Get client config, if it exists, and from that the namespace - ns, _, err := k8s.GetClientConfig().Namespace() - if err != nil { - fmt.Fprintf(os.Stderr, "Warning: unable to get active namespace: %v\n", err) - } - return ns + decorator deployer.DeployDecorator } func NewDeployer(opts ...DeployerOpt) *Deployer { @@ -75,7 +57,7 @@ func WithDeployerVerbose(verbose bool) DeployerOpt { } } -func WithDeployerDecorator(decorator DeployDecorator) DeployerOpt { +func WithDeployerDecorator(decorator deployer.DeployDecorator) DeployerOpt { return func(d *Deployer) { d.decorator = decorator } @@ -147,7 +129,7 @@ func (d *Deployer) Deploy(ctx context.Context, f fn.Function) (fn.DeploymentResu // Choosing an image to deploy: // If the service has not been deployed before, but there exists a // build image, this build image should be used for the deploy. - // TODO: test/consdier the case where it HAS been deployed, and the + // TODO: test/consider the case where it HAS been deployed, and the // build image has been updated /since/ deployment: do we need a // timestamp? Incrementation? if f.Deploy.Image == "" { @@ -174,7 +156,7 @@ func (d *Deployer) Deploy(ctx context.Context, f fn.Function) (fn.DeploymentResu daprInstalled = true } - var outBuff SynchronizedBuffer + var outBuff k8s.SynchronizedBuffer var out io.Writer = &outBuff if d.verbose { @@ -202,7 +184,7 @@ func (d *Deployer) Deploy(ctx context.Context, f fn.Function) (fn.DeploymentResu return fn.DeploymentResult{}, err } - err = checkResourcesArePresent(ctx, namespace, &referencedSecrets, &referencedConfigMaps, &referencedPVCs, f.Deploy.ServiceAccountName) + err = k8s.CheckResourcesArePresent(ctx, namespace, &referencedSecrets, &referencedConfigMaps, &referencedPVCs, f.Deploy.ServiceAccountName) if err != nil { err = fmt.Errorf("knative deployer failed to generate the Knative Service: %v", err) return fn.DeploymentResult{}, err @@ -230,7 +212,7 @@ func (d *Deployer) Deploy(ctx context.Context, f fn.Function) (fn.DeploymentResu }() go func() { err, _ := client.WaitForService(ctx, f.Name, - clientservingv1.WaitConfig{Timeout: DefaultWaitingTimeout, ErrorWindow: DefaultErrorWindowTimeout}, + clientservingv1.WaitConfig{Timeout: k8s.DefaultWaitingTimeout, ErrorWindow: k8s.DefaultErrorWindowTimeout}, wait.NoopMessageCallback()) cherr <- err close(cherr) @@ -294,17 +276,17 @@ func (d *Deployer) Deploy(ctx context.Context, f fn.Function) (fn.DeploymentResu referencedConfigMaps := sets.New[string]() referencedPVCs := sets.New[string]() - newEnv, newEnvFrom, err := processEnvs(f.Run.Envs, &referencedSecrets, &referencedConfigMaps) + newEnv, newEnvFrom, err := k8s.ProcessEnvs(f.Run.Envs, &referencedSecrets, &referencedConfigMaps) if err != nil { return fn.DeploymentResult{}, err } - newVolumes, newVolumeMounts, err := processVolumes(f.Run.Volumes, &referencedSecrets, &referencedConfigMaps, &referencedPVCs) + newVolumes, newVolumeMounts, err := k8s.ProcessVolumes(f.Run.Volumes, &referencedSecrets, &referencedConfigMaps, &referencedPVCs) if err != nil { return fn.DeploymentResult{}, err } - err = checkResourcesArePresent(ctx, namespace, &referencedSecrets, &referencedConfigMaps, &referencedPVCs, f.Deploy.ServiceAccountName) + err = k8s.CheckResourcesArePresent(ctx, namespace, &referencedSecrets, &referencedConfigMaps, &referencedPVCs, f.Deploy.ServiceAccountName) if err != nil { err = fmt.Errorf("knative deployer failed to update the Knative Service: %v", err) return fn.DeploymentResult{}, err @@ -317,7 +299,7 @@ func (d 
*Deployer) Deploy(ctx context.Context, f fn.Function) (fn.DeploymentResu } err, _ = client.WaitForService(ctx, f.Name, - clientservingv1.WaitConfig{Timeout: DefaultWaitingTimeout, ErrorWindow: DefaultErrorWindowTimeout}, + clientservingv1.WaitConfig{Timeout: k8s.DefaultWaitingTimeout, ErrorWindow: k8s.DefaultErrorWindowTimeout}, wait.NoopMessageCallback()) if err != nil { if !d.verbose { @@ -363,9 +345,9 @@ func createTriggers(ctx context.Context, f fn.Function, client clientservingv1.K attributes[key] = value } - err = eventingClient.CreateTrigger(ctx, &eventingv1.Trigger{ + err := eventingClient.CreateTrigger(ctx, &eventingv1.Trigger{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-function-trigger-%d", ksvc.Name, i), + Name: fmt.Sprintf("%s-function-trigger-%d", ksvc.GetName(), i), OwnerReferences: []metav1.OwnerReference{ { APIVersion: ksvc.APIVersion, @@ -382,7 +364,7 @@ func createTriggers(ctx context.Context, f fn.Function, client clientservingv1.K Ref: &duckv1.KReference{ APIVersion: ksvc.APIVersion, Kind: ksvc.Kind, - Name: ksvc.Name, + Name: ksvc.GetName(), }}, Filter: &eventingv1.TriggerFilter{ @@ -398,70 +380,32 @@ func createTriggers(ctx context.Context, f fn.Function, client clientservingv1.K return nil } -func probeFor(url string) *corev1.Probe { - return &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - HTTPGet: &corev1.HTTPGetAction{ - Path: url, - }, - }, - } -} - -func setHealthEndpoints(f fn.Function, c *corev1.Container) *corev1.Container { - // Set the defaults - c.LivenessProbe = probeFor(LIVENESS_ENDPOINT) - c.ReadinessProbe = probeFor(READINESS_ENDPOINT) - - // If specified in func.yaml, the provided values override the defaults - if f.Deploy.HealthEndpoints.Liveness != "" { - c.LivenessProbe = probeFor(f.Deploy.HealthEndpoints.Liveness) - } - if f.Deploy.HealthEndpoints.Readiness != "" { - c.ReadinessProbe = probeFor(f.Deploy.HealthEndpoints.Readiness) - } - return c -} - -func generateNewService(f fn.Function, decorator DeployDecorator, daprInstalled bool) (*v1.Service, error) { - // set defaults to the values that avoid the following warning "Kubernetes default value is insecure, Knative may default this to secure in a future release" - runAsNonRoot := true - allowPrivilegeEscalation := false - capabilities := corev1.Capabilities{ - Drop: []corev1.Capability{"ALL"}, - } - seccompProfile := corev1.SeccompProfile{ - Type: corev1.SeccompProfileType("RuntimeDefault"), - } +func generateNewService(f fn.Function, decorator deployer.DeployDecorator, daprInstalled bool) (*servingv1.Service, error) { container := corev1.Container{ Image: f.Deploy.Image, - SecurityContext: &corev1.SecurityContext{ - RunAsNonRoot: &runAsNonRoot, - AllowPrivilegeEscalation: &allowPrivilegeEscalation, - Capabilities: &capabilities, - SeccompProfile: &seccompProfile, - }, } - setHealthEndpoints(f, &container) + + k8s.SetSecurityContext(&container) + k8s.SetHealthEndpoints(f, &container) referencedSecrets := sets.New[string]() referencedConfigMaps := sets.New[string]() referencedPVC := sets.New[string]() - newEnv, newEnvFrom, err := processEnvs(f.Run.Envs, &referencedSecrets, &referencedConfigMaps) + newEnv, newEnvFrom, err := k8s.ProcessEnvs(f.Run.Envs, &referencedSecrets, &referencedConfigMaps) if err != nil { return nil, err } container.Env = newEnv container.EnvFrom = newEnvFrom - newVolumes, newVolumeMounts, err := processVolumes(f.Run.Volumes, &referencedSecrets, &referencedConfigMaps, &referencedPVC) + newVolumes, newVolumeMounts, err := k8s.ProcessVolumes(f.Run.Volumes, 
&referencedSecrets, &referencedConfigMaps, &referencedPVC) if err != nil { return nil, err } container.VolumeMounts = newVolumeMounts - labels, err := generateServiceLabels(f, decorator) + labels, err := deployer.GenerateCommonLabels(f, decorator) if err != nil { return nil, err } @@ -475,20 +419,20 @@ func generateNewService(f fn.Function, decorator DeployDecorator, daprInstalled revisionAnnotations[k] = v } - service := &v1.Service{ + service := &servingv1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: f.Name, Labels: labels, Annotations: annotations, }, - Spec: v1.ServiceSpec{ - ConfigurationSpec: v1.ConfigurationSpec{ - Template: v1.RevisionTemplateSpec{ + Spec: servingv1.ServiceSpec{ + ConfigurationSpec: servingv1.ConfigurationSpec{ + Template: servingv1.RevisionTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: labels, Annotations: revisionAnnotations, }, - Spec: v1.RevisionSpec{ + Spec: servingv1.RevisionSpec{ PodSpec: corev1.PodSpec{ Containers: []corev1.Container{ container, @@ -510,53 +454,14 @@ func generateNewService(f fn.Function, decorator DeployDecorator, daprInstalled return service, nil } -// generateServiceLabels creates a final map of service labels based -// on the function's defined labels plus the -// application of any provided label decorator. -func generateServiceLabels(f fn.Function, d DeployDecorator) (ll map[string]string, err error) { - ll, err = f.LabelsMap() - if err != nil { - return - } +// generateServiceAnnotations creates a final map of service annotations. +// It uses the common annotation generator and adds Knative-specific annotations. +func generateServiceAnnotations(f fn.Function, d deployer.DeployDecorator, previousService *servingv1.Service, daprInstalled bool) (aa map[string]string) { + // Start with common annotations (includes Dapr, user annotations, and decorator) + aa = deployer.GenerateCommonAnnotations(f, d, daprInstalled, KnativeDeployerName) - if f.Domain != "" { - ll["func.domain"] = f.Domain - } - - if d != nil { - ll = d.UpdateLabels(f, ll) - } - - return -} - -// generateServiceAnnotations creates a final map of service annotations based -// on static defaults plus the function's defined annotations plus the -// application of any provided annotation decorator. -// Also sets `serving.knative.dev/creator` to a value specified in annotations in the service reference in the previousService parameter, -// this is beneficial when we are updating a service to pass validation on Knative side - the annotation is immutable. -func generateServiceAnnotations(f fn.Function, d DeployDecorator, previousService *v1.Service, daprInstalled bool) (aa map[string]string) { - aa = make(map[string]string) - - if daprInstalled { - // Enables Dapr support. - // Has no effect unless the target cluster has Dapr control plane installed. 
- for k, v := range daprAnnotations(f.Name) { - aa[k] = v - } - } - - // Function-defined annotations - for k, v := range f.Deploy.Annotations { - aa[k] = v - } - - // Decorator - if d != nil { - aa = d.UpdateAnnotations(f, aa) - } - - // Set correct creator if we are updating a function + // Set correct creator if we are updating a function (Knative-specific) + // This annotation is immutable and must be preserved when updating if previousService != nil { knativeCreatorAnnotation := "serving.knative.dev/creator" if val, ok := previousService.Annotations[knativeCreatorAnnotation]; ok { @@ -567,22 +472,8 @@ func generateServiceAnnotations(f fn.Function, d DeployDecorator, previousServic return } -// annotations which, if included and Dapr control plane is installed in -// the target cluster will result in a sidecar exposing the dapr HTTP API -// on localhost:3500 and metrics on 9092 -func daprAnnotations(appid string) map[string]string { - // make optional - aa := make(map[string]string) - aa["dapr.io/app-id"] = appid - aa["dapr.io/enabled"] = DaprEnabled - aa["dapr.io/metrics-port"] = DaprMetricsPort - aa["dapr.io/app-port"] = "8080" - aa["dapr.io/enable-api-logging"] = DaprEnableAPILogging - return aa -} - -func updateService(f fn.Function, previousService *v1.Service, newEnv []corev1.EnvVar, newEnvFrom []corev1.EnvFromSource, newVolumes []corev1.Volume, newVolumeMounts []corev1.VolumeMount, decorator DeployDecorator, daprInstalled bool) func(service *v1.Service) (*v1.Service, error) { - return func(service *v1.Service) (*v1.Service, error) { +func updateService(f fn.Function, previousService *servingv1.Service, newEnv []corev1.EnvVar, newEnvFrom []corev1.EnvFromSource, newVolumes []corev1.Volume, newVolumeMounts []corev1.VolumeMount, decorator deployer.DeployDecorator, daprInstalled bool) func(service *servingv1.Service) (*servingv1.Service, error) { + return func(service *servingv1.Service) (*servingv1.Service, error) { // Removing the name so the k8s server can fill it in with generated name, // this prevents conflicts in Revision name when updating the KService from multiple places. service.Spec.Template.Name = "" @@ -612,14 +503,14 @@ func updateService(f fn.Function, previousService *v1.Service, newEnv []corev1.E // config. At runtime this configuration file could be consulted. I don't // know what this would mean for developers using the func library directly. 
cp := &service.Spec.Template.Spec.Containers[0] - setHealthEndpoints(f, cp) + k8s.SetHealthEndpoints(f, cp) err := setServiceOptions(&service.Spec.Template, f.Deploy.Options) if err != nil { return service, err } - labels, err := generateServiceLabels(f, decorator) + labels, err := deployer.GenerateCommonLabels(f, decorator) if err != nil { return nil, err } @@ -641,397 +532,9 @@ func updateService(f fn.Function, previousService *v1.Service, newEnv []corev1.E } } -// processEnvs generates array of EnvVars and EnvFromSources from a function config -// envs: -// - name: EXAMPLE1 # ENV directly from a value -// value: value1 -// - name: EXAMPLE2 # ENV from the local ENV var -// value: {{ env:MY_ENV }} -// - name: EXAMPLE3 -// value: {{ secret:example-secret:key }} # ENV from a key in Secret -// - value: {{ secret:example-secret }} # all ENVs from Secret -// - name: EXAMPLE4 -// value: {{ configMap:configMapName:key }} # ENV from a key in ConfigMap -// - value: {{ configMap:configMapName }} # all key-pair values from ConfigMap are set as ENV -func processEnvs(envs []fn.Env, referencedSecrets, referencedConfigMaps *sets.Set[string]) ([]corev1.EnvVar, []corev1.EnvFromSource, error) { - - envs = withOpenAddress(envs) // prepends ADDRESS=0.0.0.0 if not extant - - envVars := []corev1.EnvVar{{Name: "BUILT", Value: time.Now().Format("20060102T150405")}} - envFrom := []corev1.EnvFromSource{} - - for _, env := range envs { - if env.Name == nil && env.Value != nil { - // all key-pair values from secret/configMap are set as ENV, eg. {{ secret:secretName }} or {{ configMap:configMapName }} - if strings.HasPrefix(*env.Value, "{{") { - envFromSource, err := createEnvFromSource(*env.Value, referencedSecrets, referencedConfigMaps) - if err != nil { - return nil, nil, err - } - envFrom = append(envFrom, *envFromSource) - continue - } - } else if env.Name != nil && env.Value != nil { - if strings.HasPrefix(*env.Value, "{{") { - slices := strings.Split(strings.Trim(*env.Value, "{} "), ":") - if len(slices) == 3 { - // ENV from a key in secret/configMap, eg. FOO={{ secret:secretName:key }} FOO={{ configMap:configMapName.key }} - valueFrom, err := createEnvVarSource(slices, referencedSecrets, referencedConfigMaps) - envVars = append(envVars, corev1.EnvVar{Name: *env.Name, ValueFrom: valueFrom}) - if err != nil { - return nil, nil, err - } - continue - } else if len(slices) == 2 { - // ENV from the local ENV var, eg. FOO={{ env:LOCAL_ENV }} - localValue, err := processLocalEnvValue(*env.Value) - if err != nil { - return nil, nil, err - } - envVars = append(envVars, corev1.EnvVar{Name: *env.Name, Value: localValue}) - continue - } - } else { - // a standard ENV with key and value, eg. FOO=bar - envVars = append(envVars, corev1.EnvVar{Name: *env.Name, Value: *env.Value}) - continue - } - } - return nil, nil, fmt.Errorf("unsupported env source entry \"%v\"", env) - } - - return envVars, envFrom, nil -} - -// withOpenAddresss prepends ADDRESS=0.0.0.0 to the envs if not present. -// -// This is combined with the value of PORT at runtime to determine the full -// Listener address on which a Function will listen tcp requests. -// -// Runtimes should, by default, only listen on the loopback interface by -// default, as they may be `func run` locally, for security purposes. -// This environment vriable instructs the runtimes to listen on all interfaces -// by default when actually being deployed, since they will need to actually -// listen for client requests and for health readiness/liveness probes. 
-// -// Should a user wish to securely open their function to only receive requests -// on a specific interface, such as a WireGuar-encrypted mesh network which -// presents as a specific interface, that can be achieved by setting the -// ADDRESS value as an environment variable on their function to the interface -// on which to listen. -// -// NOTE this env is currently only respected by scaffolded Go functions, because -// they are the only ones which support being `func run` locally. Other -// runtimes will respect the value as they are updated to support scaffolding. -func withOpenAddress(ee []fn.Env) []fn.Env { - // TODO: this is unnecessarily complex due to both key and value of the - // envs slice being being pointers. There is an outstanding tech-debt item - // to remove pointers from Function Envs, Volumes, Labels, and Options. - var found bool - for _, e := range ee { - if e.Name != nil && *e.Name == "ADDRESS" { - found = true - break - } - } - if !found { - k := "ADDRESS" - v := "0.0.0.0" - ee = append(ee, fn.Env{Name: &k, Value: &v}) - } - return ee -} - -func createEnvFromSource(value string, referencedSecrets, referencedConfigMaps *sets.Set[string]) (*corev1.EnvFromSource, error) { - slices := strings.Split(strings.Trim(value, "{} "), ":") - if len(slices) != 2 { - return nil, fmt.Errorf("env requires a value in form \"resourceType:name\" where \"resourceType\" can be one of \"configMap\" or \"secret\"; got %q", slices) - } - - envVarSource := corev1.EnvFromSource{} - - typeString := strings.TrimSpace(slices[0]) - sourceName := strings.TrimSpace(slices[1]) - - var sourceType string - - switch typeString { - case "configMap": - sourceType = "ConfigMap" - envVarSource.ConfigMapRef = &corev1.ConfigMapEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: sourceName, - }} - - if !referencedConfigMaps.Has(sourceName) { - referencedConfigMaps.Insert(sourceName) - } - case "secret": - sourceType = "Secret" - envVarSource.SecretRef = &corev1.SecretEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: sourceName, - }} - if !referencedSecrets.Has(sourceName) { - referencedSecrets.Insert(sourceName) - } - default: - return nil, fmt.Errorf("unsupported env source type %q; supported source types are \"configMap\" or \"secret\"", slices[0]) - } - - if len(sourceName) == 0 { - return nil, fmt.Errorf("the name of %s cannot be an empty string", sourceType) - } - - return &envVarSource, nil -} - -func createEnvVarSource(slices []string, referencedSecrets, referencedConfigMaps *sets.Set[string]) (*corev1.EnvVarSource, error) { - - if len(slices) != 3 { - return nil, fmt.Errorf("env requires a value in form \"resourceType:name:key\" where \"resourceType\" can be one of \"configMap\" or \"secret\"; got %q", slices) - } - - envVarSource := corev1.EnvVarSource{} - - typeString := strings.TrimSpace(slices[0]) - sourceName := strings.TrimSpace(slices[1]) - sourceKey := strings.TrimSpace(slices[2]) - - var sourceType string - - switch typeString { - case "configMap": - sourceType = "ConfigMap" - envVarSource.ConfigMapKeyRef = &corev1.ConfigMapKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: sourceName, - }, - Key: sourceKey} - - if !referencedConfigMaps.Has(sourceName) { - referencedConfigMaps.Insert(sourceName) - } - case "secret": - sourceType = "Secret" - envVarSource.SecretKeyRef = &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: sourceName, - }, - Key: sourceKey} - - if 
!referencedSecrets.Has(sourceName) { - referencedSecrets.Insert(sourceName) - } - default: - return nil, fmt.Errorf("unsupported env source type %q; supported source types are \"configMap\" or \"secret\"", slices[0]) - } - - if len(sourceName) == 0 { - return nil, fmt.Errorf("the name of %s cannot be an empty string", sourceType) - } - - if len(sourceKey) == 0 { - return nil, fmt.Errorf("the key referenced by resource %s %q cannot be an empty string", sourceType, sourceName) - } - - return &envVarSource, nil -} - -var evRegex = regexp.MustCompile(`^{{\s*(\w+)\s*:(\w+)\s*}}$`) - -const ( - ctxIdx = 1 - valIdx = 2 -) - -func processLocalEnvValue(val string) (string, error) { - match := evRegex.FindStringSubmatch(val) - if len(match) > valIdx { - if match[ctxIdx] != "env" { - return "", fmt.Errorf("allowed env value entry is \"{{ env:LOCAL_VALUE }}\"; got: %q", match[ctxIdx]) - } - if v, ok := os.LookupEnv(match[valIdx]); ok { - return v, nil - } else { - return "", fmt.Errorf("required local environment variable %q is not set", match[valIdx]) - } - } else { - return val, nil - } -} - -// / processVolumes generates Volumes and VolumeMounts from a function config -// volumes: -// - secret: example-secret # mount Secret as Volume -// path: /etc/secret-volume -// - configMap: example-configMap # mount ConfigMap as Volume -// path: /etc/configMap-volume -// - persistentVolumeClaim: { claimName: example-pvc } # mount PersistentVolumeClaim as Volume -// path: /etc/secret-volume -// - emptyDir: {} # mount EmptyDir as Volume -// path: /etc/configMap-volume -func processVolumes(volumes []fn.Volume, referencedSecrets, referencedConfigMaps, referencedPVCs *sets.Set[string]) ([]corev1.Volume, []corev1.VolumeMount, error) { - - createdVolumes := sets.NewString() - usedPaths := sets.NewString() - - newVolumes := []corev1.Volume{} - newVolumeMounts := []corev1.VolumeMount{} - - for _, vol := range volumes { - - volumeName := "" - - if vol.Secret != nil { - volumeName = "secret-" + *vol.Secret - - if !createdVolumes.Has(volumeName) { - newVolumes = append(newVolumes, corev1.Volume{ - Name: volumeName, - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: *vol.Secret, - }, - }, - }) - createdVolumes.Insert(volumeName) - - if !referencedSecrets.Has(*vol.Secret) { - referencedSecrets.Insert(*vol.Secret) - } - } - } else if vol.ConfigMap != nil { - volumeName = "config-map-" + *vol.ConfigMap - - if !createdVolumes.Has(volumeName) { - newVolumes = append(newVolumes, corev1.Volume{ - Name: volumeName, - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: *vol.ConfigMap, - }, - }, - }, - }) - createdVolumes.Insert(volumeName) - - if !referencedConfigMaps.Has(*vol.ConfigMap) { - referencedConfigMaps.Insert(*vol.ConfigMap) - } - } - } else if vol.PersistentVolumeClaim != nil { - volumeName = "pvc-" + *vol.PersistentVolumeClaim.ClaimName - - if !createdVolumes.Has(volumeName) { - newVolumes = append(newVolumes, corev1.Volume{ - Name: volumeName, - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: *vol.PersistentVolumeClaim.ClaimName, - ReadOnly: vol.PersistentVolumeClaim.ReadOnly, - }, - }, - }) - createdVolumes.Insert(volumeName) - - if !referencedPVCs.Has(*vol.PersistentVolumeClaim.ClaimName) { - referencedPVCs.Insert(*vol.PersistentVolumeClaim.ClaimName) - } - } - } else if vol.EmptyDir != nil { - volumeName = "empty-dir-" + 
rand.String(7) - - if !createdVolumes.Has(volumeName) { - - var sizeLimit *resource.Quantity - if vol.EmptyDir.SizeLimit != nil { - sl, err := resource.ParseQuantity(*vol.EmptyDir.SizeLimit) - if err != nil { - return nil, nil, fmt.Errorf("invalid quantity for sizeLimit: %s. Error: %s", *vol.EmptyDir.SizeLimit, err) - } - sizeLimit = &sl - } - - newVolumes = append(newVolumes, corev1.Volume{ - Name: volumeName, - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{ - Medium: corev1.StorageMedium(vol.EmptyDir.Medium), - SizeLimit: sizeLimit, - }, - }, - }) - createdVolumes.Insert(volumeName) - } - } - - if volumeName != "" { - if !usedPaths.Has(*vol.Path) { - newVolumeMounts = append(newVolumeMounts, corev1.VolumeMount{ - Name: volumeName, - MountPath: *vol.Path, - }) - usedPaths.Insert(*vol.Path) - } else { - return nil, nil, fmt.Errorf("mount path %s is defined multiple times", *vol.Path) - } - } - } - - return newVolumes, newVolumeMounts, nil -} - -// checkResourcesArePresent returns error if Secrets or ConfigMaps -// referenced in input sets are not deployed on the cluster in the specified namespace -func checkResourcesArePresent(ctx context.Context, namespace string, referencedSecrets, referencedConfigMaps, referencedPVCs *sets.Set[string], referencedServiceAccount string) error { - - errMsg := "" - for s := range *referencedSecrets { - _, err := k8s.GetSecret(ctx, s, namespace) - if err != nil { - if errors.IsForbidden(err) { - errMsg += " Ensure that the service account has the necessary permissions to access the secret.\n" - } else { - errMsg += fmt.Sprintf(" referenced Secret \"%s\" is not present in namespace \"%s\"\n", s, namespace) - } - } - } - - for cm := range *referencedConfigMaps { - _, err := k8s.GetConfigMap(ctx, cm, namespace) - if err != nil { - errMsg += fmt.Sprintf(" referenced ConfigMap \"%s\" is not present in namespace \"%s\"\n", cm, namespace) - } - } - - for pvc := range *referencedPVCs { - _, err := k8s.GetPersistentVolumeClaim(ctx, pvc, namespace) - if err != nil { - errMsg += fmt.Sprintf(" referenced PersistentVolumeClaim \"%s\" is not present in namespace \"%s\"\n", pvc, namespace) - } - } - - // check if referenced ServiceAccount is present in the namespace if it is not default - if referencedServiceAccount != "" && referencedServiceAccount != "default" { - err := k8s.GetServiceAccount(ctx, referencedServiceAccount, namespace) - if err != nil { - errMsg += fmt.Sprintf(" referenced ServiceAccount \"%s\" is not present in namespace \"%s\"\n", referencedServiceAccount, namespace) - } - } - - if errMsg != "" { - return fmt.Errorf("error(s) while validating resources:\n%s", errMsg) - } - - return nil -} - // setServiceOptions sets annotations on Service Revision Template or in the Service Spec // from values specified in function configuration options -func setServiceOptions(template *v1.RevisionTemplateSpec, options fn.Options) error { - +func setServiceOptions(template *servingv1.RevisionTemplateSpec, options fn.Options) error { toRemove := []string{} toUpdate := map[string]string{} @@ -1122,6 +625,14 @@ func setServiceOptions(template *v1.RevisionTemplateSpec, options fn.Options) er return servingclientlib.UpdateRevisionTemplateAnnotations(template, toUpdate, toRemove) } +func UsesKnativeDeployer(annotations map[string]string) bool { + deployer, ok := annotations[deployer.DeployerNameAnnotation] + + // annotation is not set (which defines for backwards compatibility the knative deployer) + // or the deployer is set explicitly to the 
knative deployer + return !ok || deployer == KnativeDeployerName +} + // wrapDeployerClientError wraps Kubernetes client creation errors with typed errors func wrapDeployerClientError(err error) error { if err == nil { diff --git a/pkg/knative/deployer_int_test.go b/pkg/knative/deployer_int_test.go index 5b47cd0830..41706ac2fe 100644 --- a/pkg/knative/deployer_int_test.go +++ b/pkg/knative/deployer_int_test.go @@ -1,858 +1,60 @@ //go:build integration +// +build integration package knative_test import ( - "context" - "encoding/json" - "fmt" - "net/http" - "os" - "path/filepath" - "strings" "testing" - "time" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/rand" - - eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1" - fn "knative.dev/func/pkg/functions" - "knative.dev/func/pkg/k8s" + deployertesting "knative.dev/func/pkg/deployer/testing" "knative.dev/func/pkg/knative" - "knative.dev/func/pkg/oci" - v1 "knative.dev/pkg/apis/duck/v1" - - fntest "knative.dev/func/pkg/testing" ) -// TestInt_Deploy ensures that the deployer creates a callable service. -// See TestInt_Metadata for Labels, Volumes, Envs. -// See TestInt_Events for Subscriptions -func TestInt_Deploy(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) - name := "func-int-knative-deploy-" + rand.String(5) - root := t.TempDir() - ns := namespace(t, ctx) - - t.Cleanup(cancel) - - client := fn.New( - fn.WithBuilder(oci.NewBuilder("", false)), - fn.WithPusher(oci.NewPusher(true, true, true)), - fn.WithDeployer(knative.NewDeployer(knative.WithDeployerVerbose(true))), - fn.WithDescriber(knative.NewDescriber(false)), - fn.WithRemover(knative.NewRemover(false)), - ) - - f, err := client.Init(fn.Function{ - Root: root, - Name: name, - Runtime: "go", - Namespace: ns, - Registry: registry(), - }) - if err != nil { - t.Fatal(err) - } - // Not really necessary, but it allows us to reuse the "invoke" method: - handlerPath := filepath.Join(root, "handle.go") - if err := os.WriteFile(handlerPath, []byte(testHandler), 0644); err != nil { - t.Fatal(err) - } - - // Build - f, err = client.Build(ctx, f) - if err != nil { - t.Fatal(err) - } - - // Push - f, _, err = client.Push(ctx, f) - if err != nil { - t.Fatal(err) - } - - // Deploy - f, err = client.Deploy(ctx, f) - if err != nil { - t.Fatal(err) - } - t.Cleanup(func() { - err := client.Remove(ctx, "", "", f, true) - if err != nil { - t.Logf("error removing Function: %v", err) - } - }) - - // Wait for function to be ready - instance, err := client.Describe(ctx, "", "", f) - if err != nil { - t.Fatal(err) - } - - // Invoke - statusCode, _ := invoke(t, ctx, instance.Route) - if statusCode != http.StatusOK { - t.Fatalf("expected 200 OK, got %d", statusCode) - } +func TestInt_FullPath(t *testing.T) { + deployertesting.TestInt_FullPath(t, + knative.NewDeployer(knative.WithDeployerVerbose(true)), + knative.NewRemover(true), + knative.NewLister(true), + knative.NewDescriber(true), + knative.KnativeDeployerName) +} +func TestInt_Deploy(t *testing.T) { + deployertesting.TestInt_Deploy(t, + knative.NewDeployer(knative.WithDeployerVerbose(true)), + knative.NewRemover(false), + knative.NewDescriber(false), + knative.KnativeDeployerName) } -// TestInt_Metadata ensures that Secrets, Labels, and Volumes are applied -// when deploying. 
func TestInt_Metadata(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) - name := "func-int-knative-metadata-" + rand.String(5) - root := t.TempDir() - ns := namespace(t, ctx) - - t.Cleanup(cancel) - - client := fn.New( - fn.WithBuilder(oci.NewBuilder("", false)), - fn.WithPusher(oci.NewPusher(true, true, true)), - fn.WithDeployer(knative.NewDeployer(knative.WithDeployerVerbose(true))), - fn.WithDescriber(knative.NewDescriber(false)), - fn.WithRemover(knative.NewRemover(false)), - ) - - // Cluster Resources - // ----------------- - // Remote Secret - secretName := "func-int-knative-meatadata-secret" + rand.String(5) - secretValues := map[string]string{ - "SECRET_KEY_A": "secret-value-a", - "SECRET_KEY_B": "secret-value-b", - } - createSecret(t, ns, secretName, secretValues) - - // Remote ConfigMap - configMapName := "func-int-knative-metadata-configmap" + rand.String(5) - configMap := map[string]string{ - "CONFIGMAP_KEY_A": "configmap-value-a", - "CONFIGMAP_KEY_B": "configmap-value-b", - } - createConfigMap(t, ns, configMapName, configMap) - - // Create Local Environment Variable - t.Setenv("LOCAL_KEY_A", "local-value") - - // Function - // -------- - f, err := client.Init(fn.Function{ - Root: root, - Name: name, - Runtime: "go", - Namespace: ns, - Registry: registry(), - }) - if err != nil { - t.Fatal(err) - } - handlerPath := filepath.Join(root, "handle.go") - if err := os.WriteFile(handlerPath, []byte(testHandler), 0644); err != nil { - t.Fatal(err) - } - - // ENVS - // A static environment variable - f.Run.Envs.Add("STATIC", "static-value") - // from a local environment variable - f.Run.Envs.Add("LOCAL", "{{ env:LOCAL_KEY_A }}") - // From a Secret - f.Run.Envs.Add("SECRET", "{{ secret: "+secretName+":SECRET_KEY_A }}") - // From a Secret (all) - f.Run.Envs.Add("", "{{ secret: "+secretName+" }}") - // From a ConfigMap (by key) - f.Run.Envs.Add("CONFIGMAP", "{{ configMap: "+configMapName+":CONFIGMAP_KEY_A }}") - // From a ConfigMap (all) - f.Run.Envs.Add("", "{{ configMap: "+configMapName+" }}") - - // VOLUMES - // from a Secret - secretPath := "/mnt/secret" - f.Run.Volumes = append(f.Run.Volumes, fn.Volume{ - Secret: &secretName, - Path: &secretPath, - }) - // From a ConfigMap - configMapPath := "/mnt/configmap" - f.Run.Volumes = append(f.Run.Volumes, fn.Volume{ - ConfigMap: &configMapName, - Path: &configMapPath, - }) - // As EmptyDir - emptyDirPath := "/mnt/emptydir" - f.Run.Volumes = append(f.Run.Volumes, fn.Volume{ - EmptyDir: &fn.EmptyDir{}, - Path: &emptyDirPath, - }) - - // Deploy - // ------ - - // Build - f, err = client.Build(ctx, f) - if err != nil { - t.Fatal(err) - } - - // Push - f, _, err = client.Push(ctx, f) - if err != nil { - t.Fatal(err) - } - - // Deploy - f, err = client.Deploy(ctx, f) - if err != nil { - t.Fatal(err) - } - t.Cleanup(func() { - err := client.Remove(ctx, "", "", f, true) - if err != nil { - t.Logf("error removing Function: %v", err) - } - }) - - // Wait for function to be ready - instance, err := client.Describe(ctx, "", "", f) - if err != nil { - t.Fatal(err) - } - - // Assertions - // ---------- - - // Invoke - _, result := invoke(t, ctx, instance.Route) - - // Verify Envs - if result.EnvVars["STATIC"] != "static-value" { - t.Fatalf("STATIC env not set correctly, got: %s", result.EnvVars["STATIC"]) - } - if result.EnvVars["LOCAL"] != "local-value" { - t.Fatalf("LOCAL env not set correctly, got: %s", result.EnvVars["LOCAL"]) - } - if result.EnvVars["SECRET"] != "secret-value-a" { - t.Fatalf("SECRET env not 
set correctly, got: %s", result.EnvVars["SECRET"]) - } - if result.EnvVars["SECRET_KEY_A"] != "secret-value-a" { - t.Fatalf("SECRET_KEY_A not set correctly, got: %s", result.EnvVars["SECRET_KEY_A"]) - } - if result.EnvVars["SECRET_KEY_B"] != "secret-value-b" { - t.Fatalf("SECRET_KEY_B not set correctly, got: %s", result.EnvVars["SECRET_KEY_B"]) - } - if result.EnvVars["CONFIGMAP"] != "configmap-value-a" { - t.Fatalf("CONFIGMAP env not set correctly, got: %s", result.EnvVars["CONFIGMAP"]) - } - if result.EnvVars["CONFIGMAP_KEY_A"] != "configmap-value-a" { - t.Fatalf("CONFIGMAP_KEY_A not set correctly, got: %s", result.EnvVars["CONFIGMAP_KEY_A"]) - } - if result.EnvVars["CONFIGMAP_KEY_B"] != "configmap-value-b" { - t.Fatalf("CONFIGMAP_KEY_B not set correctly, got: %s", result.EnvVars["CONFIGMAP_KEY_B"]) - } - - // Verify Volumes - if !result.Mounts["/mnt/secret"] { - t.Fatalf("Secret mount /mnt/secret not found or not mounted") - } - if !result.Mounts["/mnt/configmap"] { - t.Fatalf("ConfigMap mount /mnt/configmap not found or not mounted") - } - if !result.Mounts["/mnt/emptydir"] { - t.Fatalf("EmptyDir mount /mnt/emptydir not found or not mounted") - } + deployertesting.TestInt_Metadata(t, + knative.NewDeployer(knative.WithDeployerVerbose(true)), + knative.NewRemover(false), + knative.NewDescriber(false), + knative.KnativeDeployerName) } -// TestInt_Events ensures that eventing triggers work. func TestInt_Events(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) - name := "func-int-knative-events-" + rand.String(5) - root := t.TempDir() - ns := namespace(t, ctx) - - t.Cleanup(cancel) - - client := fn.New( - fn.WithBuilder(oci.NewBuilder("", false)), - fn.WithPusher(oci.NewPusher(true, true, true)), - fn.WithDeployer(knative.NewDeployer(knative.WithDeployerVerbose(true))), - fn.WithDescriber(knative.NewDescriber(false)), - fn.WithRemover(knative.NewRemover(false)), - ) - - // Trigger - // ------- - triggerName := "func-int-knative-events-trigger" - validator := createTrigger(t, ctx, ns, triggerName, name) - - // Function - // -------- - f, err := client.Init(fn.Function{ - Root: root, - Name: name, - Runtime: "go", - Namespace: ns, - Registry: registry(), - }) - if err != nil { - t.Fatal(err) - } - - // Deploy - // ------ - - // Build - f, err = client.Build(ctx, f) - if err != nil { - t.Fatal(err) - } - - // Push - f, _, err = client.Push(ctx, f) - if err != nil { - t.Fatal(err) - } - - // Deploy - f, err = client.Deploy(ctx, f) - if err != nil { - t.Fatal(err) - } - t.Cleanup(func() { - err := client.Remove(ctx, "", "", f, true) - if err != nil { - t.Logf("error removing Function: %v", err) - } - }) - - // Wait for function to be ready - instance, err := client.Describe(ctx, "", "", f) - if err != nil { - t.Fatal(err) - } - - // Assertions - // ---------- - if err = validator(instance); err != nil { - t.Fatal(err) - } + deployertesting.TestInt_Events(t, + knative.NewDeployer(knative.WithDeployerVerbose(true)), + knative.NewRemover(false), + knative.NewDescriber(false), + knative.KnativeDeployerName) } -// TestInt_Scale spot-checks that the scale settings are applied by -// ensuring the service is started multiple times when minScale=2 func TestInt_Scale(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) - name := "func-int-knative-scale-" + rand.String(5) - root := t.TempDir() - ns := namespace(t, ctx) - - t.Cleanup(cancel) - - client := fn.New( - fn.WithBuilder(oci.NewBuilder("", false)), - 
fn.WithPusher(oci.NewPusher(true, true, true)), - fn.WithDeployer(knative.NewDeployer(knative.WithDeployerVerbose(true))), - fn.WithDescriber(knative.NewDescriber(false)), - fn.WithRemover(knative.NewRemover(false)), - ) - - f, err := client.Init(fn.Function{ - Root: root, - Name: name, - Runtime: "go", - Namespace: ns, - Registry: registry(), - }) - if err != nil { - t.Fatal(err) - } - // Note: There is no reason for all these being pointers: - minScale := int64(2) - maxScale := int64(100) - f.Deploy.Options = fn.Options{ - Scale: &fn.ScaleOptions{ - Min: &minScale, - Max: &maxScale, - }, - } - - // Build - f, err = client.Build(ctx, f) - if err != nil { - t.Fatal(err) - } - - // Push - f, _, err = client.Push(ctx, f) - if err != nil { - t.Fatal(err) - } - - // Deploy - f, err = client.Deploy(ctx, f) - if err != nil { - t.Fatal(err) - } - t.Cleanup(func() { - err := client.Remove(ctx, "", "", f, true) - if err != nil { - t.Logf("error removing Function: %v", err) - } - }) - - // Wait for function to be ready - _, err = client.Describe(ctx, "", "", f) - if err != nil { - t.Fatal(err) - } - - // Assertions - // ---------- - - // Check the actual number of pods running using Kubernetes API - // This is much more reliable than checking logs - cliSet, err := k8s.NewKubernetesClientset() - if err != nil { - t.Fatal(err) - } - servingClient, err := knative.NewServingClient(ns) - if err != nil { - t.Fatal(err) - } - ksvc, err := servingClient.GetService(ctx, name) - if err != nil { - t.Fatal(err) - } - podList, err := cliSet.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{}) - if err != nil { - t.Fatal(err) - } - readyPods := 0 - for _, pod := range podList.Items { - for _, condition := range pod.Status.Conditions { - if condition.Type == corev1.PodReady && condition.Status == corev1.ConditionTrue { - readyPods++ - break - } - } - } - t.Logf("Found %d ready pods for revision %s (minScale=%d)", readyPods, ksvc.Status.LatestCreatedRevisionName, minScale) - - // Verify minScale is respected - if readyPods < int(minScale) { - t.Errorf("Expected at least %d pods due to minScale, but found %d ready pods", minScale, readyPods) - } - - // TODO: Should we also spot-check that the maxScale was set? This - // seems a bit too coupled to the Knative implementation for my tastes: - // if ksvc.Spec.Template.Annotations["autoscaling.knative.dev/maxScale"] != fmt.Sprintf("%d", maxScale) { - // t.Errorf("maxScale annotation not set correctly, expected %d, got %s", - // maxScale, ksvc.Spec.Template.Annotations["autoscaling.knative.dev/maxScale"]) - // } + deployertesting.TestInt_Scale(t, + knative.NewDeployer(knative.WithDeployerVerbose(true)), + knative.NewRemover(false), + knative.NewDescriber(false), + knative.KnativeDeployerName) } -// TestInt_EnvsUpdate ensures that removing and updating envs are correctly -// reflected during a deployment update. 
func TestInt_EnvsUpdate(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) - name := "func-int-knative-envsupdate-" + rand.String(5) - root := t.TempDir() - ns := namespace(t, ctx) - - t.Cleanup(cancel) - - client := fn.New( - fn.WithBuilder(oci.NewBuilder("", false)), - fn.WithPusher(oci.NewPusher(true, true, true)), - fn.WithDeployer(knative.NewDeployer(knative.WithDeployerVerbose(true))), - fn.WithDescriber(knative.NewDescriber(false)), - fn.WithRemover(knative.NewRemover(false)), - ) - - // Function - // -------- - f, err := client.Init(fn.Function{ - Root: root, - Name: name, - Runtime: "go", - Namespace: ns, - Registry: registry(), - }) - if err != nil { - t.Fatal(err) - } - - // Write custom test handler - handlerPath := filepath.Join(root, "handle.go") - if err := os.WriteFile(handlerPath, []byte(testHandler), 0644); err != nil { - t.Fatal(err) - } - - // ENVS - f.Run.Envs.Add("STATIC_A", "static-value-a") - f.Run.Envs.Add("STATIC_B", "static-value-b") - - // Deploy - // ------ - - // Build - f, err = client.Build(ctx, f) - if err != nil { - t.Fatal(err) - } - - // Push - f, _, err = client.Push(ctx, f) - if err != nil { - t.Fatal(err) - } - - // Deploy - f, err = client.Deploy(ctx, f) - if err != nil { - t.Fatal(err) - } - t.Cleanup(func() { - err := client.Remove(ctx, "", "", f, true) - if err != nil { - t.Logf("error removing Function: %v", err) - } - }) - - // Wait for function to be ready - instance, err := client.Describe(ctx, "", "", f) - if err != nil { - t.Fatal(err) - } - - // Assert Initial ENVS are set - // ---------- - _, result := invoke(t, ctx, instance.Route) - - // Verify Envs - if result.EnvVars["STATIC_A"] != "static-value-a" { - t.Fatalf("STATIC_A env not set correctly, got: %s", result.EnvVars["STATIC_A"]) - } - if result.EnvVars["STATIC_B"] != "static-value-b" { - t.Fatalf("STATIC_B env not set correctly, got: %s", result.EnvVars["STATIC_B"]) - } - t.Logf("Environment variables after initial deploy:") - for k, v := range result.EnvVars { - if strings.HasPrefix(k, "STATIC") { - t.Logf(" %s=%s", k, v) - } - } - - // Modify Envs and Redeploy - // ------------------------ - // Removes one and modifies the other - f.Run.Envs = fn.Envs{} // Reset to empty Envs - f.Run.Envs.Add("STATIC_A", "static-value-a-updated") - - // Deploy without rebuild (only env vars changed, code is the same) - f, err = client.Deploy(ctx, f, fn.WithDeploySkipBuildCheck(true)) - if err != nil { - t.Fatal(err) - } - - // Wait for function to be ready - instance, err = client.Describe(ctx, "", "", f) - if err != nil { - t.Fatal(err) - } - - // Assertions - // ---------- - _, result = invoke(t, ctx, instance.Route) - - // Verify Envs - // Log all environment variables for debugging - t.Logf("Environment variables after update:") - for k, v := range result.EnvVars { - if strings.HasPrefix(k, "STATIC") { - t.Logf(" %s=%s", k, v) - } - } - - // Ensure that STATIC_A is changed to the new value - if result.EnvVars["STATIC_A"] != "static-value-a-updated" { - t.Fatalf("STATIC_A env not updated correctly, got: %s", result.EnvVars["STATIC_A"]) - } - // Ensure that STATIC_B no longer exists - if _, exists := result.EnvVars["STATIC_B"]; exists { - // FIXME: Known issue - Knative serving bug - // Tests confirm that the pod deployed does NOT have the environment variable - // STATIC_B set (verified via kubectl describe pod), yet the service itself - // reports the environment variable when invoked via HTTP. 
- // This appears to be a Knative serving issue where removed environment - // variables persist in the running container despite not being in the pod spec. - // Possible causes: - // 1. Container runtime caching environment at startup - // 2. Knative queue proxy sidecar caching/injecting old values - // 3. Service mesh layer (Istio/Envoy) caching - // TODO: File issue with Knative project - t.Logf("WARNING: STATIC_B env should have been removed but still exists with value: %s (Knative bug)", result.EnvVars["STATIC_B"]) - // t.Fatalf("STATIC_B env should have been removed but still exists with value: %s", result.EnvVars["STATIC_B"]) - } -} - -// Helper functions -// ================ - -// namespace returns the integration test namespace or that specified by -// FUNC_INT_NAMESPACE (creating if necessary) -func namespace(t *testing.T, ctx context.Context) string { - t.Helper() - - cliSet, err := k8s.NewKubernetesClientset() - if err != nil { - t.Fatal(err) - } - - // TODO: choose FUNC_INT_NAMESPACE if it exists? - - namespace := fntest.DefaultIntTestNamespacePrefix + "-" + rand.String(5) - - ns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - }, - Spec: corev1.NamespaceSpec{}, - } - _, err = cliSet.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) - if err != nil { - t.Fatal(err) - } - t.Cleanup(func() { - err := cliSet.CoreV1().Namespaces().Delete(context.Background(), namespace, metav1.DeleteOptions{}) - if err != nil { - t.Logf("error deleting namespace: %v", err) - } - }) - t.Log("created namespace: ", namespace) - - return namespace -} - -// registry returns the registry to use for tests -func registry() string { - // Use environment variable if set, otherwise use localhost registry - if reg := os.Getenv("FUNC_INT_TEST_REGISTRY"); reg != "" { - return reg - } - // Default to localhost registry (same as E2E tests) - return fntest.DefaultIntTestRegistry -} - -// Decode response -type result struct { - EnvVars map[string]string - Mounts map[string]bool -} - -func invoke(t *testing.T, ctx context.Context, route string) (statusCode int, r result) { - req, err := http.NewRequestWithContext(ctx, "GET", route, nil) - if err != nil { - t.Fatal(err) - } - httpClient := &http.Client{Timeout: 30 * time.Second} - resp, err := httpClient.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - t.Fatalf("expected 200 OK, got %d", resp.StatusCode) - } - if err := json.NewDecoder(resp.Body).Decode(&r); err != nil { - t.Fatal(err) - } - return resp.StatusCode, r -} - -func createTrigger(t *testing.T, ctx context.Context, namespace, triggerName, functionName string) func(fn.Instance) error { - t.Helper() - tr := &eventingv1.Trigger{ - ObjectMeta: metav1.ObjectMeta{ - Name: triggerName, - }, - Spec: eventingv1.TriggerSpec{ - Broker: "testing-broker", - Subscriber: v1.Destination{Ref: &v1.KReference{ - Kind: "Service", - Namespace: namespace, - Name: functionName, - APIVersion: "serving.knative.dev/v1", - }}, - Filter: &eventingv1.TriggerFilter{ - Attributes: map[string]string{ - "source": "test-event-source", - "type": "test-event-type", - }, - }, - }, - } - eventingClient, err := knative.NewEventingClient(namespace) - if err != nil { - t.Fatal(err) - } - err = eventingClient.CreateTrigger(ctx, tr) - if err != nil { - t.Fatal(err) - } - - deferCleanup(t, namespace, "trigger", triggerName) - - return func(instance fn.Instance) error { - if len(instance.Subscriptions) != 1 { - return fmt.Errorf("exactly one 
subscription is expected, got %v", len(instance.Subscriptions)) - } else { - if instance.Subscriptions[0].Broker != "testing-broker" { - return fmt.Errorf("expected broker 'testing-broker', got %q", instance.Subscriptions[0].Broker) - } - if instance.Subscriptions[0].Source != "test-event-source" { - return fmt.Errorf("expected source 'test-event-source', got %q", instance.Subscriptions[0].Source) - } - if instance.Subscriptions[0].Type != "test-event-type" { - return fmt.Errorf("expected type 'test-event-type', got %q", instance.Subscriptions[0].Type) - } - } - return nil - } -} - -// createSecret creates a Kubernetes secret with the given name and data -func createSecret(t *testing.T, namespace, name string, data map[string]string) { - t.Helper() - - cliSet, err := k8s.NewKubernetesClientset() - if err != nil { - t.Fatal(err) - } - - // Convert string map to byte map - byteData := make(map[string][]byte) - for k, v := range data { - byteData[k] = []byte(v) - } - - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Data: byteData, - Type: corev1.SecretTypeOpaque, - } - - _, err = cliSet.CoreV1().Secrets(namespace).Create(context.Background(), secret, metav1.CreateOptions{}) - if err != nil { - t.Fatal(err) - } - - deferCleanup(t, namespace, "secret", name) -} - -// createConfigMap creates a Kubernetes configmap with the given name and data -func createConfigMap(t *testing.T, namespace, name string, data map[string]string) { - t.Helper() - - cliSet, err := k8s.NewKubernetesClientset() - if err != nil { - t.Fatal(err) - } - - configMap := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Data: data, - } - - _, err = cliSet.CoreV1().ConfigMaps(namespace).Create(context.Background(), configMap, metav1.CreateOptions{}) - if err != nil { - t.Fatal(err) - } - - deferCleanup(t, namespace, "configmap", name) -} - -// deferCleanup provides cleanup for K8s resources -func deferCleanup(t *testing.T, namespace string, resourceType string, name string) { - t.Helper() - - switch resourceType { - case "secret": - t.Cleanup(func() { - if cliSet, err := k8s.NewKubernetesClientset(); err == nil { - _ = cliSet.CoreV1().Secrets(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) - } - }) - case "configmap": - t.Cleanup(func() { - if cliSet, err := k8s.NewKubernetesClientset(); err == nil { - _ = cliSet.CoreV1().ConfigMaps(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) - } - }) - case "trigger": - t.Cleanup(func() { - if eventingClient, err := knative.NewEventingClient(namespace); err == nil { - _ = eventingClient.DeleteTrigger(context.Background(), name) - } - }) - } -} - -// Test Handler -// ============ -const testHandler = `package function - -import ( - "encoding/json" - "net/http" - "os" - "strings" -) - -type Response struct { - EnvVars map[string]string - Mounts map[string]bool -} - -type Function struct {} - -func New() *Function { - return &Function{} -} - -func (f *Function) Handle(w http.ResponseWriter, req *http.Request) { - resp := Response{ - EnvVars: make(map[string]string), - Mounts: make(map[string]bool), - } - - // Collect environment variables - for _, env := range os.Environ() { - parts := strings.SplitN(env, "=", 2) - if len(parts) == 2 { - resp.EnvVars[parts[0]] = parts[1] - } - } - - // Check known mount paths - just verify they exist as directories - mountPaths := []string{"/mnt/secret", "/mnt/configmap", "/mnt/emptydir"} - for _, mountPath := range mountPaths { - if info, err := 
os.Stat(mountPath); err == nil && info.IsDir() { - resp.Mounts[mountPath] = true - } else { - resp.Mounts[mountPath] = false - } - } - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(resp) + deployertesting.TestInt_EnvsUpdate(t, + knative.NewDeployer(knative.WithDeployerVerbose(true)), + knative.NewRemover(false), + knative.NewDescriber(false), + knative.KnativeDeployerName) } -` diff --git a/pkg/knative/describer.go b/pkg/knative/describer.go index 6bfd8919d0..2e6a028741 100644 --- a/pkg/knative/describer.go +++ b/pkg/knative/describer.go @@ -7,7 +7,6 @@ import ( "k8s.io/apimachinery/pkg/api/errors" clientservingv1 "knative.dev/client/pkg/serving/v1" eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1" - fn "knative.dev/func/pkg/functions" ) @@ -26,30 +25,48 @@ func NewDescriber(verbose bool) *Describer { // escaped. Therefore as a knative (kube) implementation detal proper full // names have to be escaped on the way in and unescaped on the way out. ex: // www.example-site.com -> www-example--site-com -func (d *Describer) Describe(ctx context.Context, name, namespace string) (description fn.Instance, err error) { +func (d *Describer) Describe(ctx context.Context, name, namespace string) (fn.Instance, error) { if namespace == "" { - err = fmt.Errorf("function namespace is required when describing %q", name) - return + return fn.Instance{}, fmt.Errorf("function namespace is required when describing %q", name) } servingClient, err := NewServingClient(namespace) if err != nil { - return + return fn.Instance{}, err } eventingClient, err := NewEventingClient(namespace) if err != nil { - return + return fn.Instance{}, err } service, err := servingClient.GetService(ctx, name) if err != nil { - return + // If we can't get the service, check why + if IsCRDNotFoundError(err) { + // Knative Serving not installed - we don't handle this + return fn.Instance{}, fn.ErrNotHandled + } + if errors.IsNotFound(err) { + // Service doesn't exist as a Knative service - we don't handle this + return fn.Instance{}, fn.ErrNotHandled + } + // Some other error (permissions, network, etc.) - this is a real error + // We can't determine if we should handle it, so propagate it + return fn.Instance{}, fmt.Errorf("failed to check if service uses Knative: %w", err) } + // We got the service, now check if we should handle it + if !UsesKnativeDeployer(service.Annotations) { + // no need to handle this service + return fn.Instance{}, fn.ErrNotHandled + } + + // We're responsible, for this function --> proceed... 
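+	// Gather the service's routes next; the URL of the first route (if any) is reported as the function's primary route on the returned Instance.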
+ routes, err := servingClient.ListRoutes(ctx, clientservingv1.WithService(name)) if err != nil { - return + return fn.Instance{}, err } routeURLs := make([]string, 0, len(routes.Items)) @@ -62,18 +79,23 @@ func (d *Describer) Describe(ctx context.Context, name, namespace string) (descr primaryRouteURL = routes.Items[0].Status.URL.String() } - description.Name = name - description.Namespace = namespace - description.Route = primaryRouteURL - description.Routes = routeURLs + description := fn.Instance{ + Name: name, + Namespace: namespace, + Deployer: KnativeDeployerName, + Route: primaryRouteURL, + Routes: routeURLs, + Labels: service.Labels, + } triggers, err := eventingClient.ListTriggers(ctx) - // IsNotFound -- Eventing is probably not installed on the cluster - if err != nil && !errors.IsNotFound(err) { - err = nil - return - } else if err != nil { - return + if err != nil { + if errors.IsNotFound(err) || IsCRDNotFoundError(err) { + // No trigger found or Eventing is probably not installed on the cluster --> we're done here + return description, nil + } + + return fn.Instance{}, err } triggerMatches := func(t *eventingv1.Trigger) bool { @@ -98,10 +120,5 @@ func (d *Describer) Describe(ctx context.Context, name, namespace string) (descr description.Subscriptions = subscriptions - // Populate labels from the service - if service.Labels != nil { - description.Labels = service.Labels - } - - return + return description, nil } diff --git a/pkg/knative/describer_int_test.go b/pkg/knative/describer_int_test.go index 8db6200a28..35163c1141 100644 --- a/pkg/knative/describer_int_test.go +++ b/pkg/knative/describer_int_test.go @@ -1,87 +1,19 @@ //go:build integration +// +build integration package knative_test import ( - "context" "testing" - "time" - "k8s.io/apimachinery/pkg/util/rand" - - fn "knative.dev/func/pkg/functions" + describertesting "knative.dev/func/pkg/describer/testing" "knative.dev/func/pkg/knative" - "knative.dev/func/pkg/oci" ) func TestInt_Describe(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) - name := "func-int-knative-describe-" + rand.String(5) - root := t.TempDir() - ns := namespace(t, ctx) - - t.Cleanup(cancel) - - client := fn.New( - fn.WithBuilder(oci.NewBuilder("", false)), - fn.WithPusher(oci.NewPusher(true, true, true)), - fn.WithDeployer(knative.NewDeployer(knative.WithDeployerVerbose(true))), - fn.WithDescriber(knative.NewDescriber(false)), - fn.WithRemover(knative.NewRemover(false)), - ) - - f, err := client.Init(fn.Function{ - Root: root, - Name: name, - Runtime: "go", - Namespace: ns, - Registry: registry(), - }) - if err != nil { - t.Fatal(err) - } - - // Build - f, err = client.Build(ctx, f) - if err != nil { - t.Fatal(err) - } - - // Push - f, _, err = client.Push(ctx, f) - if err != nil { - t.Fatal(err) - } - - // Deploy - f, err = client.Deploy(ctx, f) - if err != nil { - t.Fatal(err) - } - t.Cleanup(func() { - err := client.Remove(ctx, "", "", f, true) - if err != nil { - t.Logf("error removing Function: %v", err) - } - }) - - // Wait for function to be ready - _, err = client.Describe(ctx, "", "", f) - if err != nil { - t.Fatal(err) - } - - // Describe - desc, err := client.Describe(ctx, "", "", f) - if err != nil { - t.Fatal(err) - } - - if desc.Name != f.Name { - t.Fatalf("expected name %q, got %q", f.Name, desc.Name) - - } - if desc.Namespace != ns { - t.Fatalf("expected namespace %q, got %q", ns, desc.Namespace) - } + describertesting.TestInt_Describe(t, + knative.NewDescriber(true), + 
knative.NewDeployer(knative.WithDeployerVerbose(true)), + knative.NewRemover(true), + knative.KnativeDeployerName) } diff --git a/pkg/knative/errors.go b/pkg/knative/errors.go new file mode 100644 index 0000000000..45a1da3fe4 --- /dev/null +++ b/pkg/knative/errors.go @@ -0,0 +1,23 @@ +package knative + +import ( + "strings" + + "k8s.io/apimachinery/pkg/api/meta" +) + +// IsCRDNotFoundError checks if the given error indicates that a requested Kind could not be found and thus the CRD +// most likely is not installed +func IsCRDNotFoundError(err error) bool { + if err == nil { + return false + } + + return meta.IsNoMatchError(err) || + strings.Contains(err.Error(), "no matches for kind") || + strings.Contains(err.Error(), "the server could not find the requested resource") || + ( + // check if it's a "knclient.NewInvalidCRD(...)" error) + strings.HasPrefix(err.Error(), "no or newer Knative ") && + strings.HasSuffix(err.Error(), " API found on the backend, please verify the installation or update the 'kn' client")) +} diff --git a/pkg/knative/labels.go b/pkg/knative/labels.go deleted file mode 100644 index 2dc00214df..0000000000 --- a/pkg/knative/labels.go +++ /dev/null @@ -1,7 +0,0 @@ -package knative - -const ( - DaprEnabled = "true" - DaprMetricsPort = "9092" - DaprEnableAPILogging = "true" -) diff --git a/pkg/knative/lister.go b/pkg/knative/lister.go index cc1eb756b0..ddf86d4534 100644 --- a/pkg/knative/lister.go +++ b/pkg/knative/lister.go @@ -18,20 +18,28 @@ func NewLister(verbose bool) *Lister { } // List functions, optionally specifying a namespace. -func (l *Lister) List(ctx context.Context, namespace string) (items []fn.ListItem, err error) { +func (l *Lister) List(ctx context.Context, namespace string) ([]fn.ListItem, error) { client, err := NewServingClient(namespace) if err != nil { - return + return nil, err } + // TODO: shouldn't this list only services for functions (-> having the function.knative.dev/name label)?!? 
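+	// List all Knative services in the namespace; services that were not created by the knative deployer are skipped below via UsesKnativeDeployer.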
+ lst, err := client.ListServices(ctx) if err != nil { - return + if IsCRDNotFoundError(err) { + // no services found --> nothing to return + return nil, nil + } + return nil, err } - services := lst.Items[:] - - for _, service := range services { + items := make([]fn.ListItem, 0, len(lst.Items)) + for _, service := range lst.Items { + if !UsesKnativeDeployer(service.Annotations) { + continue + } // get status ready := corev1.ConditionUnknown @@ -50,9 +58,11 @@ func (l *Lister) List(ctx context.Context, namespace string) (items []fn.ListIte Runtime: runtimeLabel, URL: service.Status.URL.String(), Ready: string(ready), + Deployer: KnativeDeployerName, } items = append(items, listItem) } - return + + return items, nil } diff --git a/pkg/knative/lister_int_test.go b/pkg/knative/lister_int_test.go index a49b543130..ff76bdc977 100644 --- a/pkg/knative/lister_int_test.go +++ b/pkg/knative/lister_int_test.go @@ -1,93 +1,20 @@ //go:build integration +// +build integration package knative_test import ( - "context" "testing" - "time" - "k8s.io/apimachinery/pkg/util/rand" - - fn "knative.dev/func/pkg/functions" "knative.dev/func/pkg/knative" - "knative.dev/func/pkg/oci" + listertesting "knative.dev/func/pkg/lister/testing" ) func TestInt_List(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) - name := "func-int-knative-list-" + rand.String(5) - root := t.TempDir() - ns := namespace(t, ctx) - - t.Cleanup(cancel) - - client := fn.New( - fn.WithBuilder(oci.NewBuilder("", false)), - fn.WithPusher(oci.NewPusher(true, true, true)), - fn.WithDeployer(knative.NewDeployer(knative.WithDeployerVerbose(true))), - fn.WithDescriber(knative.NewDescriber(false)), - fn.WithLister(knative.NewLister(false)), - fn.WithRemover(knative.NewRemover(false)), - ) - - f, err := client.Init(fn.Function{ - Root: root, - Name: name, - Runtime: "go", - Namespace: ns, - Registry: registry(), - }) - if err != nil { - t.Fatal(err) - } - - // Build - f, err = client.Build(ctx, f) - if err != nil { - t.Fatal(err) - } - - // Push - f, _, err = client.Push(ctx, f) - if err != nil { - t.Fatal(err) - } - - // Deploy - f, err = client.Deploy(ctx, f) - if err != nil { - t.Fatal(err) - } - t.Cleanup(func() { - err := client.Remove(ctx, "", "", f, true) - if err != nil { - t.Logf("error removing Function: %v", err) - } - }) - - // Wait for function to be ready - _, err = client.Describe(ctx, "", "", f) - if err != nil { - t.Fatal(err) - } - - // Verify with list - list, err := client.List(ctx, "") - if err != nil { - t.Fatal(err) - } - - // Should find at least our function (may have others in namespace) - found := false - for _, item := range list { - if item.Name == f.Name { - found = true - break - } - } - if !found { - t.Errorf("function %s not found in list", f.Name) - } - + listertesting.TestInt_List(t, + knative.NewLister(true), + knative.NewDeployer(knative.WithDeployerVerbose(true)), + knative.NewDescriber(true), + knative.NewRemover(true), + knative.KnativeDeployerName) } diff --git a/pkg/knative/logs.go b/pkg/knative/logs.go index a3e9f7ab61..19790f7d09 100644 --- a/pkg/knative/logs.go +++ b/pkg/knative/logs.go @@ -1,18 +1,11 @@ package knative import ( - "bytes" "context" "fmt" "io" - "sync" "time" - "golang.org/x/sync/errgroup" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/watch" "knative.dev/func/pkg/k8s" ) @@ -24,126 +17,6 @@ import ( // // This function runs as long as the passed context is active (i.e. 
it is required cancel the context to stop log gathering). func GetKServiceLogs(ctx context.Context, namespace, kServiceName, image string, since *time.Time, out io.Writer) error { - client, namespace, err := k8s.NewClientAndResolvedNamespace(namespace) - if err != nil { - return fmt.Errorf("cannot create k8s client: %w", err) - } - - pods := client.CoreV1().Pods(namespace) - - podListOpts := metav1.ListOptions{ - Watch: true, - LabelSelector: fmt.Sprintf("serving.knative.dev/service=%s", kServiceName), - } - - w, err := pods.Watch(ctx, podListOpts) - if err != nil { - return fmt.Errorf("cannot create watch: %w", err) - } - defer w.Stop() - - beingProcessed := make(map[string]bool) - var beingProcessedMu sync.Mutex - - copyLogs := func(pod corev1.Pod) error { - defer func() { - beingProcessedMu.Lock() - delete(beingProcessed, pod.Name) - beingProcessedMu.Unlock() - }() - podLogOpts := corev1.PodLogOptions{ - Container: "user-container", - Follow: true, - } - if since != nil { - sinceTime := metav1.NewTime(*since) - podLogOpts.SinceTime = &sinceTime - } - req := client.CoreV1().Pods(namespace).GetLogs(pod.Name, &podLogOpts) - - r, e := req.Stream(ctx) - if e != nil { - return fmt.Errorf("cannot get stream: %w", e) - } - defer r.Close() - _, e = io.Copy(out, r) - if e != nil { - return fmt.Errorf("error copying logs: %w", e) - } - return nil - } - - mayReadLogs := func(pod corev1.Pod) bool { - for _, status := range pod.Status.ContainerStatuses { - if status.Name == "user-container" { - return status.State.Running != nil || status.State.Terminated != nil - } - } - return false - } - - getImage := func(pod corev1.Pod) string { - for _, ctr := range pod.Spec.Containers { - if ctr.Name == "user-container" { - return ctr.Image - } - } - return "" - } - - var eg errgroup.Group - - for event := range w.ResultChan() { - if event.Type == watch.Modified || event.Type == watch.Added { - pod := *event.Object.(*corev1.Pod) - - beingProcessedMu.Lock() - _, loggingAlready := beingProcessed[pod.Name] - beingProcessedMu.Unlock() - - if !loggingAlready && (image == "" || image == getImage(pod)) && mayReadLogs(pod) { - - beingProcessedMu.Lock() - beingProcessed[pod.Name] = true - beingProcessedMu.Unlock() - - eg.Go(func() error { return copyLogs(pod) }) - } - } - } - - err = eg.Wait() - if err != nil { - return fmt.Errorf("error while gathering logs: %w", err) - } - return nil -} - -type SynchronizedBuffer struct { - b bytes.Buffer - mu sync.Mutex -} - -func (b *SynchronizedBuffer) String() string { - b.mu.Lock() - defer b.mu.Unlock() - return b.b.String() -} - -func (b *SynchronizedBuffer) Write(p []byte) (n int, err error) { - b.mu.Lock() - defer b.mu.Unlock() - return b.b.Write(p) -} - -func (b *SynchronizedBuffer) Read(p []byte) (n int, err error) { - b.mu.Lock() - defer b.mu.Unlock() - return b.b.Read(p) -} - -func (b *SynchronizedBuffer) Reset() { - b.mu.Lock() - defer b.mu.Unlock() - b.b.Reset() + selector := fmt.Sprintf("serving.knative.dev/service=%s", kServiceName) + return k8s.GetPodLogsBySelector(ctx, namespace, selector, "user-container", image, since, out) } diff --git a/pkg/knative/remover.go b/pkg/knative/remover.go index e04c813769..ed81738407 100644 --- a/pkg/knative/remover.go +++ b/pkg/knative/remover.go @@ -22,7 +22,7 @@ type Remover struct { verbose bool } -func (remover *Remover) Remove(ctx context.Context, name, ns string) (err error) { +func (remover *Remover) Remove(ctx context.Context, name, ns string) error { if ns == "" { fmt.Fprintf(os.Stderr, "no namespace defined when trying to 
delete a function in knative remover\n") return fn.ErrNamespaceRequired @@ -30,16 +30,35 @@ func (remover *Remover) Remove(ctx context.Context, name, ns string) (err error) client, err := NewServingClient(ns) if err != nil { - return + return err } - err = client.DeleteService(ctx, name, RemoveTimeout) + ksvc, err := client.GetService(ctx, name) if err != nil { + // If we can't get the service, check why + if IsCRDNotFoundError(err) { + // Knative Serving not installed - we don't handle this + return fn.ErrNotHandled + } if apiErrors.IsNotFound(err) { - return fn.ErrFunctionNotFound + // Service doesn't exist as a Knative service - we don't handle this + return fn.ErrNotHandled } - err = fmt.Errorf("knative remover failed to delete the service: %v", err) + // Some other error (permissions, network, etc.) - this is a real error + // We can't determine if we should handle it, so propagate it + return fmt.Errorf("failed to get knative service: %w", err) + } + + if !UsesKnativeDeployer(ksvc.Annotations) { + return fn.ErrNotHandled + } + + // We're responsible, for this function --> proceed... + + err = client.DeleteService(ctx, name, RemoveTimeout) + if err != nil { + return fmt.Errorf("knative remover failed to delete the service: %v", err) } - return + return nil } diff --git a/pkg/knative/remover_int_test.go b/pkg/knative/remover_int_test.go index 743189a7c3..6e7f7055a4 100644 --- a/pkg/knative/remover_int_test.go +++ b/pkg/knative/remover_int_test.go @@ -1,108 +1,20 @@ //go:build integration +// +build integration package knative_test import ( - "context" "testing" - "time" - "k8s.io/apimachinery/pkg/util/rand" - - fn "knative.dev/func/pkg/functions" "knative.dev/func/pkg/knative" - "knative.dev/func/pkg/oci" + removertesting "knative.dev/func/pkg/remover/testing" ) func TestInt_Remove(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) - name := "func-int-knative-remove-" + rand.String(5) - root := t.TempDir() - ns := namespace(t, ctx) - - t.Cleanup(cancel) - - client := fn.New( - fn.WithBuilder(oci.NewBuilder("", false)), - fn.WithPusher(oci.NewPusher(true, true, true)), - fn.WithDeployer(knative.NewDeployer(knative.WithDeployerVerbose(true))), - fn.WithDescriber(knative.NewDescriber(false)), - fn.WithLister(knative.NewLister(false)), - fn.WithRemover(knative.NewRemover(false)), - ) - - f, err := client.Init(fn.Function{ - Root: root, - Name: name, - Runtime: "go", - Namespace: ns, - Registry: registry(), - }) - if err != nil { - t.Fatal(err) - } - - // Build - f, err = client.Build(ctx, f) - if err != nil { - t.Fatal(err) - } - - // Push - f, _, err = client.Push(ctx, f) - if err != nil { - t.Fatal(err) - } - - // Deploy - f, err = client.Deploy(ctx, f) - if err != nil { - t.Fatal(err) - } - - // Wait for function to be ready - _, err = client.Describe(ctx, "", "", f) - if err != nil { - t.Fatal(err) - } - - // Verify with list - list, err := client.List(ctx, "") - if err != nil { - t.Fatal(err) - } - found := false - for _, item := range list { - if item.Name == f.Name { - found = true - break - } - } - if !found { - t.Errorf("function %s not found in list", f.Name) - } - - // Remove it - if err := client.Remove(ctx, "", "", f, true); err != nil { - t.Logf("error removing Function: %v", err) - } - - // Verify it is no longer listed - list, err = client.List(ctx, "") - if err != nil { - t.Fatal(err) - } - found = false - for _, item := range list { - if item.Name == f.Name { - found = true - break - } - } - if found { - t.Errorf("function %s was 
not removed", f.Name) - } - - // Remove - + removertesting.TestInt_Remove(t, + knative.NewRemover(true), + knative.NewDeployer(knative.WithDeployerVerbose(true)), + knative.NewDescriber(true), + knative.NewLister(true), + knative.KnativeDeployerName) } diff --git a/pkg/knative/testdata/test_default_namespace b/pkg/knative/testdata/test_default_namespace deleted file mode 100644 index defbdd43c5..0000000000 --- a/pkg/knative/testdata/test_default_namespace +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: v1 -clusters: -- cluster: - insecure-skip-tls-verify: true - server: https://cluster.example.com.com:6443 - name: cluster.example.com-com:6443 -contexts: -- context: - cluster: cluster.example.com-com:6443 - namespace: test-ns-deploy - user: kube:admin/cluster.example.com-com:6443 - name: test-ns-deploy/cluster.example.com-com:6443/kube:admin -current-context: test-ns-deploy/cluster.example.com-com:6443/kube:admin -kind: Config -preferences: {} -users: -- name: kubeadmin - user: - token: sha256~XXXXexample-test-hash - diff --git a/pkg/knative/testdata/test_empty b/pkg/knative/testdata/test_empty deleted file mode 100644 index 20a1e20e75..0000000000 --- a/pkg/knative/testdata/test_empty +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v1 -kind: Config -clusters: -contexts: -preferences: {} -users: diff --git a/pkg/lister/testing/integration_test_helper.go b/pkg/lister/testing/integration_test_helper.go new file mode 100644 index 0000000000..f9c4ca0086 --- /dev/null +++ b/pkg/lister/testing/integration_test_helper.go @@ -0,0 +1,94 @@ +package testing + +//nolint:staticcheck // ST1001: should not use dot imports +import ( + "context" + "testing" + "time" + + "k8s.io/apimachinery/pkg/util/rand" + fn "knative.dev/func/pkg/functions" + "knative.dev/func/pkg/oci" + . "knative.dev/func/pkg/testing" + . 
"knative.dev/func/pkg/testing/k8s" +) + +func TestInt_List(t *testing.T, lister fn.Lister, deployer fn.Deployer, describer fn.Describer, remover fn.Remover, deployerName string) { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) + name := "func-int-knative-list-" + rand.String(5) + root := t.TempDir() + ns := Namespace(t, ctx) + + t.Cleanup(cancel) + + client := fn.New( + fn.WithBuilder(oci.NewBuilder("", false)), + fn.WithPusher(oci.NewPusher(true, true, true)), + fn.WithDeployer(deployer), + fn.WithListers(lister), + fn.WithDescribers(describer), + fn.WithRemovers(remover), + ) + + f, err := client.Init(fn.Function{ + Root: root, + Name: name, + Runtime: "go", + Namespace: ns, + Registry: Registry(), + }) + if err != nil { + t.Fatal(err) + } + + // Build + f, err = client.Build(ctx, f) + if err != nil { + t.Fatal(err) + } + + // Push + f, _, err = client.Push(ctx, f) + if err != nil { + t.Fatal(err) + } + + // Deploy + f, err = client.Deploy(ctx, f) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + err := client.Remove(ctx, "", "", f, true) + if err != nil { + t.Logf("error removing Function: %v", err) + } + }) + + // Wait for function to be ready + _, err = client.Describe(ctx, "", "", f) + if err != nil { + t.Fatal(err) + } + + // Verify with list + list, err := client.List(ctx, ns) + if err != nil { + t.Fatal(err) + } + + // Should find at least our function (may have others in namespace) + found := false + for _, item := range list { + if item.Name == f.Name { + found = true + break + } + } + if !found { + t.Errorf("function %s not found in list", f.Name) + } + +} diff --git a/pkg/pipelines/tekton/pipelines_int_test.go b/pkg/pipelines/tekton/pipelines_int_test.go index 103dc220be..824e617038 100644 --- a/pkg/pipelines/tekton/pipelines_int_test.go +++ b/pkg/pipelines/tekton/pipelines_int_test.go @@ -21,17 +21,15 @@ import ( rbacV1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "knative.dev/func/pkg/k8s" - "knative.dev/func/pkg/knative" - "knative.dev/func/pkg/oci" - "knative.dev/func/pkg/builders/buildpacks" pack "knative.dev/func/pkg/builders/buildpacks" "knative.dev/func/pkg/docker" fn "knative.dev/func/pkg/functions" + "knative.dev/func/pkg/k8s" + "knative.dev/func/pkg/knative" + "knative.dev/func/pkg/oci" "knative.dev/func/pkg/pipelines/tekton" "knative.dev/func/pkg/random" - . 
"knative.dev/func/pkg/testing" ) @@ -53,9 +51,9 @@ func newRemoteTestClient(verbose bool) *fn.Client { fn.WithBuilder(pack.NewBuilder(pack.WithVerbose(verbose))), fn.WithPusher(docker.NewPusher(docker.WithCredentialsProvider(testCP))), fn.WithDeployer(knative.NewDeployer(knative.WithDeployerVerbose(verbose))), - fn.WithRemover(knative.NewRemover(verbose)), - fn.WithDescriber(knative.NewDescriber(verbose)), - fn.WithRemover(knative.NewRemover(verbose)), + fn.WithDescribers(knative.NewDescriber(verbose), k8s.NewDescriber(verbose)), + fn.WithListers(knative.NewLister(verbose), k8s.NewLister(verbose)), + fn.WithRemovers(knative.NewRemover(verbose), k8s.NewRemover(verbose)), fn.WithPipelinesProvider(tekton.NewPipelinesProvider(tekton.WithCredentialsProvider(testCP), tekton.WithVerbose(verbose))), ) } diff --git a/pkg/pipelines/tekton/pipelines_provider.go b/pkg/pipelines/tekton/pipelines_provider.go index 0d60ba7d45..662394c99d 100644 --- a/pkg/pipelines/tekton/pipelines_provider.go +++ b/pkg/pipelines/tekton/pipelines_provider.go @@ -14,8 +14,6 @@ import ( "sync" "time" - "k8s.io/apimachinery/pkg/api/resource" - "github.com/AlecAivazis/survey/v2" "github.com/google/go-containerregistry/pkg/authn" "github.com/google/go-containerregistry/pkg/name" @@ -26,9 +24,9 @@ import ( pipelineClient "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8slabels "k8s.io/apimachinery/pkg/labels" - "knative.dev/func/pkg/docker" fn "knative.dev/func/pkg/functions" "knative.dev/func/pkg/k8s" diff --git a/pkg/remover/testing/integration_test_helper.go b/pkg/remover/testing/integration_test_helper.go new file mode 100644 index 0000000000..adc9d320d7 --- /dev/null +++ b/pkg/remover/testing/integration_test_helper.go @@ -0,0 +1,106 @@ +package testing + +//nolint:staticcheck // ST1001: should not use dot imports +import ( + "context" + "testing" + "time" + + "k8s.io/apimachinery/pkg/util/rand" + fn "knative.dev/func/pkg/functions" + "knative.dev/func/pkg/oci" + . "knative.dev/func/pkg/testing" + . 
"knative.dev/func/pkg/testing/k8s" +) + +func TestInt_Remove(t *testing.T, remover fn.Remover, deployer fn.Deployer, describer fn.Describer, lister fn.Lister, deployerName string) { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) + name := "func-int-knative-remove-" + rand.String(5) + root := t.TempDir() + ns := Namespace(t, ctx) + + t.Cleanup(cancel) + + client := fn.New( + fn.WithBuilder(oci.NewBuilder("", false)), + fn.WithPusher(oci.NewPusher(true, true, true)), + fn.WithDeployer(deployer), + fn.WithRemovers(remover), + fn.WithDescribers(describer), + fn.WithListers(lister), + ) + + f, err := client.Init(fn.Function{ + Root: root, + Name: name, + Runtime: "go", + Namespace: ns, + Registry: Registry(), + }) + if err != nil { + t.Fatal(err) + } + + // Build + f, err = client.Build(ctx, f) + if err != nil { + t.Fatal(err) + } + + // Push + f, _, err = client.Push(ctx, f) + if err != nil { + t.Fatal(err) + } + + // Deploy + f, err = client.Deploy(ctx, f) + if err != nil { + t.Fatal(err) + } + + // Wait for function to be ready + _, err = client.Describe(ctx, "", "", f) + if err != nil { + t.Fatal(err) + } + + // Verify with list + list, err := client.List(ctx, ns) + if err != nil { + t.Fatal(err) + } + found := false + for _, item := range list { + if item.Name == f.Name { + found = true + break + } + } + if !found { + t.Errorf("function %s not found in list", f.Name) + } + + // Remove it + if err := client.Remove(ctx, "", "", f, true); err != nil { + t.Logf("error removing Function: %v", err) + } + + // Verify it is no longer listed + list, err = client.List(ctx, ns) + if err != nil { + t.Fatal(err) + } + found = false + for _, item := range list { + if item.Name == f.Name { + found = true + break + } + } + if found { + t.Errorf("function %s was not removed", f.Name) + } +} diff --git a/pkg/testing/k8s/testing.go b/pkg/testing/k8s/testing.go new file mode 100644 index 0000000000..954c564fa1 --- /dev/null +++ b/pkg/testing/k8s/testing.go @@ -0,0 +1,48 @@ +package k8s + +import ( + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/rand" + "knative.dev/func/pkg/k8s" +) + +const DefaultIntTestNamespacePrefix = "func-int-test" + +// Namespace returns the integration test namespace or that specified by +// FUNC_INT_NAMESPACE (creating if necessary) +func Namespace(t *testing.T, ctx context.Context) string { + t.Helper() + + cliSet, err := k8s.NewKubernetesClientset() + if err != nil { + t.Fatal(err) + } + + // TODO: choose FUNC_INT_NAMESPACE if it exists? 
+ + namespace := DefaultIntTestNamespacePrefix + "-" + rand.String(5) + + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + Spec: corev1.NamespaceSpec{}, + } + _, err = cliSet.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + err := cliSet.CoreV1().Namespaces().Delete(context.Background(), namespace, metav1.DeleteOptions{}) + if err != nil { + t.Logf("error deleting namespace: %v", err) + } + }) + t.Log("created namespace: ", namespace) + + return namespace +} diff --git a/pkg/testing/testing.go b/pkg/testing/testing.go index 38e34361f7..2b5ad77e2e 100644 --- a/pkg/testing/testing.go +++ b/pkg/testing/testing.go @@ -32,8 +32,6 @@ import ( const DefaultIntTestRegistry = "localhost:50000/func" -const DefaultIntTestNamespacePrefix = "func-int-test" - // Using the given path, create it as a new directory and return a deferrable // which will remove it. // usage: @@ -323,3 +321,13 @@ func ClearEnvs(t *testing.T) { } } } + +// Registry returns the registry to use for tests +func Registry() string { + // Use environment variable if set, otherwise use localhost registry + if reg := os.Getenv("FUNC_INT_TEST_REGISTRY"); reg != "" { + return reg + } + // Default to localhost registry (same as E2E tests) + return DefaultIntTestRegistry +} diff --git a/schema/func_yaml-schema.json b/schema/func_yaml-schema.json index 0d8cd32dea..8801a066fa 100644 --- a/schema/func_yaml-schema.json +++ b/schema/func_yaml-schema.json @@ -107,6 +107,14 @@ "type": "string", "description": "ServiceAccountName is the name of the service account used for the\nfunction pod. The service account must exist in the namespace to\nsucceed.\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/" }, + "deployer": { + "enum": [ + "knative", + "raw" + ], + "type": "string", + "description": "Deployer specifies the type of deployment to use: \"knative\" or \"raw\"\nDefaults to \"knative\" for backwards compatibility" + }, "subscriptions": { "items": { "$schema": "http://json-schema.org/draft-04/schema#",