diff --git a/cmd/skaffold/app/cmd/lint.go b/cmd/skaffold/app/cmd/lint.go index f2c818a9b32..cff51adf7d2 100644 --- a/cmd/skaffold/app/cmd/lint.go +++ b/cmd/skaffold/app/cmd/lint.go @@ -23,6 +23,7 @@ import ( "github.com/spf13/cobra" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/lint" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/output/log" ) var outFormat string @@ -35,15 +36,21 @@ func NewCmdLint() *cobra.Command { {Value: &outFormat, Name: "format", DefValue: lint.PlainTextOutput, Usage: "Output format. One of: plain-text(default) or json"}}). Hidden(). - NoArgs(lintCmd) + NoArgs(doLint) } -func lintCmd(ctx context.Context, out io.Writer) error { +func doLint(ctx context.Context, out io.Writer) error { + // createRunner initializes state for objects (Docker client, etc.) lint uses + _, _, runCtx, err := createRunner(ctx, out, opts) + log.Entry(ctx).Debugf("starting skaffold lint with runCtx: %v", runCtx) + if err != nil { + return err + } return lint.Lint(ctx, out, lint.Options{ Filename: opts.ConfigurationFile, RepoCacheDir: opts.RepoCacheDir, OutFormat: outFormat, Modules: opts.ConfigurationFilter, Profiles: opts.Profiles, - }) + }, runCtx) } diff --git a/pkg/skaffold/docker/dependencies.go b/pkg/skaffold/docker/dependencies.go index fee8cbe458a..08ab506afd0 100644 --- a/pkg/skaffold/docker/dependencies.go +++ b/pkg/skaffold/docker/dependencies.go @@ -78,6 +78,18 @@ func GetDependencies(ctx context.Context, buildCfg BuildConfig, cfg Config) ([]s return resultPair(result) } +// GetDependencies finds the sources dependency for the given docker artifact. +// it caches the results for the computed dependency which can be used by `GetDependenciesCached` +// All paths are relative to the workspace. 
+func GetDependenciesByDockerCopyFromTo(ctx context.Context, buildCfg BuildConfig, cfg Config) (map[string][]string, error) { + absDockerfilePath, err := NormalizeDockerfilePath(buildCfg.workspace, buildCfg.dockerfilePath) + if err != nil { + return nil, fmt.Errorf("normalizing dockerfilePath path: %w", err) + } + ftToDependencies := getDependenciesByDockerCopyFromTo(ctx, buildCfg.workspace, buildCfg.dockerfilePath, absDockerfilePath, buildCfg.args, cfg) + return resultPairForDockerCopyFromTo(ftToDependencies) +} + // GetDependenciesCached reads from cache finds the sources dependency for the given docker artifact. // All paths are relative to the workspace. func GetDependenciesCached(ctx context.Context, buildCfg BuildConfig, cfg Config) ([]string, error) { @@ -103,6 +115,17 @@ func resultPair(deps interface{}) ([]string, error) { } } +func resultPairForDockerCopyFromTo(deps interface{}) (map[string][]string, error) { + switch t := deps.(type) { + case error: + return nil, t + case map[string][]string: + return t, nil + default: + return nil, fmt.Errorf("internal error when retrieving cache result of type %T", t) + } +} + func getDependencies(ctx context.Context, workspace string, dockerfilePath string, absDockerfilePath string, buildArgs map[string]*string, cfg Config) interface{} { // If the Dockerfile doesn't exist, we can't compute the dependency. 
// But since we know the Dockerfile is a dependency, let's return a list @@ -112,7 +135,7 @@ func getDependencies(ctx context.Context, workspace string, dockerfilePath strin return []string{dockerfilePath} } - fts, err := readCopyCmdsFromDockerfile(ctx, false, absDockerfilePath, workspace, buildArgs, cfg) + fts, err := ReadCopyCmdsFromDockerfile(ctx, false, absDockerfilePath, workspace, buildArgs, cfg) if err != nil { return err } @@ -124,7 +147,7 @@ func getDependencies(ctx context.Context, workspace string, dockerfilePath strin deps := make([]string, 0, len(fts)) for _, ft := range fts { - deps = append(deps, ft.from) + deps = append(deps, ft.From) } files, err := WalkWorkspace(workspace, excludes, deps) @@ -151,6 +174,52 @@ func getDependencies(ctx context.Context, workspace string, dockerfilePath strin return dependencies } +func getDependenciesByDockerCopyFromTo(ctx context.Context, workspace string, dockerfilePath string, absDockerfilePath string, buildArgs map[string]*string, cfg Config) interface{} { + // If the Dockerfile doesn't exist, we can't compute the dependency. + // But since we know the Dockerfile is a dependency, let's return a list + // with only that file. It makes errors down the line more actionable + // than returning an error now. + if _, err := os.Stat(absDockerfilePath); os.IsNotExist(err) { + return []string{dockerfilePath} + } + + fts, err := ReadCopyCmdsFromDockerfile(ctx, false, absDockerfilePath, workspace, buildArgs, cfg) + if err != nil { + return err + } + + excludes, err := readDockerignore(workspace, absDockerfilePath) + if err != nil { + return fmt.Errorf("reading .dockerignore: %w", err) + } + + ftToDependencies := map[string][]string{} + for _, ft := range fts { + files, err := WalkWorkspace(workspace, excludes, []string{ft.From}) + if err != nil { + return fmt.Errorf("walking workspace: %w", err) + } + + // Always add dockerfile even if it's .dockerignored. The daemon will need it anyways. 
+ if !filepath.IsAbs(dockerfilePath) { + files[dockerfilePath] = true + } else { + files[absDockerfilePath] = true + } + + // Ignore .dockerignore + delete(files, ".dockerignore") + + var dependencies []string + for file := range files { + dependencies = append(dependencies, file) + } + sort.Strings(dependencies) + ftToDependencies[ft.String()] = dependencies + } + return ftToDependencies +} + // readDockerignore reads patterns to ignore func readDockerignore(workspace string, absDockerfilePath string) ([]string, error) { var excludes []string diff --git a/pkg/skaffold/docker/parse.go b/pkg/skaffold/docker/parse.go index ba569cf5fe8..65f5436e693 100644 --- a/pkg/skaffold/docker/parse.go +++ b/pkg/skaffold/docker/parse.go @@ -42,6 +42,23 @@ import ( "github.com/GoogleContainerTools/skaffold/proto/v1" ) +type FromTo struct { + // From is the relative path (wrt. the skaffold root directory) of the dependency on the host system. + From string + // To is the destination location in the container. Must use slashes as path separator. + To string + // ToIsDir indicates if the `to` path must be treated as directory + ToIsDir bool + // StartLine indicates the starting line in the dockerfile of the copy command + StartLine int + // EndLine indicates the ending line in the dockerfile of the copy command + EndLine int +} + +func (f *FromTo) String() string { + return fmt.Sprintf("From:%s, To:%s, ToIsDir:%t, StartLine: %d, EndLine: %d", f.From, f.To, f.ToIsDir, f.StartLine, f.EndLine) +} + type from struct { image string as string @@ -55,15 +72,10 @@ type copyCommand struct { dest string // destIsDir indicates if dest must be treated as directory. destIsDir bool -} - -type fromTo struct { - // from is the relative path (wrt. the skaffold root directory) of the dependency on the host system. 
- to string - // toIsDir indicates if the `to` path must be treated as directory - toIsDir bool + // startLine indicates the starting line in the dockerfile of the copy command + startLine int + // endLine indicates the ending line in the dockerfile of the copy command + endLine int } var ( @@ -71,7 +83,9 @@ var ( RetrieveImage = retrieveImage ) -func readCopyCmdsFromDockerfile(ctx context.Context, onlyLastImage bool, absDockerfilePath, workspace string, buildArgs map[string]*string, cfg Config) ([]fromTo, error) { +// ReadCopyCmdsFromDockerfile parses a given dockerfile for COPY commands accounting for build args, env vars, globs, etc +// and returns an array of FromTos specifying the files that will be copied 'from' local dirs 'to' container dirs in the COPY statements +func ReadCopyCmdsFromDockerfile(ctx context.Context, onlyLastImage bool, absDockerfilePath, workspace string, buildArgs map[string]*string, cfg Config) ([]FromTo, error) { r, err := ioutil.ReadFile(absDockerfilePath) if err != nil { return nil, err @@ -105,6 +119,38 @@ func readCopyCmdsFromDockerfile(ctx context.Context, onlyLastImage bool, absDock return expandSrcGlobPatterns(workspace, cpCmds) } +func ExtractOnlyCopyCommands(absDockerfilePath string) ([]FromTo, error) { + r, err := ioutil.ReadFile(absDockerfilePath) + if err != nil { + return nil, err + } + + res, err := parser.Parse(bytes.NewReader(r)) + if err != nil { + return nil, fmt.Errorf("parsing dockerfile %q: %w", absDockerfilePath, err) + } + + var copied []FromTo + workdir := "/" + envs := make([]string, 0) + for _, node := range res.AST.Children { + switch node.Value { + case command.Add, command.Copy: + cpCmd, err := readCopyCommand(node, envs, workdir) + if err != nil { + return nil, err + } + + if cpCmd != nil && len(cpCmd.srcs) > 0 { + for _, src := range cpCmd.srcs { + copied = append(copied, FromTo{From: src, To: cpCmd.dest, ToIsDir: cpCmd.destIsDir, StartLine: cpCmd.startLine, EndLine: cpCmd.endLine}) + } + } + } + } + 
return copied, nil +} + // filterUnusedBuildArgs removes entries from the build arguments map that are not found in the dockerfile func filterUnusedBuildArgs(dockerFile io.Reader, buildArgs map[string]*string) (map[string]*string, error) { res, err := parser.Parse(dockerFile) @@ -162,15 +208,15 @@ func expandBuildArgs(nodes []*parser.Node, buildArgs map[string]*string) error { return nil } -func expandSrcGlobPatterns(workspace string, cpCmds []*copyCommand) ([]fromTo, error) { - var fts []fromTo +func expandSrcGlobPatterns(workspace string, cpCmds []*copyCommand) ([]FromTo, error) { + var fts []FromTo for _, cpCmd := range cpCmds { matchesOne := false for _, p := range cpCmd.srcs { path := filepath.Join(workspace, p) if _, err := os.Stat(path); err == nil { - fts = append(fts, fromTo{from: filepath.Clean(p), to: cpCmd.dest, toIsDir: cpCmd.destIsDir}) + fts = append(fts, FromTo{From: filepath.Clean(p), To: cpCmd.dest, ToIsDir: cpCmd.destIsDir, StartLine: cpCmd.startLine, EndLine: cpCmd.endLine}) matchesOne = true continue } @@ -189,7 +235,7 @@ func expandSrcGlobPatterns(workspace string, cpCmds []*copyCommand) ([]fromTo, e return nil, fmt.Errorf("getting relative path of %s", f) } - fts = append(fts, fromTo{from: rel, to: cpCmd.dest, toIsDir: cpCmd.destIsDir}) + fts = append(fts, FromTo{From: rel, To: cpCmd.dest, ToIsDir: cpCmd.destIsDir, StartLine: cpCmd.startLine, EndLine: cpCmd.endLine}) } matchesOne = true } @@ -266,7 +312,6 @@ func extractCopyCommands(ctx context.Context, nodes []*parser.Node, onlyLastImag } } } - return copied, nil } @@ -311,6 +356,8 @@ func readCopyCommand(value *parser.Node, envs []string, workdir string) (*copyCo srcs: srcs, dest: dest, destIsDir: destIsDir, + startLine: value.StartLine, + endLine: value.EndLine, }, nil } diff --git a/pkg/skaffold/docker/syncmap.go b/pkg/skaffold/docker/syncmap.go index b582b503c32..0d4f0601a1f 100644 --- a/pkg/skaffold/docker/syncmap.go +++ b/pkg/skaffold/docker/syncmap.go @@ -36,7 +36,7 @@ func 
SyncMap(ctx context.Context, workspace string, dockerfilePath string, build } // only the COPY/ADD commands from the last image are syncable - fts, err := readCopyCmdsFromDockerfile(ctx, true, absDockerfilePath, workspace, buildArgs, cfg) + fts, err := ReadCopyCmdsFromDockerfile(ctx, true, absDockerfilePath, workspace, buildArgs, cfg) if err != nil { return nil, err } @@ -57,7 +57,7 @@ func SyncMap(ctx context.Context, workspace string, dockerfilePath string, build // walkWorkspaceWithDestinations walks the given host directories and determines their // location in the container. It returns a map of host path by container destination. // Note: if you change this function, you might also want to modify `WalkWorkspace`. -func walkWorkspaceWithDestinations(workspace string, excludes []string, fts []fromTo) (map[string]string, error) { +func walkWorkspaceWithDestinations(workspace string, excludes []string, fts []FromTo) (map[string]string, error) { dockerIgnored, err := NewDockerIgnorePredicate(workspace, excludes) if err != nil { return nil, err @@ -66,7 +66,7 @@ func walkWorkspaceWithDestinations(workspace string, excludes []string, fts []fr // Walk the workspace srcByDest := make(map[string]string) for _, ft := range fts { - absFrom := filepath.Join(workspace, ft.from) + absFrom := filepath.Join(workspace, ft.From) fi, err := os.Stat(absFrom) if err != nil { @@ -100,23 +100,23 @@ func walkWorkspaceWithDestinations(workspace string, excludes []string, fts []fr return err } - srcByDest[path.Join(ft.to, filepath.ToSlash(relBase))] = relPath + srcByDest[path.Join(ft.To, filepath.ToSlash(relBase))] = relPath return nil }); err != nil { return nil, fmt.Errorf("walking %q: %w", absFrom, err) } case mode.IsRegular(): - ignored, err := dockerIgnored(filepath.Join(workspace, ft.from), fi) + ignored, err := dockerIgnored(filepath.Join(workspace, ft.From), fi) if err != nil { return nil, err } if !ignored { - if ft.toIsDir { - base := filepath.Base(ft.from) - 
srcByDest[path.Join(ft.to, base)] = ft.from + if ft.ToIsDir { + base := filepath.Base(ft.From) + srcByDest[path.Join(ft.To, base)] = ft.From } else { - srcByDest[ft.to] = ft.from + srcByDest[ft.To] = ft.From } } } diff --git a/pkg/skaffold/lint/dockerfiles.go b/pkg/skaffold/lint/dockerfiles.go new file mode 100644 index 00000000000..8247b26470f --- /dev/null +++ b/pkg/skaffold/lint/dockerfiles.go @@ -0,0 +1,171 @@ +/* +Copyright 2021 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package lint + +import ( + "context" + "io/ioutil" + "path/filepath" + + "github.com/moby/buildkit/frontend/dockerfile/command" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/config" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" +) + +// for testing +// var getDockerDependencies = docker.GetDependencies +var getDockerDependenciesForEachFromTo = docker.GetDependenciesByDockerCopyFromTo +var dockerfileRules = &dockerfileLintRules + +var DockerfileLinters = []Linter{ + &RegExpLinter{}, + &DockerfileCommandLinter{}, +} + +var dockerfileLintRules = []Rule{ + { + RuleType: DockerfileCommandLintRule, + Filter: DockerCommandFilter{ + DockerCommand: command.Copy, + DockerCopySourceRegExp: `.*`, + }, + RuleID: DockerfileCopyOver1000Files, + ExplanationTemplate: `Found docker 'COPY' command where the source directory "{{index .FieldMap "src"}}" has over 1000 files. 
This has the potential to dramatically slow 'skaffold dev' down ` + + `as skaffold watches all sources files referenced in dockerfile COPY directives for changes. ` + + `If you notice skaffold rebuilding images unnecessarily when non-image-critical files are ` + + `modified, consider changing this to 'COPY $REQUIRED_SOURCE_FILE(s) {{index .FieldMap "dest"}}' for each required source file instead of ` + + `or adding a .dockerignore file (https://docs.docker.com/engine/reference/builder/#dockerignore-file) ignoring non-image-critical files. skaffold respects files ignored via the .dockerignore`, + ExplanationPopulator: func(params InputParams) (explanationInfo, error) { + return explanationInfo{ + FieldMap: map[string]interface{}{ + "src": params.DockerCopyCommandInfo.From, + "dest": params.DockerCopyCommandInfo.To, + }, + }, nil + }, + LintConditions: []func(InputParams) bool{func(params InputParams) bool { + files := 0 + for range params.DockerfileToFromToToDeps[params.ConfigFile.AbsPath][params.DockerCopyCommandInfo.String()] { + files++ + } + return files > 1000 + }}, + }, + { + RuleType: DockerfileCommandLintRule, + Filter: DockerCommandFilter{ + DockerCommand: command.Copy, + DockerCopySourceRegExp: `.*`, + }, + RuleID: DockerfileCopyContainsGitDir, + // TODO(aaron-prindle) suggest a full .dockerignore sample - .dockerignore:**/.git + ExplanationTemplate: `Found docker 'COPY' command where the source directory "{{index .FieldMap "src"}}" contains a '.git' directory at {{index .FieldMap "gitDirectoryAbsPath"}}. This has the potential to dramatically slow 'skaffold dev' down ` + + `as skaffold will watch all of the files in the .git directory as skaffold watches all sources files referenced in dockerfile COPY directives for changes. ` + + `skaffold will likely rebuild images unnecessarily when non-image-critical files are ` + + `modified during any git related operation. 
Consider adding a .dockerignore file (https://docs.docker.com/engine/reference/builder/#dockerignore-file) ignoring the '.git' directory. skaffold respects files ignored via the .dockerignore`, + ExplanationPopulator: func(params InputParams) (explanationInfo, error) { + var gitDirectoryAbsPath string + for _, dep := range params.DockerfileToFromToToDeps[params.ConfigFile.AbsPath][params.DockerCopyCommandInfo.String()] { + if filepath.Dir(dep) == ".git" { + gitDirectoryAbsPath = filepath.Join(params.WorkspacePath, filepath.Dir(dep)) + break + } + } + return explanationInfo{ + FieldMap: map[string]interface{}{ + "src": params.DockerCopyCommandInfo.From, + "gitDirectoryAbsPath": gitDirectoryAbsPath, + }, + }, nil + }, + + // TODO(aaron-prindle) currently the LintCondition runs w/ deps that map to a dockerfile and not a specific COPY command. Can make certain rules infeasible + LintConditions: []func(InputParams) bool{func(params InputParams) bool { + for _, dep := range params.DockerfileToFromToToDeps[params.ConfigFile.AbsPath][params.DockerCopyCommandInfo.String()] { + if filepath.Dir(dep) == ".git" { + return true + } + } + return false + }}, + }, +} + +func GetDockerfilesLintResults(ctx context.Context, opts Options, dockerCfg docker.Config) (*[]Result, error) { + cfgs, err := getConfigSet(ctx, config.SkaffoldOptions{ + ConfigurationFile: opts.Filename, + ConfigurationFilter: opts.Modules, + RepoCacheDir: opts.RepoCacheDir, + Profiles: opts.Profiles, + }) + if err != nil { + return nil, err + } + + l := []Result{} + seen := map[string]bool{} + dockerfileToFromToToDepMap := map[string]map[string][]string{} + workdir, err := realWorkDir() + if err != nil { + return nil, err + } + + for _, c := range cfgs { + for _, a := range c.Build.Artifacts { + if a.DockerArtifact != nil { + ws := filepath.Join(workdir, a.Workspace) + fp := filepath.Join(ws, a.DockerArtifact.DockerfilePath) + if _, ok := seen[fp]; ok { + continue + } + seen[fp] = true + b, err := 
ioutil.ReadFile(fp) + if err != nil { + return nil, err + } + dockerfile := ConfigFile{ + AbsPath: fp, + RelPath: filepath.Join(a.Workspace, a.DockerArtifact.DockerfilePath), + Text: string(b), + } + // TODO(aaron-prindle) currently this dep map is computed twice; here and in skaffoldyamls.go, make a singleton/share-the-info + // TODO(aaron-prindle) currently copy commands are parsed twice; here and in linters.go + fromToToDepMap, err := getDockerDependenciesForEachFromTo(context.TODO(), + docker.NewBuildConfig(ws, a.ImageName, fp, map[string]*string{}), nil) + if err != nil { + return nil, err + } + dockerfileToFromToToDepMap[fp] = fromToToDepMap + for _, r := range DockerfileLinters { + recs, err := r.Lint(InputParams{ + ConfigFile: dockerfile, + SkaffoldConfig: c, + DockerfileToFromToToDeps: dockerfileToFromToToDepMap, + WorkspacePath: ws, + DockerConfig: dockerCfg, + }, dockerfileRules) + if err != nil { + return nil, err + } + l = append(l, *recs...) + } + } + } + } + return &l, nil +} diff --git a/pkg/skaffold/lint/dockerfiles_test.go b/pkg/skaffold/lint/dockerfiles_test.go new file mode 100644 index 00000000000..e07d61e35f5 --- /dev/null +++ b/pkg/skaffold/lint/dockerfiles_test.go @@ -0,0 +1,264 @@ +/* +Copyright 2021 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package lint + +import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "path/filepath" + "testing" + "text/template" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/config" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/parser" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/runner/runcontext" + v1 "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest/v1" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util/stringslice" + "github.com/GoogleContainerTools/skaffold/testutil" +) + +var testDockerfile = `ARG BASE +FROM golang:1.15 as builder{{range .}} +COPY {{.From}} {{.To}}{{end}} +COPY local.txt /container-dir +ARG SKAFFOLD_GO_GCFLAGS +RUN go build -gcflags="${SKAFFOLD_GO_GCFLAGS}" -o /app . + +FROM $BASE +COPY --from=builder /app . +` + +func TestGetDockerfilesLintResults(t *testing.T) { + ruleIDToDockerfileRule := map[RuleID]*Rule{} + for i := range dockerfileLintRules { + ruleIDToDockerfileRule[dockerfileLintRules[i].RuleID] = &dockerfileLintRules[i] + } + tests := []struct { + description string + rules []RuleID + moduleAndSkaffoldYamls map[string]string + profiles []string + modules []string + dockerFromTo []docker.FromTo + shouldErr bool + err error + expected map[string]*[]Result + }{ + { + description: "verify DockerfileCopyOver1000Files rule works as intended", + rules: []RuleID{DockerfileCopyOver1000Files}, + moduleAndSkaffoldYamls: map[string]string{"cfg0": testSkaffoldYaml}, + modules: []string{"cfg0"}, + dockerFromTo: []docker.FromTo{ + { + From: ".", + To: "/", + ToIsDir: true, + StartLine: 3, + EndLine: 3, + }, + { + From: "local.txt", + To: "/container-dir", + ToIsDir: true, + StartLine: 3, + EndLine: 3, + }, + }, + expected: map[string]*[]Result{ + "cfg0": { + { + Rule: ruleIDToDockerfileRule[DockerfileCopyOver1000Files], + Line: 3, + Column: 1, + Explanation: `Found docker 'COPY' command where the source directory "." 
has over 1000 files. This has the potential ` + + `to dramatically slow 'skaffold dev' down as skaffold watches all sources files referenced in dockerfile COPY directives ` + + `for changes. If you notice skaffold rebuilding images unnecessarily when non-image-critical files are modified, consider ` + + `changing this to 'COPY $REQUIRED_SOURCE_FILE(s) /' for each required source file instead of or adding a .dockerignore file ` + + `(https://docs.docker.com/engine/reference/builder/#dockerignore-file) ignoring non-image-critical files. skaffold respects ` + + `files ignored via the .dockerignore`, + }, + }, + }, + }, + { + description: "verify DockerfileCopyContainsGitDir rule works as intended", + rules: []RuleID{DockerfileCopyContainsGitDir}, + moduleAndSkaffoldYamls: map[string]string{"cfg0": testSkaffoldYaml}, + modules: []string{"cfg0"}, + dockerFromTo: []docker.FromTo{ + { + From: ".", + To: "/", + ToIsDir: true, + StartLine: 3, + EndLine: 3, + }, + { + From: "local.txt", + To: "/container-dir", + ToIsDir: true, + StartLine: 3, + EndLine: 3, + }, + }, + expected: map[string]*[]Result{ + "cfg0": { + { + Rule: ruleIDToDockerfileRule[DockerfileCopyContainsGitDir], + Line: 3, + Column: 1, + Explanation: `Found docker 'COPY' command where the source directory "." contains a '.git' directory at .git. This has the potential ` + + `to dramatically slow 'skaffold dev' down as skaffold will watch all of the files in the .git directory as skaffold watches all sources ` + + `files referenced in dockerfile COPY directives for changes. skaffold will likely rebuild images unnecessarily when non-image-critical ` + + `files are modified during any git related operation. Consider adding a .dockerignore file ` + + `(https://docs.docker.com/engine/reference/builder/#dockerignore-file) ignoring the '.git' directory. 
skaffold respects files ignored ` + + `via the .dockerignore`, + }, + }, + }, + }, + { + rules: []RuleID{DockerfileCopyContainsGitDir}, + description: "invalid dockerfile file", + dockerFromTo: []docker.FromTo{ + { + From: "", + To: "", + }, + }, + moduleAndSkaffoldYamls: map[string]string{"cfg0": testSkaffoldYaml}, + shouldErr: true, + }, + } + for _, test := range tests { + testutil.Run(t, test.description, func(t *testutil.T) { + testRules := []Rule{} + for _, ruleID := range test.rules { + testRules = append(testRules, *(ruleIDToDockerfileRule[ruleID])) + } + t.Override(dockerfileRules, testRules) + t.Override(&realWorkDir, func() (string, error) { + return "", nil + }) + t.Override(&getDockerDependenciesForEachFromTo, func(ctx context.Context, buildCfg docker.BuildConfig, cfg docker.Config) (map[string][]string, error) { + deps := make([]string, 1001) + for i := 0; i < 1001; i++ { + deps[i] = fmt.Sprintf(".git/%d", i) + } + m := map[string][]string{} + for _, fromTo := range test.dockerFromTo { + if fromTo.From == "." 
{ + m[fromTo.String()] = deps + continue + } + m[fromTo.String()] = []string{fromTo.From} + } + return m, nil + }) + t.Override(&readCopyCmdsFromDockerfile, func(ctx context.Context, onlyLastImage bool, absDockerfilePath, workspace string, buildArgs map[string]*string, cfg docker.Config) ([]docker.FromTo, error) { + return docker.ExtractOnlyCopyCommands(absDockerfilePath) + }) + tmpdir := t.TempDir() + configSet := parser.SkaffoldConfigSet{} + // iteration done to enforce result order + for i := 0; i < len(test.moduleAndSkaffoldYamls); i++ { + module := fmt.Sprintf("cfg%d", i) + skaffoldyamlText := test.moduleAndSkaffoldYamls[module] + fp := filepath.Join(tmpdir, fmt.Sprintf("%s.yaml", module)) + err := ioutil.WriteFile(fp, []byte(skaffoldyamlText), 0644) + if err != nil { + t.Fatalf("error creating skaffold.yaml file with name %s: %v", fp, err) + } + dfp := filepath.Join(tmpdir, "Dockerfile") + var b bytes.Buffer + tmpl, err := template.New("dockerfileText").Parse(testDockerfile) + if err != nil { + t.Fatalf("error parsing dockerfileText go template: %v", err) + } + err = tmpl.Execute(&b, test.dockerFromTo) + if err != nil { + t.Fatalf("error executing dockerfileText go template: %v", err) + } + err = ioutil.WriteFile(dfp, b.Bytes(), 0644) + if err != nil { + t.Fatalf("error creating Dockerfile %s: %v", dfp, err) + } + configSet = append(configSet, &parser.SkaffoldConfigEntry{SkaffoldConfig: &v1.SkaffoldConfig{ + Metadata: v1.Metadata{Name: module}, + Pipeline: v1.Pipeline{Build: v1.BuildConfig{Artifacts: []*v1.Artifact{{Workspace: "", + ArtifactType: v1.ArtifactType{DockerArtifact: &v1.DockerArtifact{DockerfilePath: dfp}}}}}}, + }, + SourceFile: fp, + }) + // test overwrites file paths for expected DockerfileRules as they are made dynamically + results := test.expected[module] + if results == nil { + continue + } + for i := range *results { + (*results)[i].AbsFilePath = dfp + (*results)[i].RelFilePath = dfp + } + } + t.Override(&getConfigSet, func(_ 
context.Context, opts config.SkaffoldOptions) (parser.SkaffoldConfigSet, error) { + // mock profile activation + var set parser.SkaffoldConfigSet + for _, c := range configSet { + if len(opts.ConfigurationFilter) > 0 && !stringslice.Contains(opts.ConfigurationFilter, c.Metadata.Name) { + continue + } + for _, pName := range opts.Profiles { + for _, profile := range c.Profiles { + if profile.Name != pName { + continue + } + c.Test = profile.Test + } + } + set = append(set, c) + } + return set, test.err + }) + results, err := GetDockerfilesLintResults(context.Background(), Options{ + OutFormat: "json", Modules: test.modules, Profiles: test.profiles}, &runcontext.RunContext{}) + t.CheckError(test.shouldErr, err) + if !test.shouldErr { + expectedResults := &[]Result{} + // this is done to enforce result order + for i := 0; i < len(test.expected); i++ { + *expectedResults = append(*expectedResults, *test.expected[fmt.Sprintf("cfg%d", i)]...) + (*expectedResults)[0].Rule.ExplanationPopulator = nil + (*expectedResults)[0].Rule.LintConditions = nil + } + + if results == nil { + t.CheckDeepEqual(expectedResults, results) + return + } + for i := 0; i < len(*results); i++ { + (*results)[i].Rule.ExplanationPopulator = nil + (*results)[i].Rule.LintConditions = nil + } + t.CheckDeepEqual(expectedResults, results) + } + }) + } +} diff --git a/pkg/skaffold/lint/lint.go b/pkg/skaffold/lint/lint.go index 320f03b03f9..e97e7868865 100644 --- a/pkg/skaffold/lint/lint.go +++ b/pkg/skaffold/lint/lint.go @@ -20,18 +20,24 @@ import ( "context" "io" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" ) var realWorkDir = util.RealWorkDir -func Lint(ctx context.Context, out io.Writer, opts Options) error { +func Lint(ctx context.Context, out io.Writer, opts Options, dockerCfg docker.Config) error { skaffoldYamlRuleList, err := GetSkaffoldYamlsLintResults(ctx, opts) if err != nil { return err } + 
dockerfileCommandRuleList, err := GetDockerfilesLintResults(ctx, opts, dockerCfg) + if err != nil { + return err + } results := []Result{} results = append(results, *skaffoldYamlRuleList...) + results = append(results, *dockerfileCommandRuleList...) // output flattened list if opts.OutFormat == JSONOutput { // need to remove some fields that cannot be serialized in the Rules of the Results diff --git a/pkg/skaffold/lint/linters.go b/pkg/skaffold/lint/linters.go index e3bac2b0d5a..02a7fe43dcc 100644 --- a/pkg/skaffold/lint/linters.go +++ b/pkg/skaffold/lint/linters.go @@ -23,11 +23,57 @@ import ( "regexp" "text/template" + "github.com/moby/buildkit/frontend/dockerfile/command" "sigs.k8s.io/kustomize/kyaml/yaml" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/output/log" ) +// for testing +var readCopyCmdsFromDockerfile = docker.ReadCopyCmdsFromDockerfile + +type DockerfileCommandLinter struct{} + +func (*DockerfileCommandLinter) Lint(params InputParams, rules *[]Rule) (*[]Result, error) { + results := &[]Result{} + fromTos, err := readCopyCmdsFromDockerfile(context.TODO(), false, params.ConfigFile.AbsPath, params.WorkspacePath, map[string]*string{}, params.DockerConfig) + if err != nil { + return nil, err + } + for _, rule := range *rules { + if rule.RuleType != DockerfileCommandLintRule { + continue + } + var dockerCommandFilter DockerCommandFilter + switch v := rule.Filter.(type) { + case DockerCommandFilter: + dockerCommandFilter = v + default: + return nil, fmt.Errorf("unknown filter type found for DockerfileCommandLinter lint rule: %v", rule) + } + // NOTE: ADD and COPY are both treated the same from the linter perspective - eg: if you have linter look at COPY src/dest it will also check ADD src/dest + if dockerCommandFilter.DockerCommand != command.Copy && dockerCommandFilter.DockerCommand != command.Add { + log.Entry(context.TODO()).Errorf("unsupported docker command found for 
DockerfileCommandLinter: %v", dockerCommandFilter.DockerCommand) + return nil, fmt.Errorf("unsupported docker command found for DockerfileCommandLinter: %v", dockerCommandFilter.DockerCommand) + } + for _, fromTo := range fromTos { + r, err := regexp.Compile(dockerCommandFilter.DockerCopySourceRegExp) + if err != nil { + return nil, err + } + if !r.MatchString(fromTo.From) { + continue + } + log.Entry(context.TODO()).Infof("docker command 'copy' match found for source: %s\n", fromTo.From) + // TODO(aaron-prindle) modify so that there are input and output params s.t. it is more obvious what fields need to be updated + params.DockerCopyCommandInfo = fromTo + appendRuleIfLintConditionsPass(params, results, rule, fromTo.StartLine, 1) + } + } + return results, nil +} + type RegExpLinter struct{} func (*RegExpLinter) Lint(lintInputs InputParams, rules *[]Rule) (*[]Result, error) { @@ -105,7 +151,12 @@ func (*YamlFieldLinter) Lint(lintInputs InputParams, rules *[]Rule) (*[]Result, } func appendRuleIfLintConditionsPass(lintInputs InputParams, results *[]Result, rule Rule, line, col int) { - allPassed := true + for _, f := range rule.LintConditions { + if !f(lintInputs) { + // lint condition failed, no rule is triggered + return + } + } explanation := rule.ExplanationTemplate if rule.ExplanationPopulator != nil { ei, err := rule.ExplanationPopulator(lintInputs) @@ -128,23 +179,15 @@ func appendRuleIfLintConditionsPass(lintInputs InputParams, results *[]Result, r explanation = b.String() } - for _, f := range rule.LintConditions { - if !f(lintInputs) { - allPassed = false - break - } - } - if allPassed { - mr := Result{ - Rule: &rule, - Explanation: explanation, - AbsFilePath: lintInputs.ConfigFile.AbsPath, - RelFilePath: lintInputs.ConfigFile.RelPath, - Line: line, - Column: col, - } - *results = append(*results, mr) + mr := Result{ + Rule: &rule, + Explanation: explanation, + AbsFilePath: lintInputs.ConfigFile.AbsPath, + RelFilePath: lintInputs.ConfigFile.RelPath, + Line: 
line, + Column: col, } + *results = append(*results, mr) } func convert1DFileIndexTo2D(input string, idx int) (int, int) { diff --git a/pkg/skaffold/lint/output.go b/pkg/skaffold/lint/output.go index 90ee9950b87..5795ec43ced 100644 --- a/pkg/skaffold/lint/output.go +++ b/pkg/skaffold/lint/output.go @@ -83,7 +83,7 @@ func generatePlainTextOutput(res *Result) (string, error) { } // TODO(aaron-prindle) - support different template for multiline matches -> (no ColPointerLine, show multi line match) // if flagged text contains \n character, don't show colpointerline - tmpl, err := template.New("plainTextOutput").Parse("{{.RelFilePath}}:{{.LineNumber}}:{{.ColumnNumber}}: {{.RuleID}}: {{.Explanation}}: ({{.RuleType}})\n{{.FlaggedText}}\n{{.ColPointerLine}}") + tmpl, err := template.New("plainTextOutput").Parse("{{.RelFilePath}}:{{.LineNumber}}:{{.ColumnNumber}}: {{.RuleID}}: {{.RuleType}}: {{.Explanation}}\n{{.FlaggedText}}\n{{.ColPointerLine}}") if err != nil { return "", err } diff --git a/pkg/skaffold/lint/output_test.go b/pkg/skaffold/lint/output_test.go index 1b240c460be..cbaa23783b5 100644 --- a/pkg/skaffold/lint/output_test.go +++ b/pkg/skaffold/lint/output_test.go @@ -54,7 +54,7 @@ func TestLintOutput(t *testing.T) { }, }, text: "first column of this line should be flagged in the result [1,1]", - expected: "rel/path:1:1: ID000000: test explanation: (RegExpLintLintRule)\nfirst column of this line should be flagged in the result [1,1]\n^\n", + expected: "rel/path:1:1: ID000000: RegExpLintLintRule: test explanation\nfirst column of this line should be flagged in the result [1,1]\n^\n", }, { description: "verify json lint output is as expected", diff --git a/pkg/skaffold/lint/skaffoldyamls.go b/pkg/skaffold/lint/skaffoldyamls.go index e09ba112a1e..2d7a76e4b36 100644 --- a/pkg/skaffold/lint/skaffoldyamls.go +++ b/pkg/skaffold/lint/skaffoldyamls.go @@ -73,7 +73,7 @@ var skaffoldYamlLintRules = []Rule{ InvertMatch: true, }, ExplanationTemplate: "It is a skaffold best 
practice to specify a static port (vs skaffold dynamically choosing one) for port forwarding " + - "container based resources skaffold deploys. This is helpful because wit this the local ports are predictable across dev sessions which " + + "container based resources skaffold deploys. This is helpful because with this the local ports are predictable across dev sessions which " + " makes testing/debugging easier. It is recommended to add the following stanza at the end of your skaffold.yaml for each shown deployed resource:\n" + `portForward:{{range $k,$v := .FieldMap }} - resourceType: {{ $v.ResourceType }} @@ -105,6 +105,9 @@ var skaffoldYamlLintRules = []Rule{ ExplanationPopulator: func(lintInputs InputParams) (explanationInfo, error) { fieldMap := map[string]interface{}{} forwardedPorts := util.PortSet{} + if lintInputs.SkaffoldConfig.Deploy.KubectlDeploy == nil { + return explanationInfo{}, fmt.Errorf("expected kubectl deploy information to be populated but it was nil") + } for _, pattern := range lintInputs.SkaffoldConfig.Deploy.KubectlDeploy.Manifests { // NOTE: pattern is a pattern that can have wildcards, eg: leeroy-app/kubernetes/* if util.IsURL(pattern) { @@ -172,7 +175,6 @@ func GetSkaffoldYamlsLintResults(ctx context.Context, opts Options) (*[]Result, RelPath: strings.TrimPrefix(c.SourceFile, workdir), Text: string(b), } - results := []Result{} for _, r := range SkaffoldYamlLinters { recs, err := r.Lint(InputParams{ ConfigFile: skaffoldyaml, @@ -181,9 +183,8 @@ func GetSkaffoldYamlsLintResults(ctx context.Context, opts Options) (*[]Result, if err != nil { return nil, err } - results = append(results, *recs...) + l = append(l, *recs...) } - l = append(l, results...) 
} return &l, nil } diff --git a/pkg/skaffold/lint/skaffoldyamls_test.go b/pkg/skaffold/lint/skaffoldyamls_test.go index 76d87b7b8e5..81d63a63cb1 100644 --- a/pkg/skaffold/lint/skaffoldyamls_test.go +++ b/pkg/skaffold/lint/skaffoldyamls_test.go @@ -151,7 +151,7 @@ func TestGetSkaffoldYamlsLintResults(t *testing.T) { Line: 14, Column: 1, Explanation: "It is a skaffold best practice to specify a static port (vs skaffold dynamically choosing one) for port forwarding " + - "container based resources skaffold deploys. This is helpful because wit this the local ports are predictable across dev sessions which " + + "container based resources skaffold deploys. This is helpful because with this the local ports are predictable across dev sessions which " + " makes testing/debugging easier. It is recommended to add the following stanza at the end of your skaffold.yaml for each shown deployed resource:\n" + `portForward: - resourceType: deployment @@ -176,7 +176,9 @@ func TestGetSkaffoldYamlsLintResults(t *testing.T) { testRules = append(testRules, *(ruleIDToskaffoldYamlRule[ruleID])) } t.Override(skaffoldYamlRules, testRules) - + t.Override(&realWorkDir, func() (string, error) { + return "", nil + }) tmpdir := t.TempDir() configSet := parser.SkaffoldConfigSet{} // iteration done to enforce result order @@ -211,9 +213,6 @@ func TestGetSkaffoldYamlsLintResults(t *testing.T) { (*results)[i].RelFilePath = configSet[len(configSet)-1].SourceFile } } - t.Override(&realWorkDir, func() (string, error) { - return "", nil - }) t.Override(&getConfigSet, func(_ context.Context, opts config.SkaffoldOptions) (parser.SkaffoldConfigSet, error) { // mock profile activation var set parser.SkaffoldConfigSet @@ -244,6 +243,10 @@ func TestGetSkaffoldYamlsLintResults(t *testing.T) { (*expectedResults)[0].Rule.ExplanationPopulator = nil (*expectedResults)[0].Rule.LintConditions = nil } + if results == nil { + t.CheckDeepEqual(expectedResults, results) + return + } for i := 0; i < len(*results); i++ { 
(*results)[i].Rule.ExplanationPopulator = nil (*results)[i].Rule.LintConditions = nil diff --git a/pkg/skaffold/lint/types.go b/pkg/skaffold/lint/types.go index 48f93375479..cc9f1874973 100644 --- a/pkg/skaffold/lint/types.go +++ b/pkg/skaffold/lint/types.go @@ -22,6 +22,7 @@ import ( "go.lsp.dev/protocol" "sigs.k8s.io/kustomize/kyaml/yaml" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/parser" ) @@ -63,6 +64,12 @@ type Result struct { Column int } +type DockerCommandFilter struct { + DockerCommand string + DockerCopyDestRegExp string + DockerCopySourceRegExp string +} + type YamlFieldFilter struct { Filter yaml.Filter FieldOnly string @@ -80,10 +87,11 @@ type RuleType int const ( RegExpLintLintRule RuleType = iota YamlFieldLintRule + DockerfileCommandLintRule ) func (a RuleType) String() string { - return [...]string{"RegExpLintLintRule", "YamlFieldLintRule"}[a] + return [...]string{"RegExpLintLintRule", "YamlFieldLintRule", "DockerfileCommandLintRule"}[a] } type RuleID int @@ -94,6 +102,9 @@ const ( SkaffoldYamlAPIVersionOutOfDate SkaffoldYamlUseStaticPort SkaffoldYamlSyncPython + + DockerfileCopyOver1000Files + DockerfileCopyContainsGitDir ) func (a RuleID) String() string { @@ -101,9 +112,13 @@ func (a RuleID) String() string { } type InputParams struct { - ConfigFile ConfigFile - DockerfileToDepMap map[string][]string - SkaffoldConfig *parser.SkaffoldConfigEntry + ConfigFile ConfigFile + DockerfileToDepMap map[string][]string + DockerfileToFromToToDeps map[string]map[string][]string + SkaffoldConfig *parser.SkaffoldConfigEntry + DockerCopyCommandInfo docker.FromTo + WorkspacePath string + DockerConfig docker.Config } type Linter interface {