diff --git a/.gitignore b/.gitignore index 92e47edbe..69a329e8a 100644 --- a/.gitignore +++ b/.gitignore @@ -60,6 +60,11 @@ cmd/external-plugins/cherrypicker/cherrypicker cmd/external-plugins/needs-rebase/needs-rebase cmd/external-plugins/refresh/refresh cmd/ghproxy/ghproxy +pj.yaml +pod.yaml +*.min.js +vendor/ + # Generated files by hugo site/public/ @@ -77,4 +82,4 @@ hugo.linux # Folders of common IDEs .idea/ -.vscode/ +.vscode/ \ No newline at end of file diff --git a/cmd/crier/main.go b/cmd/crier/main.go index 0a21964ae..b45a5165a 100644 --- a/cmd/crier/main.go +++ b/cmd/crier/main.go @@ -35,6 +35,7 @@ import ( "sigs.k8s.io/prow/pkg/config" "sigs.k8s.io/prow/pkg/config/secret" "sigs.k8s.io/prow/pkg/crier" + dingtalkreporter "sigs.k8s.io/prow/pkg/crier/reporters/dingtalk" gcsreporter "sigs.k8s.io/prow/pkg/crier/reporters/gcs" k8sgcsreporter "sigs.k8s.io/prow/pkg/crier/reporters/gcs/kubernetes" gerritreporter "sigs.k8s.io/prow/pkg/crier/reporters/gerrit" @@ -66,6 +67,7 @@ type options struct { blobStorageWorkers int k8sBlobStorageWorkers int resultStoreWorkers int + dingTalkWorkers int slackTokenFile string additionalSlackTokenFiles slackclient.HostsFlag @@ -83,7 +85,7 @@ type options struct { } func (o *options) validate() error { - if o.gerritWorkers+o.pubsubWorkers+o.githubWorkers+o.slackWorkers+o.blobStorageWorkers+o.k8sBlobStorageWorkers+o.resultStoreWorkers <= 0 { + if o.gerritWorkers+o.pubsubWorkers+o.githubWorkers+o.slackWorkers+o.blobStorageWorkers+o.k8sBlobStorageWorkers+o.resultStoreWorkers+o.dingTalkWorkers <= 0 { return errors.New("crier need to have at least one report worker to start") } @@ -127,6 +129,7 @@ func (o *options) parseArgs(fs *flag.FlagSet, args []string) error { fs.IntVar(&o.pubsubWorkers, "pubsub-workers", 0, "Number of pubsub report workers (0 means disabled)") fs.IntVar(&o.githubWorkers, "github-workers", 0, "Number of github report workers (0 means disabled)") fs.IntVar(&o.slackWorkers, "slack-workers", 0, "Number of Slack report workers (0 means disabled)") + fs.IntVar(&o.dingTalkWorkers, "dingtalk-workers", 0, "Number of DingTalk report workers (0 means disabled)") fs.Var(&o.additionalSlackTokenFiles, "additional-slack-token-files", "Map of additional slack token files. 
example: --additional-slack-token-files=foo=/etc/foo-slack-tokens/token, repeat flag for each host")
 	fs.IntVar(&o.blobStorageWorkers, "blob-storage-workers", 0, "Number of blob storage report workers (0 means disabled)")
 	fs.IntVar(&o.k8sBlobStorageWorkers, "kubernetes-blob-storage-workers", 0, "Number of Kubernetes-specific blob storage report workers (0 means disabled)")
@@ -313,6 +316,20 @@ func main() {
 		}
 	}

+	if o.dingTalkWorkers > 0 {
+		hasReporter = true
+		if cfg().DingTalkReporterConfigs == nil {
+			logrus.Fatal("dingtalkreporter is enabled but has no config")
+		}
+		dingTalkConfig := func(refs *prowapi.Refs) config.DingTalkReporter {
+			return cfg().DingTalkReporterConfigs.GetDingTalkReporter(refs)
+		}
+		dingTalkReporter := dingtalkreporter.New(dingTalkConfig, o.dryrun)
+		if err := crier.New(mgr, dingTalkReporter, o.dingTalkWorkers, o.githubEnablement.EnablementChecker()); err != nil {
+			logrus.WithError(err).Fatal("failed to construct dingtalk reporter controller")
+		}
+	}
+
 	if !hasReporter {
 		logrus.Fatalf("should have at least one controller to start crier.")
 	}
diff --git a/cmd/crier/main_test.go b/cmd/crier/main_test.go
index 96c058f32..f5b214f12 100644
--- a/cmd/crier/main_test.go
+++ b/cmd/crier/main_test.go
@@ -146,6 +146,42 @@ func TestOptions(t *testing.T) {
 				instrumentationOptions: flagutil.DefaultInstrumentationOptions(),
 			},
 		},
+		//DingTalk Reporter
+		{
+			name: "dingTalk workers, sets workers",
+			args: []string{"--dingtalk-workers=13", "--config-path=foo"},
+			expected: &options{
+				dingTalkWorkers: 13,
+				config: configflagutil.ConfigOptions{
+					ConfigPathFlagName: "config-path",
+					JobConfigPathFlagName: "job-config-path",
+					ConfigPath: "foo",
+					SupplementalProwConfigsFileNameSuffix: "_prowconfig.yaml",
+					InRepoConfigCacheSize: 200,
+				},
+				github: defaultGitHubOptions,
+				k8sReportFraction: 1.0,
+				instrumentationOptions: flagutil.DefaultInstrumentationOptions(),
+			},
+		},
+		{
+			name: "dingTalk with --dry-run, sets",
+			args: []string{"--dingtalk-workers=13", "--config-path=foo", "--dry-run"},
+			expected: &options{
+				dingTalkWorkers: 13,
+				config: configflagutil.ConfigOptions{
+					ConfigPathFlagName: "config-path",
+					JobConfigPathFlagName: "job-config-path",
+					ConfigPath: "foo",
+					SupplementalProwConfigsFileNameSuffix: "_prowconfig.yaml",
+					InRepoConfigCacheSize: 200,
+				},
+				dryrun: true,
+				github: defaultGitHubOptions,
+				k8sReportFraction: 1.0,
+				instrumentationOptions: flagutil.DefaultInstrumentationOptions(),
+			},
+		},
 		{
 			name: "k8s-gcs enables k8s-gcs",
 			args: []string{"--kubernetes-blob-storage-workers=3", "--config-path=foo"},
diff --git a/config/prow/cluster/prowjob-crd/prowjob_customresourcedefinition.yaml b/config/prow/cluster/prowjob-crd/prowjob_customresourcedefinition.yaml
index 6e1c4f251..0e4c73fcd 100644
--- a/config/prow/cluster/prowjob-crd/prowjob_customresourcedefinition.yaml
+++ b/config/prow/cluster/prowjob-crd/prowjob_customresourcedefinition.yaml
@@ -9633,6 +9633,30 @@ spec:
               reporter_config:
                 description: ReporterConfig holds reporter-specific configuration
                 properties:
+                  dingtalk:
+                    properties:
+                      job_states_to_report:
+                        items:
+                          description: ProwJobState specifies whether the job is running
+                          type: string
+                        type: array
+                      report:
+                        description: |-
+                          Report is derived from JobStatesToReport, it's used for differentiating
+                          nil from empty slice, as yaml roundtrip by design can't tell the
+                          difference when omitempty is supplied.
+                          See https://github.com/kubernetes/test-infra/pull/24168 for details
+                          Priority-wise, it goes by following order:
+                          - `report: true/false` in job config
+                          - `JobStatesToReport: ` in job config
+                          - `report: true/false` in global config
+                          - `JobStatesToReport:` in global config
+                        type: boolean
+                      report_template:
+                        type: string
+                      token:
+                        type: string
+                    type: object
                   slack:
                     properties:
                       channel:
diff --git a/pkg/apis/prowjobs/v1/types.go b/pkg/apis/prowjobs/v1/types.go
index 620595b8c..4cfd24e2f 100644
--- a/pkg/apis/prowjobs/v1/types.go
+++ b/pkg/apis/prowjobs/v1/types.go
@@ -357,6 +357,8 @@ func (rac *RerunAuthConfig) IsAllowAnyone() bool {
 type ReporterConfig struct {
 	Slack *SlackReporterConfig `json:"slack,omitempty"`
+
+	DingTalk *DingTalkReporterConfig `json:"dingtalk,omitempty"`
 }

 type SlackReporterConfig struct {
@@ -410,6 +412,53 @@ func (src *SlackReporterConfig) ApplyDefault(def *SlackReporterConfig) *SlackRep
 	return &merged
 }

+type DingTalkReporterConfig struct {
+	Token string `json:"token,omitempty"`
+	JobStatesToReport []ProwJobState `json:"job_states_to_report,omitempty"`
+	ReportTemplate string `json:"report_template,omitempty"`
+	// Report is derived from JobStatesToReport, it's used for differentiating
+	// nil from empty slice, as yaml roundtrip by design can't tell the
+	// difference when omitempty is supplied.
+	// See https://github.com/kubernetes/test-infra/pull/24168 for details
+	// Priority-wise, it goes by following order:
+	// - `report: true/false` in job config
+	// - `JobStatesToReport: ` in job config
+	// - `report: true/false` in global config
+	// - `JobStatesToReport:` in global config
+	Report *bool `json:"report,omitempty"`
+}
+
+// ApplyDefault is called by jobConfig.ApplyDefault(globalConfig)
+func (src *DingTalkReporterConfig) ApplyDefault(def *DingTalkReporterConfig) *DingTalkReporterConfig {
+	if src == nil && def == nil {
+		return nil
+	}
+	var merged DingTalkReporterConfig
+	if src != nil {
+		merged = *src.DeepCopy()
+	} else {
+		merged = *def.DeepCopy()
+	}
+	if src == nil || def == nil {
+		return &merged
+	}
+
+	if merged.Token == "" {
+		merged.Token = def.Token
+	}
+	// Note: `job_states_to_report: []` also results in JobStatesToReport == nil
+	if merged.JobStatesToReport == nil {
+		merged.JobStatesToReport = def.JobStatesToReport
+	}
+	if merged.ReportTemplate == "" {
+		merged.ReportTemplate = def.ReportTemplate
+	}
+	if merged.Report == nil {
+		merged.Report = def.Report
+	}
+	return &merged
+}
+
 // Duration is a wrapper around time.Duration that parses times in either
 // 'integer number of nanoseconds' or 'duration string' formats and serializes
 // to 'duration string' format.
diff --git a/pkg/apis/prowjobs/v1/types_test.go b/pkg/apis/prowjobs/v1/types_test.go
index ca535b39a..247e13631 100644
--- a/pkg/apis/prowjobs/v1/types_test.go
+++ b/pkg/apis/prowjobs/v1/types_test.go
@@ -465,6 +465,35 @@ func TestSlackConfigApplyDefaultsAppliesDefaultsForAllFields(t *testing.T) {
 	}
 }

+func TestDingTalkConfigApplyDefaultsAppliesDefaultsForAllFields(t *testing.T) {
+	t.Parallel()
+	seed := time.Now().UnixNano()
+	// Print the seed so failures can easily be reproduced
+	t.Logf("Seed: %d", seed)
+	fuzzer := fuzz.NewWithSeed(seed)
+	for i := 0; i < 100; i++ {
+		t.Run(strconv.Itoa(i), func(t *testing.T) {
+			def := &DingTalkReporterConfig{}
+			fuzzer.Fuzz(def)
+
+			// Each of those three has its own DeepCopy and in case it is nil,
+			// we just call that and return.
In order to make this test verify + // that copying of their fields also works, we have to set them to + // something non-nil. + toDefault := &DingTalkReporterConfig{ + Token: "", + JobStatesToReport: nil, + ReportTemplate: "", + } + defaulted := toDefault.ApplyDefault(def) + + if diff := cmp.Diff(def, defaulted); diff != "" { + t.Errorf("defaulted decoration config didn't get all fields defaulted: %s", diff) + } + }) + } +} + func TestRefsToString(t *testing.T) { var tests = []struct { name string diff --git a/pkg/apis/prowjobs/v1/zz_generated.deepcopy.go b/pkg/apis/prowjobs/v1/zz_generated.deepcopy.go index 4c4f70b22..545b1976e 100644 --- a/pkg/apis/prowjobs/v1/zz_generated.deepcopy.go +++ b/pkg/apis/prowjobs/v1/zz_generated.deepcopy.go @@ -222,6 +222,32 @@ func (in *DecorationConfig) DeepCopy() *DecorationConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DingTalkReporterConfig) DeepCopyInto(out *DingTalkReporterConfig) { + *out = *in + if in.JobStatesToReport != nil { + in, out := &in.JobStatesToReport, &out.JobStatesToReport + *out = make([]ProwJobState, len(*in)) + copy(*out, *in) + } + if in.Report != nil { + in, out := &in.Report, &out.Report + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DingTalkReporterConfig. +func (in *DingTalkReporterConfig) DeepCopy() *DingTalkReporterConfig { + if in == nil { + return nil + } + out := new(DingTalkReporterConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Duration) DeepCopyInto(out *Duration) { *out = *in @@ -583,6 +609,11 @@ func (in *ReporterConfig) DeepCopyInto(out *ReporterConfig) { *out = new(SlackReporterConfig) (*in).DeepCopyInto(*out) } + if in.DingTalk != nil { + in, out := &in.DingTalk, &out.DingTalk + *out = new(DingTalkReporterConfig) + (*in).DeepCopyInto(*out) + } return } diff --git a/pkg/config/config.go b/pkg/config/config.go index e3d68c023..4aff09cd9 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -86,8 +86,10 @@ const ( ) var ( - DefaultDiffOpts []cmp.Option = []cmp.Option{cmpopts.IgnoreFields(TideBranchMergeType{}, "Regexpr"), - cmpopts.IgnoreUnexported(Gerrit{})} + DefaultDiffOpts []cmp.Option = []cmp.Option{ + cmpopts.IgnoreFields(TideBranchMergeType{}, "Regexpr"), + cmpopts.IgnoreUnexported(Gerrit{}), + } ) // Config is a read-only snapshot of the config. @@ -140,17 +142,18 @@ type JobConfig struct { // ProwConfig is config for all prow controllers. type ProwConfig struct { // The git sha from which this config was generated. 
- ConfigVersionSHA string `json:"config_version_sha,omitempty"` - Tide Tide `json:"tide,omitempty"` - Plank Plank `json:"plank,omitempty"` - Sinker Sinker `json:"sinker,omitempty"` - Deck Deck `json:"deck,omitempty"` - BranchProtection BranchProtection `json:"branch-protection"` - Gerrit Gerrit `json:"gerrit"` - GitHubReporter GitHubReporter `json:"github_reporter"` - Horologium Horologium `json:"horologium"` - SlackReporterConfigs SlackReporterConfigs `json:"slack_reporter_configs,omitempty"` - InRepoConfig InRepoConfig `json:"in_repo_config"` + ConfigVersionSHA string `json:"config_version_sha,omitempty"` + Tide Tide `json:"tide,omitempty"` + Plank Plank `json:"plank,omitempty"` + Sinker Sinker `json:"sinker,omitempty"` + Deck Deck `json:"deck,omitempty"` + BranchProtection BranchProtection `json:"branch-protection"` + Gerrit Gerrit `json:"gerrit"` + GitHubReporter GitHubReporter `json:"github_reporter"` + Horologium Horologium `json:"horologium"` + SlackReporterConfigs SlackReporterConfigs `json:"slack_reporter_configs,omitempty"` + DingTalkReporterConfigs DingTalkReporterConfigs `json:"dingtalk_reporter_configs,omitempty"` + InRepoConfig InRepoConfig `json:"in_repo_config"` // Gangway contains configurations needed by the the Prow API server of the // same name. It encodes an allowlist of API clients and what kinds of Prow @@ -334,7 +337,11 @@ type refGetterForGitHubPullRequestClient interface { } // NewRefGetterForGitHubPullRequest returns a brand new RefGetterForGitHubPullRequest. -func NewRefGetterForGitHubPullRequest(ghc refGetterForGitHubPullRequestClient, org, repo string, number int) *RefGetterForGitHubPullRequest { +func NewRefGetterForGitHubPullRequest( + ghc refGetterForGitHubPullRequestClient, + org, repo string, + number int, +) *RefGetterForGitHubPullRequest { return &RefGetterForGitHubPullRequest{ ghc: ghc, org: org, @@ -413,7 +420,8 @@ func (rg *RefGetterForGitHubPullRequest) BaseSHA() (string, error) { // retrieval of a *ProwYAML. func GetAndCheckRefs( baseSHAGetter RefGetter, - headSHAGetters ...RefGetter) (string, []string, error) { + headSHAGetters ...RefGetter, +) (string, []string, error) { // Parse "baseSHAGetter". baseSHA, err := baseSHAGetter() @@ -442,7 +450,12 @@ func GetAndCheckRefs( // does a call to GitHub and who also need the result of that GitHub call just // keep a pointer to its result, but must nilcheck that pointer before accessing // it. -func (c *Config) getProwYAMLWithDefaults(gc git.ClientFactory, identifier, baseBranch string, baseSHAGetter RefGetter, headSHAGetters ...RefGetter) (*ProwYAML, error) { +func (c *Config) getProwYAMLWithDefaults( + gc git.ClientFactory, + identifier, baseBranch string, + baseSHAGetter RefGetter, + headSHAGetters ...RefGetter, +) (*ProwYAML, error) { if identifier == "" { return nil, errors.New("no identifier for repo given") } @@ -464,7 +477,12 @@ func (c *Config) getProwYAMLWithDefaults(gc git.ClientFactory, identifier, baseB } // getProwYAML is like getProwYAMLWithDefaults, minus the defaulting logic. 
-func (c *Config) getProwYAML(gc git.ClientFactory, identifier, baseBranch string, baseSHAGetter RefGetter, headSHAGetters ...RefGetter) (*ProwYAML, error) { +func (c *Config) getProwYAML( + gc git.ClientFactory, + identifier, baseBranch string, + baseSHAGetter RefGetter, + headSHAGetters ...RefGetter, +) (*ProwYAML, error) { if identifier == "" { return nil, errors.New("no identifier for repo given") } @@ -491,7 +509,12 @@ func (c *Config) getProwYAML(gc git.ClientFactory, identifier, baseBranch string // Consumers that pass in a RefGetter implementation that does a call to GitHub and who // also need the result of that GitHub call just keep a pointer to its result, but must // nilcheck that pointer before accessing it. -func (c *Config) GetPresubmits(gc git.ClientFactory, identifier, baseBranch string, baseSHAGetter RefGetter, headSHAGetters ...RefGetter) ([]Presubmit, error) { +func (c *Config) GetPresubmits( + gc git.ClientFactory, + identifier, baseBranch string, + baseSHAGetter RefGetter, + headSHAGetters ...RefGetter, +) ([]Presubmit, error) { prowYAML, err := c.getProwYAMLWithDefaults(gc, identifier, baseBranch, baseSHAGetter, headSHAGetters...) if err != nil { return nil, err @@ -521,7 +544,12 @@ func (c *Config) GetPresubmitsStatic(identifier string) []Presubmit { // Consumers that pass in a RefGetter implementation that does a call to GitHub and who // also need the result of that GitHub call just keep a pointer to its result, but must // nilcheck that pointer before accessing it. -func (c *Config) GetPostsubmits(gc git.ClientFactory, identifier, baseBranch string, baseSHAGetter RefGetter, headSHAGetters ...RefGetter) ([]Postsubmit, error) { +func (c *Config) GetPostsubmits( + gc git.ClientFactory, + identifier, baseBranch string, + baseSHAGetter RefGetter, + headSHAGetters ...RefGetter, +) ([]Postsubmit, error) { prowYAML, err := c.getProwYAMLWithDefaults(gc, identifier, baseBranch, baseSHAGetter, headSHAGetters...) if err != nil { return nil, err @@ -795,7 +823,10 @@ func (d *DefaultRerunAuthConfigEntry) matches(repo, cluster string) bool { // mergeProwJobDefault finds all matching ProwJobDefaultEntry // for a job and merges them sequentially before merging into the job's own // PrwoJobDefault. Configs merged later override values from earlier configs. -func (pc *ProwConfig) mergeProwJobDefault(repo, cluster string, jobDefault *prowapi.ProwJobDefault) *prowapi.ProwJobDefault { +func (pc *ProwConfig) mergeProwJobDefault( + repo, cluster string, + jobDefault *prowapi.ProwJobDefault, +) *prowapi.ProwJobDefault { var merged *prowapi.ProwJobDefault for _, entry := range pc.ProwJobDefaultEntries { if entry.matches(repo, cluster) { @@ -815,7 +846,10 @@ func (pc *ProwConfig) mergeProwJobDefault(repo, cluster string, jobDefault *prow // mergeDefaultDecorationConfig finds all matching DefaultDecorationConfigEntry // for a job and merges them sequentially before merging into the job's own // DecorationConfig. Configs merged later override values from earlier configs. 
-func (p *Plank) mergeDefaultDecorationConfig(repo, cluster string, jobDC *prowapi.DecorationConfig) *prowapi.DecorationConfig { +func (p *Plank) mergeDefaultDecorationConfig( + repo, cluster string, + jobDC *prowapi.DecorationConfig, +) *prowapi.DecorationConfig { var merged *prowapi.DecorationConfig for _, entry := range p.DefaultDecorationConfigs { if entry.matches(repo, cluster) { @@ -845,7 +879,10 @@ func (p *Plank) GuessDefaultDecorationConfig(repo, cluster string) *prowapi.Deco // GuessDefaultDecorationConfig attempts to find the resolved default decoration // config for a given repo, cluster and job DecorationConfig. It is primarily used for best effort // guesses about GCS configuration for undecorated jobs. -func (p *Plank) GuessDefaultDecorationConfigWithJobDC(repo, cluster string, jobDC *prowapi.DecorationConfig) *prowapi.DecorationConfig { +func (p *Plank) GuessDefaultDecorationConfigWithJobDC( + repo, cluster string, + jobDC *prowapi.DecorationConfig, +) *prowapi.DecorationConfig { return p.mergeDefaultDecorationConfig(repo, cluster, jobDC) } @@ -855,11 +892,13 @@ func (p *Plank) GuessDefaultDecorationConfigWithJobDC(repo, cluster string, jobD func defaultDecorationMapToSlice(m map[string]*prowapi.DecorationConfig) []*DefaultDecorationConfigEntry { var entries []*DefaultDecorationConfigEntry add := func(repo string, dc *prowapi.DecorationConfig) { - entries = append(entries, &DefaultDecorationConfigEntry{ - OrgRepo: repo, - Cluster: "", - Config: dc, - }) + entries = append( + entries, &DefaultDecorationConfigEntry{ + OrgRepo: repo, + Cluster: "", + Config: dc, + }, + ) } // Ensure "*" comes first... if dc, ok := m["*"]; ok { @@ -1270,7 +1309,10 @@ type Deck struct { // Validate performs validation and sanitization on the Deck object. 
 func (d *Deck) Validate() error {
 	if len(d.AdditionalAllowedBuckets) > 0 && !d.shouldValidateStorageBuckets() {
-		return fmt.Errorf("deck.skip_storage_path_validation is enabled despite deck.additional_allowed_buckets being configured: %v", d.AdditionalAllowedBuckets)
+		return fmt.Errorf(
+			"deck.skip_storage_path_validation is enabled despite deck.additional_allowed_buckets being configured: %v",
+			d.AdditionalAllowedBuckets,
+		)
 	}

 	for k, config := range d.DefaultRerunAuthConfigs {
@@ -1287,7 +1329,10 @@ type notAllowedBucketError struct {
 }

 func (ne notAllowedBucketError) Error() string {
-	return fmt.Sprintf("bucket not in allowed list; you may allow it by including it in `deck.additional_allowed_buckets`: %s", ne.err.Error())
+	return fmt.Sprintf(
+		"bucket not in allowed list; you may allow it by including it in `deck.additional_allowed_buckets`: %s",
+		ne.err.Error(),
+	)
 }

 func (notAllowedBucketError) Is(err error) bool {
@@ -1317,7 +1362,13 @@ func (c *Config) ValidateStorageBucket(bucketName string) error {
 		bucketName = alias
 	}
 	if !c.Deck.AllKnownStorageBuckets.Has(bucketName) {
-		return NotAllowedBucketError(fmt.Errorf("bucket %q not in allowed list (%v)", bucketName, sets.List(c.Deck.AllKnownStorageBuckets)))
+		return NotAllowedBucketError(
+			fmt.Errorf(
+				"bucket %q not in allowed list (%v)",
+				bucketName,
+				sets.List(c.Deck.AllKnownStorageBuckets),
+			),
+		)
 	}
 	return nil
 }
@@ -1448,11 +1499,13 @@ func defaultRerunAuthMapToSlice(m map[string]prowapi.RerunAuthConfig) ([]*Defaul
 	mLength := len(m)
 	var entries []*DefaultRerunAuthConfigEntry
 	add := func(repo string, rac prowapi.RerunAuthConfig) {
-		entries = append(entries, &DefaultRerunAuthConfigEntry{
-			OrgRepo: repo,
-			Cluster: "",
-			Config: &rac,
-		})
+		entries = append(
+			entries, &DefaultRerunAuthConfigEntry{
+				OrgRepo: repo,
+				Cluster: "",
+				Config: &rac,
+			},
+		)
 	}

 	// Ensure "" comes first...
@@ -1634,25 +1687,137 @@ func (cfg *SlackReporter) DefaultAndValidate() error {
 	return nil
 }

+// DingTalkReporter represents the config for the DingTalk reporter. The token can be overridden
+// on the job via the .reporter_config.dingtalk.token property.
+type DingTalkReporter struct {
+	JobTypesToReport []prowapi.ProwJobType `json:"job_types_to_report,omitempty"`
+	prowapi.DingTalkReporterConfig `json:",inline"`
+}
+
+// DingTalkReporterConfigs represents the config for the DingTalk reporter(s).
+// Use `org/repo`, `org` or `*` as key and a `DingTalkReporter` struct as value.
+type DingTalkReporterConfigs map[string]DingTalkReporter
+
+func (cfg DingTalkReporterConfigs) mergeFrom(additional *DingTalkReporterConfigs) error {
+	if additional == nil {
+		return nil
+	}
+
+	var errs []error
+	for orgOrRepo, dingTalkReporter := range *additional {
+		if _, alreadyConfigured := cfg[orgOrRepo]; alreadyConfigured {
+			errs = append(errs, fmt.Errorf("config for org or repo %s passed more than once", orgOrRepo))
+			continue
+		}
+		cfg[orgOrRepo] = dingTalkReporter
+	}
+
+	return utilerrors.NewAggregate(errs)
+}
+
+func (cfg DingTalkReporterConfigs) GetDingTalkReporter(refs *prowapi.Refs) DingTalkReporter {
+	if refs == nil {
+		return cfg["*"]
+	}
+
+	if dingtalk, ok := cfg[fmt.Sprintf("%s/%s", refs.Org, refs.Repo)]; ok {
+		return dingtalk
+	}
+
+	if dingtalk, ok := cfg[refs.Org]; ok {
+		return dingtalk
+	}
+
+	return cfg["*"]
+}
+
+func (cfg DingTalkReporterConfigs) HasGlobalConfig() bool {
+	_, exists := cfg["*"]
+	return exists
+}
+
+func (cfg *DingTalkReporter) DefaultAndValidate() error {
+	// Default ReportTemplate.
+ if cfg.ReportTemplate == "" { + cfg.ReportTemplate = `{{ $repo := "" }}{{with .Spec.Refs}}{{$repo = .Repo}}{{end}}{{if eq $repo ""}}{{if .Spec.ExtraRefs}}{{with index .Spec.ExtraRefs 0}}{{$repo = .Repo}}{{end}}{{end}}{{end}}## Repo: {{ $repo }} +--- +- Job: {{.Spec.Job}} +- Type: {{.Spec.Type}} +- State: {{if eq .Status.State "triggered"}}**{{.Status.State}}**{{end}}{{if eq .Status.State "pending"}}**{{.Status.State}}**{{end}}{{if eq .Status.State "success"}}**{{.Status.State}}**{{end}}{{if eq .Status.State "failure"}}**{{.Status.State}}**{{end}}{{if eq .Status.State "aborted"}}**{{.Status.State}}**{{end}}{{if eq .Status.State "error"}}**{{.Status.State}}**{{end}} +- Log: [View logs]({{.Status.URL}})` + } + + if cfg.Token == "" { + return errors.New("token must be set") + } + + // Validate ReportTemplate. + tmpl, err := template.New("").Parse(cfg.ReportTemplate) + if err != nil { + return fmt.Errorf("failed to parse template: %w", err) + } + if err := tmpl.Execute(&bytes.Buffer{}, &prowapi.ProwJob{}); err != nil { + return fmt.Errorf("failed to execute report_template: %w", err) + } + + return nil +} + // Load loads and parses the config at path. -func Load(prowConfig, jobConfig string, supplementalProwConfigDirs []string, supplementalProwConfigsFileNameSuffix string, additionals ...func(*Config) error) (c *Config, err error) { - return loadWithYamlOpts(nil, prowConfig, jobConfig, supplementalProwConfigDirs, supplementalProwConfigsFileNameSuffix, additionals...) +func Load( + prowConfig, jobConfig string, + supplementalProwConfigDirs []string, + supplementalProwConfigsFileNameSuffix string, + additionals ...func(*Config) error, +) (c *Config, err error) { + return loadWithYamlOpts( + nil, + prowConfig, + jobConfig, + supplementalProwConfigDirs, + supplementalProwConfigsFileNameSuffix, + additionals..., + ) } // LoadStrict loads and parses the config at path. // Unlike Load it unmarshalls yaml with strict parsing. -func LoadStrict(prowConfig, jobConfig string, supplementalProwConfigDirs []string, supplementalProwConfigsFileNameSuffix string, additionals ...func(*Config) error) (c *Config, err error) { - return loadWithYamlOpts([]yaml.JSONOpt{yaml.DisallowUnknownFields}, prowConfig, jobConfig, supplementalProwConfigDirs, supplementalProwConfigsFileNameSuffix, additionals...) -} - -func loadWithYamlOpts(yamlOpts []yaml.JSONOpt, prowConfig, jobConfig string, supplementalProwConfigDirs []string, supplementalProwConfigsFileNameSuffix string, additionals ...func(*Config) error) (c *Config, err error) { +func LoadStrict( + prowConfig, jobConfig string, + supplementalProwConfigDirs []string, + supplementalProwConfigsFileNameSuffix string, + additionals ...func(*Config) error, +) (c *Config, err error) { + return loadWithYamlOpts( + []yaml.JSONOpt{yaml.DisallowUnknownFields}, + prowConfig, + jobConfig, + supplementalProwConfigDirs, + supplementalProwConfigsFileNameSuffix, + additionals..., + ) +} + +func loadWithYamlOpts( + yamlOpts []yaml.JSONOpt, + prowConfig, jobConfig string, + supplementalProwConfigDirs []string, + supplementalProwConfigsFileNameSuffix string, + additionals ...func(*Config) error, +) (c *Config, err error) { // we never want config loading to take down the prow components. defer func() { if r := recover(); r != nil { c, err = nil, fmt.Errorf("panic loading config: %v\n%s", r, string(debug.Stack())) } }() - c, err = loadConfig(prowConfig, jobConfig, supplementalProwConfigDirs, supplementalProwConfigsFileNameSuffix, yamlOpts...) 
+ c, err = loadConfig( + prowConfig, + jobConfig, + supplementalProwConfigDirs, + supplementalProwConfigsFileNameSuffix, + yamlOpts..., + ) if err != nil { return nil, err } @@ -1702,67 +1867,80 @@ func ReadJobConfig(jobConfig string, yamlOpts ...yaml.JSONOpt) (JobConfig, error allStart := time.Now() jc := JobConfig{} var errs []error - err = filepath.Walk(jobConfig, func(path string, info os.FileInfo, err error) error { - if err != nil { - logrus.WithError(err).Errorf("walking path %q.", path) - // bad file should not stop us from parsing the directory. - return nil - } + err = filepath.Walk( + jobConfig, func(path string, info os.FileInfo, err error) error { + if err != nil { + logrus.WithError(err).Errorf("walking path %q.", path) + // bad file should not stop us from parsing the directory. + return nil + } - if strings.HasPrefix(info.Name(), "..") { - // kubernetes volumes also include files we - // should not look be looking into for keys. - if info.IsDir() { - return filepath.SkipDir + if strings.HasPrefix(info.Name(), "..") { + // kubernetes volumes also include files we + // should not look be looking into for keys. + if info.IsDir() { + return filepath.SkipDir + } + return nil + } + if filepath.Ext(path) != ".yaml" && filepath.Ext(path) != ".yml" { + return nil + } + // Use 'Match' directly because 'Ignore' and 'Include' don't work properly for repositories. + match := prowIgnore.Match(path) + if match != nil && match.Ignore() { + return nil } - return nil - } - if filepath.Ext(path) != ".yaml" && filepath.Ext(path) != ".yml" { - return nil - } - // Use 'Match' directly because 'Ignore' and 'Include' don't work properly for repositories. - match := prowIgnore.Match(path) - if match != nil && match.Ignore() { - return nil - } - if info.IsDir() { - return nil - } + if info.IsDir() { + return nil + } - base := filepath.Base(path) - if uniqueBasenames.Has(base) { - errs = append(errs, fmt.Errorf("duplicated basename is not allowed: %s", base)) - return nil - } - uniqueBasenames.Insert(base) + base := filepath.Base(path) + if uniqueBasenames.Has(base) { + errs = append(errs, fmt.Errorf("duplicated basename is not allowed: %s", base)) + return nil + } + uniqueBasenames.Insert(base) - fileStart := time.Now() - var subConfig JobConfig - if err := yamlToConfig(path, &subConfig, yamlOpts...); err != nil { - errs = append(errs, err) + fileStart := time.Now() + var subConfig JobConfig + if err := yamlToConfig(path, &subConfig, yamlOpts...); err != nil { + errs = append(errs, err) + return nil + } + jc, err = mergeJobConfigs(jc, subConfig) + if err == nil { + logrus.WithField("jobConfig", path).WithField( + "duration", + time.Since(fileStart), + ).Traceln("config loaded") + jobConfigCount++ + } else { + errs = append(errs, err) + } return nil - } - jc, err = mergeJobConfigs(jc, subConfig) - if err == nil { - logrus.WithField("jobConfig", path).WithField("duration", time.Since(fileStart)).Traceln("config loaded") - jobConfigCount++ - } else { - errs = append(errs, err) - } - return nil - }) + }, + ) err = utilerrors.NewAggregate(append(errs, err)) if err != nil { return JobConfig{}, err } - logrus.WithField("count", jobConfigCount).WithField("duration", time.Since(allStart)).Traceln("jobConfigs loaded successfully") + logrus.WithField("count", jobConfigCount).WithField( + "duration", + time.Since(allStart), + ).Traceln("jobConfigs loaded successfully") return jc, nil } // loadConfig loads one or multiple config files and returns a config object. 
-func loadConfig(prowConfig, jobConfig string, additionalProwConfigDirs []string, supplementalProwConfigsFileNameSuffix string, yamlOpts ...yaml.JSONOpt) (*Config, error) { +func loadConfig( + prowConfig, jobConfig string, + additionalProwConfigDirs []string, + supplementalProwConfigsFileNameSuffix string, + yamlOpts ...yaml.JSONOpt, +) (*Config, error) { stat, err := os.Stat(prowConfig) if err != nil { return nil, err @@ -1781,49 +1959,59 @@ func loadConfig(prowConfig, jobConfig string, additionalProwConfigDirs []string, allStart := time.Now() for _, additionalProwConfigDir := range additionalProwConfigDirs { var errs []error - errs = append(errs, filepath.Walk(additionalProwConfigDir, func(path string, info os.FileInfo, err error) error { - if err != nil { - // Finish walking and handle all errors in bulk at the end, otherwise this is annoying as a user. - errs = append(errs, err) - return nil - } - // Kubernetes configmap mounts create symlinks for the configmap keys that point to files prefixed with '..'. - // This allows it to do atomic changes by changing the symlink to a new target when the configmap content changes. - // This means that we should ignore the '..'-prefixed files, otherwise we might end up reading a half-written file and will - // get duplicate data. - if strings.HasPrefix(info.Name(), "..") { - if info.IsDir() { - return filepath.SkipDir - } - return nil - } - - if info.IsDir() || !strings.HasSuffix(path, supplementalProwConfigsFileNameSuffix) { - return nil - } - - fileStart := time.Now() - var cfg ProwConfig - if err := yamlToConfig(path, &cfg); err != nil { - errs = append(errs, err) - return nil - } - - if err := nc.ProwConfig.mergeFrom(&cfg); err != nil { - errs = append(errs, fmt.Errorf("failed to merge in config from %s: %w", path, err)) - } else { - logrus.WithField("prowConfig", path).WithField("duration", time.Since(fileStart)).Traceln("config loaded") - prowConfigCount++ - } - - return nil - })) + errs = append( + errs, filepath.Walk( + additionalProwConfigDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + // Finish walking and handle all errors in bulk at the end, otherwise this is annoying as a user. + errs = append(errs, err) + return nil + } + // Kubernetes configmap mounts create symlinks for the configmap keys that point to files prefixed with '..'. + // This allows it to do atomic changes by changing the symlink to a new target when the configmap content changes. + // This means that we should ignore the '..'-prefixed files, otherwise we might end up reading a half-written file and will + // get duplicate data. 
+ if strings.HasPrefix(info.Name(), "..") { + if info.IsDir() { + return filepath.SkipDir + } + return nil + } + + if info.IsDir() || !strings.HasSuffix(path, supplementalProwConfigsFileNameSuffix) { + return nil + } + + fileStart := time.Now() + var cfg ProwConfig + if err := yamlToConfig(path, &cfg); err != nil { + errs = append(errs, err) + return nil + } + + if err := nc.ProwConfig.mergeFrom(&cfg); err != nil { + errs = append(errs, fmt.Errorf("failed to merge in config from %s: %w", path, err)) + } else { + logrus.WithField("prowConfig", path).WithField( + "duration", + time.Since(fileStart), + ).Traceln("config loaded") + prowConfigCount++ + } + + return nil + }, + ), + ) if err := utilerrors.NewAggregate(errs); err != nil { return nil, err } } - logrus.WithField("count", prowConfigCount).WithField("duration", time.Since(allStart)).Traceln("prowConfigs loaded successfully") + logrus.WithField("count", prowConfigCount).WithField( + "duration", + time.Since(allStart), + ).Traceln("prowConfigs loaded successfully") if err := parseProwConfig(&nc); err != nil { return nil, err @@ -1871,11 +2059,13 @@ func loadConfig(prowConfig, jobConfig string, additionalProwConfigDirs []string, return nil, errors.New("pubsub_subscriptions and pubsub_triggers are mutually exclusive") } for proj, topics := range nc.PubSubSubscriptions { - nc.PubSubTriggers = append(nc.PubSubTriggers, PubSubTrigger{ - Project: proj, - Topics: topics, - AllowedClusters: []string{"*"}, - }) + nc.PubSubTriggers = append( + nc.PubSubTriggers, PubSubTrigger{ + Project: proj, + Topics: topics, + AllowedClusters: []string{"*"}, + }, + ) } } for i, trigger := range nc.PubSubTriggers { @@ -1968,12 +2158,14 @@ func ReadFileMaybeGZIP(path string) ([]byte, error) { } func (c *Config) mergeJobConfig(jc JobConfig) error { - m, err := mergeJobConfigs(JobConfig{ - Presets: c.Presets, - PresubmitsStatic: c.PresubmitsStatic, - Periodics: c.Periodics, - PostsubmitsStatic: c.PostsubmitsStatic, - }, jc) + m, err := mergeJobConfigs( + JobConfig{ + Presets: c.Presets, + PresubmitsStatic: c.PresubmitsStatic, + Periodics: c.Periodics, + PostsubmitsStatic: c.PostsubmitsStatic, + }, jc, + ) if err != nil { return err } @@ -2176,7 +2368,14 @@ func (c *Config) validateComponentConfig() error { if c.ManagedWebhooks.OrgRepoConfig != nil { for repoName, repoValue := range c.ManagedWebhooks.OrgRepoConfig { if repoValue.TokenCreatedAfter.After(time.Now()) { - validationErrs = append(validationErrs, fmt.Errorf("token_created_after %s can be no later than current time for repo/org %s", repoValue.TokenCreatedAfter, repoName)) + validationErrs = append( + validationErrs, + fmt.Errorf( + "token_created_after %s can be no later than current time for repo/org %s", + repoValue.TokenCreatedAfter, + repoName, + ), + ) } } if len(validationErrs) > 0 { @@ -2193,6 +2392,15 @@ func (c *Config) validateComponentConfig() error { } } + if c.DingTalkReporterConfigs != nil { + for k, config := range c.DingTalkReporterConfigs { + if err := config.DefaultAndValidate(); err != nil { + return fmt.Errorf("failed to validate dingtalkreporter config: %w", err) + } + c.DingTalkReporterConfigs[k] = config + } + } + if err := c.Deck.FinalizeDefaultRerunAuthConfigs(); err != nil { return err } @@ -2298,7 +2506,15 @@ func (c Config) validatePresubmits(presubmits []Presubmit) error { continue } if otherPS.Context == ps.Context { - errs = append(errs, fmt.Errorf("jobs %s and %s report to the same GitHub context %q", otherPS.Name, ps.Name, otherPS.Context)) + errs = append( + errs, + 
fmt.Errorf( + "jobs %s and %s report to the same GitHub context %q", + otherPS.Name, + ps.Name, + otherPS.Context, + ), + ) } } @@ -2314,7 +2530,13 @@ func (c Config) validatePresubmits(presubmits []Presubmit) error { validPresubmits[ps.Name] = append(validPresubmits[ps.Name], ps) } if duplicatePresubmits.Len() > 0 { - errs = append(errs, fmt.Errorf("duplicated presubmit jobs (consider both inrepo and central config): %v", sortStringSlice(duplicatePresubmits.UnsortedList()))) + errs = append( + errs, + fmt.Errorf( + "duplicated presubmit jobs (consider both inrepo and central config): %v", + sortStringSlice(duplicatePresubmits.UnsortedList()), + ), + ) } return utilerrors.NewAggregate(errs) @@ -2337,8 +2559,10 @@ func ValidateRefs(repo string, jobBase JobBase) error { } if dupes.Len() > 0 { - return fmt.Errorf("invalid job %s on repo %s: the following refs specified more than once: %s", - jobBase.Name, repo, strings.Join(sets.List(dupes), ",")) + return fmt.Errorf( + "invalid job %s on repo %s: the following refs specified more than once: %s", + jobBase.Name, repo, strings.Join(sets.List(dupes), ","), + ) } return nil } @@ -2361,7 +2585,15 @@ func (c Config) validatePostsubmits(postsubmits []Postsubmit) error { continue } if otherPS.Context == ps.Context { - errs = append(errs, fmt.Errorf("jobs %s and %s report to the same GitHub context %q", otherPS.Name, ps.Name, otherPS.Context)) + errs = append( + errs, + fmt.Errorf( + "jobs %s and %s report to the same GitHub context %q", + otherPS.Name, + ps.Name, + otherPS.Context, + ), + ) } } @@ -2377,7 +2609,13 @@ func (c Config) validatePostsubmits(postsubmits []Postsubmit) error { validPostsubmits[ps.Name] = append(validPostsubmits[ps.Name], ps) } if duplicatePostsubmits.Len() > 0 { - errs = append(errs, fmt.Errorf("duplicated postsubmit jobs (consider both inrepo and central config): %v", sortStringSlice(duplicatePostsubmits.UnsortedList()))) + errs = append( + errs, + fmt.Errorf( + "duplicated postsubmit jobs (consider both inrepo and central config): %v", + sortStringSlice(duplicatePostsubmits.UnsortedList()), + ), + ) } return utilerrors.NewAggregate(errs) @@ -2411,11 +2649,17 @@ func (c Config) validatePeriodics(periodics []Periodic) error { seen += 1 } if seen > 1 { - errs = append(errs, fmt.Errorf("cron, interval, and minimum_interval are mutually exclusive in periodic %s", p.Name)) + errs = append( + errs, + fmt.Errorf("cron, interval, and minimum_interval are mutually exclusive in periodic %s", p.Name), + ) continue } if seen == 0 { - errs = append(errs, fmt.Errorf("at least one of cron, interval, or minimum_interval must be set in periodic %s", p.Name)) + errs = append( + errs, + fmt.Errorf("at least one of cron, interval, or minimum_interval must be set in periodic %s", p.Name), + ) continue } @@ -2506,7 +2750,11 @@ func parseProwConfig(c *Config) error { } if len(c.GitHubReporter.JobTypesToReport) == 0 { - c.GitHubReporter.JobTypesToReport = append(c.GitHubReporter.JobTypesToReport, prowapi.PresubmitJob, prowapi.PostsubmitJob) + c.GitHubReporter.JobTypesToReport = append( + c.GitHubReporter.JobTypesToReport, + prowapi.PresubmitJob, + prowapi.PostsubmitJob, + ) } // validate entries are valid job types. 
@@ -2681,7 +2929,10 @@ func parseProwConfig(c *Config) error { c.Tide.MaxGoroutines = 20 } if c.Tide.MaxGoroutines <= 0 { - return fmt.Errorf("tide has invalid max_goroutines (%d), it needs to be a positive number", c.Tide.MaxGoroutines) + return fmt.Errorf( + "tide has invalid max_goroutines (%d), it needs to be a positive number", + c.Tide.MaxGoroutines, + ) } if len(c.Tide.TargetURLs) > 0 && c.Tide.TargetURL != "" { @@ -2809,14 +3060,18 @@ func parseTideMergeType(tideMergeTypes map[string]TideOrgMergeType) utilerrors.A for org, orgConfig := range tideMergeTypes { // Validate orgs if orgConfig.MergeType != "" && !isTideMergeTypeValid(orgConfig.MergeType) { - mergeTypeErrs = append(mergeTypeErrs, - fmt.Errorf("merge type %q for %s is not a valid type", orgConfig.MergeType, org)) + mergeTypeErrs = append( + mergeTypeErrs, + fmt.Errorf("merge type %q for %s is not a valid type", orgConfig.MergeType, org), + ) } for repo, repoConfig := range orgConfig.Repos { // Validate repos if repoConfig.MergeType != "" && !isTideMergeTypeValid(repoConfig.MergeType) { - mergeTypeErrs = append(mergeTypeErrs, - fmt.Errorf("merge type %q for %s/%s is not a valid type", repoConfig.MergeType, org, repo)) + mergeTypeErrs = append( + mergeTypeErrs, + fmt.Errorf("merge type %q for %s/%s is not a valid type", repoConfig.MergeType, org, repo), + ) } for branch, branchConfig := range repoConfig.Branches { // Validate branches @@ -2827,9 +3082,13 @@ func parseTideMergeType(tideMergeTypes map[string]TideOrgMergeType) utilerrors.A branchConfig.Regexpr = regexpr } if !isTideMergeTypeValid(branchConfig.MergeType) { - mergeTypeErrs = append(mergeTypeErrs, - fmt.Errorf("merge type %q for %s/%s@%s is not a valid type", - branchConfig.MergeType, org, repo, branch)) + mergeTypeErrs = append( + mergeTypeErrs, + fmt.Errorf( + "merge type %q for %s/%s@%s is not a valid type", + branchConfig.MergeType, org, repo, branch, + ), + ) } repoConfig.Branches[branch] = branchConfig } @@ -2933,7 +3192,11 @@ func resolvePresets(name string, labels map[string]string, spec *v1.PodSpec, pre var ReProwExtraRef = regexp.MustCompile(`PROW_EXTRA_GIT_REF_(\d+)`) -func ValidatePipelineRunSpec(jobType prowapi.ProwJobType, extraRefs []prowapi.Refs, spec *pipelinev1.PipelineRunSpec) error { +func ValidatePipelineRunSpec( + jobType prowapi.ProwJobType, + extraRefs []prowapi.Refs, + spec *pipelinev1.PipelineRunSpec, +) error { if spec == nil { return nil } @@ -2946,7 +3209,10 @@ func ValidatePipelineRunSpec(jobType prowapi.ProwJobType, extraRefs []prowapi.Re for _, task := range spec.PipelineSpec.Tasks { // Validate that periodic jobs don't request an implicit git ref. 
if jobType == prowapi.PeriodicJob && task.TaskRef.Name == ProwImplicitGitResource { - return fmt.Errorf("periodic jobs do not have an implicit git ref to replace %s", ProwImplicitGitResource) + return fmt.Errorf( + "periodic jobs do not have an implicit git ref to replace %s", + ProwImplicitGitResource, + ) } match := ReProwExtraRef.FindStringSubmatch(task.TaskRef.Name) @@ -2971,7 +3237,11 @@ func ValidatePipelineRunSpec(jobType prowapi.ProwJobType, extraRefs []prowapi.Re for i := range extraIndexes { strs = append(strs, strconv.Itoa(i)) } - return fmt.Errorf("%d extra_refs are specified, but the following PROW_EXTRA_GIT_REF_* indexes are used: %s", len(extraRefs), strings.Join(strs, ", ")) + return fmt.Errorf( + "%d extra_refs are specified, but the following PROW_EXTRA_GIT_REF_* indexes are used: %s", + len(extraRefs), + strings.Join(strs, ", "), + ) } return nil } @@ -2989,18 +3259,31 @@ func validatePodSpec(jobType prowapi.ProwJobType, spec *v1.PodSpec, decorationCo if n := len(spec.Containers); n < 1 { // We must return here to not cause an out of bounds panic in the remaining validation. - return utilerrors.NewAggregate(append(errs, fmt.Errorf("pod spec must specify at least 1 container, found: %d", n))) + return utilerrors.NewAggregate( + append( + errs, + fmt.Errorf("pod spec must specify at least 1 container, found: %d", n), + ), + ) } if n := len(spec.Containers); n > 1 && decorationConfig == nil { - return utilerrors.NewAggregate(append(errs, fmt.Errorf("pod utility decoration must be enabled to use multiple containers: %d", n))) + return utilerrors.NewAggregate( + append( + errs, + fmt.Errorf("pod utility decoration must be enabled to use multiple containers: %d", n), + ), + ) } if len(spec.Containers) > 1 { containerNames := sets.Set[string]{} for _, container := range spec.Containers { if container.Name == "" { - errs = append(errs, fmt.Errorf("container does not have name. all containers must have names when defining multiple containers")) + errs = append( + errs, + fmt.Errorf("container does not have name. all containers must have names when defining multiple containers"), + ) } if containerNames.Has(container.Name) { @@ -3009,7 +3292,13 @@ func validatePodSpec(jobType prowapi.ProwJobType, spec *v1.PodSpec, decorationCo containerNames.Insert(container.Name) if decorate.PodUtilsContainerNames().Has(container.Name) { - errs = append(errs, fmt.Errorf("container name %s is a reserved for decoration. please specify a different container name that does not conflict with pod utility container names", container.Name)) + errs = append( + errs, + fmt.Errorf( + "container name %s is a reserved for decoration. 
please specify a different container name that does not conflict with pod utility container names", + container.Name, + ), + ) } } } @@ -3053,7 +3342,10 @@ func validatePodSpec(jobType prowapi.ProwJobType, spec *v1.PodSpec, decorationCo errs = append(errs, fmt.Errorf("volumeMount name %s is reserved for decoration", mount.Name)) } if decorate.VolumeMountPathsOnTestContainer().Has(mount.MountPath) { - errs = append(errs, fmt.Errorf("mount %s at %s conflicts with decoration mount", mount.Name, mount.MountPath)) + errs = append( + errs, + fmt.Errorf("mount %s at %s conflicts with decoration mount", mount.Name, mount.MountPath), + ) } } } @@ -3064,14 +3356,23 @@ func validatePodSpec(jobType prowapi.ProwJobType, spec *v1.PodSpec, decorationCo func validateAlwaysRun(job Postsubmit) error { if job.AlwaysRun != nil && *job.AlwaysRun { if job.RunIfChanged != "" { - return fmt.Errorf("job %s is set to always run but also declares run_if_changed targets, which are mutually exclusive", job.Name) + return fmt.Errorf( + "job %s is set to always run but also declares run_if_changed targets, which are mutually exclusive", + job.Name, + ) } if job.SkipIfOnlyChanged != "" { - return fmt.Errorf("job %s is set to always run but also declares skip_if_only_changed targets, which are mutually exclusive", job.Name) + return fmt.Errorf( + "job %s is set to always run but also declares skip_if_only_changed targets, which are mutually exclusive", + job.Name, + ) } } if job.RunIfChanged != "" && job.SkipIfOnlyChanged != "" { - return fmt.Errorf("job %s declares run_if_changed and skip_if_only_changed, which are mutually exclusive", job.Name) + return fmt.Errorf( + "job %s declares run_if_changed and skip_if_only_changed, which are mutually exclusive", + job.Name, + ) } return nil } @@ -3079,18 +3380,30 @@ func validateAlwaysRun(job Postsubmit) error { func validateTriggering(job Presubmit) error { if job.AlwaysRun { if job.RunIfChanged != "" { - return fmt.Errorf("job %s is set to always run but also declares run_if_changed targets, which are mutually exclusive", job.Name) + return fmt.Errorf( + "job %s is set to always run but also declares run_if_changed targets, which are mutually exclusive", + job.Name, + ) } if job.SkipIfOnlyChanged != "" { - return fmt.Errorf("job %s is set to always run but also declares skip_if_only_changed targets, which are mutually exclusive", job.Name) + return fmt.Errorf( + "job %s is set to always run but also declares skip_if_only_changed targets, which are mutually exclusive", + job.Name, + ) } } if job.RunIfChanged != "" && job.SkipIfOnlyChanged != "" { - return fmt.Errorf("job %s declares run_if_changed and skip_if_only_changed, which are mutually exclusive", job.Name) + return fmt.Errorf( + "job %s declares run_if_changed and skip_if_only_changed, which are mutually exclusive", + job.Name, + ) } if (job.Trigger != "" && job.RerunCommand == "") || (job.Trigger == "" && job.RerunCommand != "") { - return fmt.Errorf("either both of job.Trigger and job.RerunCommand must be set, wasnt the case for job %q", job.Name) + return fmt.Errorf( + "either both of job.Trigger and job.RerunCommand must be set, wasnt the case for job %q", + job.Name, + ) } return nil @@ -3105,7 +3418,10 @@ func validateReporting(j JobBase, r Reporter) error { } for label, value := range j.Labels { if label == kube.GerritReportLabel && value != "" { - return fmt.Errorf("gerrit report label %s set to non-empty string but job is configured to skip reporting.", label) + return fmt.Errorf( + "gerrit report label %s set 
to non-empty string but job is configured to skip reporting.", + label, + ) } } return nil @@ -3127,13 +3443,19 @@ func ValidateController(c *Controller, templateFuncMaps ...template.FuncMap) err return err } if c.MaxConcurrency < 0 { - return fmt.Errorf("controller has invalid max_concurrency (%d), it needs to be a non-negative number", c.MaxConcurrency) + return fmt.Errorf( + "controller has invalid max_concurrency (%d), it needs to be a non-negative number", + c.MaxConcurrency, + ) } if c.MaxGoroutines == 0 { c.MaxGoroutines = 20 } if c.MaxGoroutines <= 0 { - return fmt.Errorf("controller has invalid max_goroutines (%d), it needs to be a positive number", c.MaxGoroutines) + return fmt.Errorf( + "controller has invalid max_goroutines (%d), it needs to be a positive number", + c.MaxGoroutines, + ) } return nil } @@ -3230,7 +3552,12 @@ func SetPresubmitRegexes(js []Presubmit) error { return fmt.Errorf("could not compile trigger regex for %s: %w", j.Name, err) } if !js[i].re.MatchString(j.RerunCommand) { - return fmt.Errorf("for job %s, rerun command \"%s\" does not match trigger \"%s\"", j.Name, j.RerunCommand, j.Trigger) + return fmt.Errorf( + "for job %s, rerun command \"%s\" does not match trigger \"%s\"", + j.Name, + j.RerunCommand, + j.Trigger, + ) } b, err := setBrancherRegexes(j.Brancher) if err != nil { @@ -3346,14 +3673,25 @@ func StringsToOrgRepos(vs []string) []OrgRepo { // If you extend this, please also extend HasConfigFor accordingly. func (pc *ProwConfig) mergeFrom(additional *ProwConfig) error { emptyReference := &ProwConfig{ - BranchProtection: additional.BranchProtection, - Tide: Tide{TideGitHubConfig: TideGitHubConfig{MergeType: additional.Tide.MergeType, Queries: additional.Tide.Queries}}, + BranchProtection: additional.BranchProtection, + Tide: Tide{ + TideGitHubConfig: TideGitHubConfig{ + MergeType: additional.Tide.MergeType, + Queries: additional.Tide.Queries, + }, + }, SlackReporterConfigs: additional.SlackReporterConfigs, } var errs []error if diff := cmp.Diff(additional, emptyReference, DefaultDiffOpts...); diff != "" { - errs = append(errs, fmt.Errorf("only 'branch-protection', 'slack_reporter_configs', 'tide.merge_method' and 'tide.queries' may be set via additional config, all other fields have no merging logic yet. Diff: %s", diff)) + errs = append( + errs, + fmt.Errorf( + "only 'branch-protection', 'slack_reporter_configs', 'dingtalk_reporter_configs', 'tide.merge_method' and 'tide.queries' may be set via additional config, all other fields have no merging logic yet. Diff: %s", + diff, + ), + ) } if err := pc.BranchProtection.merge(&additional.BranchProtection); err != nil { errs = append(errs, fmt.Errorf("failed to merge branch protection config: %w", err)) @@ -3368,6 +3706,12 @@ func (pc *ProwConfig) mergeFrom(additional *ProwConfig) error { errs = append(errs, fmt.Errorf("failed to merge slack-reporter config: %w", err)) } + if pc.DingTalkReporterConfigs == nil { + pc.DingTalkReporterConfigs = additional.DingTalkReporterConfigs + } else if err := pc.DingTalkReporterConfigs.mergeFrom(&additional.DingTalkReporterConfigs); err != nil { + errs = append(errs, fmt.Errorf("failed to merge ding-talk config: %w", err)) + } + return utilerrors.NewAggregate(errs) } @@ -3401,7 +3745,10 @@ func BaseSHAFromContextDescription(description string) string { // SHA1s are always 40 digits long. if len(split) != 2 || len(split[1]) != 40 { // Fallback to deprecated one if available. 
- if split = strings.Split(description, contextDescriptionBaseSHADelimiterDeprecated); len(split) == 2 && len(split[1]) == 40 { + if split = strings.Split( + description, + contextDescriptionBaseSHADelimiterDeprecated, + ); len(split) == 2 && len(split[1]) == 40 { return split[1] } return "" @@ -3473,8 +3820,13 @@ func (pc *ProwConfig) hasGlobalConfig() bool { return true } emptyReference := &ProwConfig{ - BranchProtection: pc.BranchProtection, - Tide: Tide{TideGitHubConfig: TideGitHubConfig{MergeType: pc.Tide.MergeType, Queries: pc.Tide.Queries}}, + BranchProtection: pc.BranchProtection, + Tide: Tide{ + TideGitHubConfig: TideGitHubConfig{ + MergeType: pc.Tide.MergeType, + Queries: pc.Tide.Queries, + }, + }, SlackReporterConfigs: pc.SlackReporterConfigs, } return cmp.Diff(pc, emptyReference, DefaultDiffOpts...) != "" @@ -3493,35 +3845,39 @@ func (tm tideQueryMap) queries() (TideQueries, error) { if err := json.Unmarshal([]byte(k), &queryConfig); err != nil { return nil, fmt.Errorf("failed to unmarshal %q: %w", k, err) } - result = append(result, TideQuery{ - Orgs: v.Orgs, - Repos: v.Repos, - ExcludedRepos: v.ExcludedRepos, - Author: queryConfig.Author, - ExcludedBranches: queryConfig.ExcludedBranches, - IncludedBranches: queryConfig.IncludedBranches, - Labels: queryConfig.Labels, - MissingLabels: queryConfig.MissingLabels, - Milestone: queryConfig.Milestone, - ReviewApprovedRequired: queryConfig.ReviewApprovedRequired, - }) + result = append( + result, TideQuery{ + Orgs: v.Orgs, + Repos: v.Repos, + ExcludedRepos: v.ExcludedRepos, + Author: queryConfig.Author, + ExcludedBranches: queryConfig.ExcludedBranches, + IncludedBranches: queryConfig.IncludedBranches, + Labels: queryConfig.Labels, + MissingLabels: queryConfig.MissingLabels, + Milestone: queryConfig.Milestone, + ReviewApprovedRequired: queryConfig.ReviewApprovedRequired, + }, + ) } // Sort the queries here to make sure that the de-duplication results // in a deterministic order. 
var errs []error - sort.SliceStable(result, func(i, j int) bool { - iSerialized, err := json.Marshal(result[i]) - if err != nil { - errs = append(errs, fmt.Errorf("failed to marshal %+v: %w", result[i], err)) - } - jSerialized, err := json.Marshal(result[j]) - if err != nil { - errs = append(errs, fmt.Errorf("failed to marshal %+v: %w", result[j], err)) - } - return string(iSerialized) < string(jSerialized) - }) + sort.SliceStable( + result, func(i, j int) bool { + iSerialized, err := json.Marshal(result[i]) + if err != nil { + errs = append(errs, fmt.Errorf("failed to marshal %+v: %w", result[i], err)) + } + jSerialized, err := json.Marshal(result[j]) + if err != nil { + errs = append(errs, fmt.Errorf("failed to marshal %+v: %w", result[j], err)) + } + return string(iSerialized) < string(jSerialized) + }, + ) return result, utilerrors.NewAggregate(errs) } diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index aafa6e9fc..a8416c098 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -4733,6 +4733,159 @@ func TestValidateComponentConfig(t *testing.T) { } } +func TestDingTalkReporterValidation(t *testing.T) { + testCases := []struct { + name string + config func() Config + successExpected bool + }{ + { + name: "Valid config w/ wildcard dingtalk_reporter_configs - no error", + config: func() Config { + dingTalkCfg := map[string]DingTalkReporter{ + "*": { + DingTalkReporterConfig: prowapi.DingTalkReporterConfig{ + Token: "my-token", + }, + }, + } + return Config{ + ProwConfig: ProwConfig{ + DingTalkReporterConfigs: dingTalkCfg, + }, + } + }, + successExpected: true, + }, + { + name: "Valid config w/ org/repo dingtalk_reporter_configs - no error", + config: func() Config { + dingTalkCfg := map[string]DingTalkReporter{ + "istio/proxy": { + DingTalkReporterConfig: prowapi.DingTalkReporterConfig{ + Token: "my-token", + }, + }, + } + return Config{ + ProwConfig: ProwConfig{ + DingTalkReporterConfigs: dingTalkCfg, + }, + } + }, + successExpected: true, + }, + { + name: "Valid config w/ repo dingtalk_reporter_configs - no error", + config: func() Config { + dingTalkCfg := map[string]DingTalkReporter{ + "proxy": { + DingTalkReporterConfig: prowapi.DingTalkReporterConfig{ + Token: "my-token", + }, + }, + } + return Config{ + ProwConfig: ProwConfig{ + DingTalkReporterConfigs: dingTalkCfg, + }, + } + }, + successExpected: true, + }, + { + name: "No token w/ dingtalk_reporter_configs - error", + config: func() Config { + dingTalkCfg := map[string]DingTalkReporter{ + "*": { + JobTypesToReport: []prowapi.ProwJobType{"presubmit"}, + }, + } + return Config{ + ProwConfig: ProwConfig{ + DingTalkReporterConfigs: dingTalkCfg, + }, + } + }, + successExpected: false, + }, + { + name: "Empty config - no error", + config: func() Config { + dingTalkCfg := map[string]DingTalkReporter{} + return Config{ + ProwConfig: ProwConfig{ + DingTalkReporterConfigs: dingTalkCfg, + }, + } + }, + successExpected: true, + }, + { + name: "Invalid template - error", + config: func() Config { + dingTalkCfg := map[string]DingTalkReporter{ + "*": { + DingTalkReporterConfig: prowapi.DingTalkReporterConfig{ + Token: "my-token", + ReportTemplate: "{{ if .Spec.Name}}", + }, + }, + } + return Config{ + ProwConfig: ProwConfig{ + DingTalkReporterConfigs: dingTalkCfg, + }, + } + }, + successExpected: false, + }, + { + name: "Template accessed invalid property - error", + config: func() Config { + dingTalkCfg := map[string]DingTalkReporter{ + "*": { + DingTalkReporterConfig: prowapi.DingTalkReporterConfig{ + 
Token:          "my-token",
+					ReportTemplate: `{{{ $repo := "" }}{{with .Spec.Refs}}{{$repo = .Repo}}{{end}}{{if eq $repo ""}}{{with index .Spec.ExtraRefs 0}}{{$repo = .Repo}}{{end}}{{end}}## Repo: {{ $repo }}
+---
+- Job: {{.Spec.Job}}
+- Type: {{.Spec.Type}}
+- State: {{if eq .Status.State "triggered"}}**{{.Status.State}}**{{end}}{{if eq .Status.State "pending"}}**{{.Status.State}}**{{end}}{{if eq .Status.State "success"}}**{{.Status.State}}**{{end}}{{if eq .Status.State "failure"}}**{{.Status.State}}**{{end}}{{if eq .Status.State "aborted"}}**{{.Status.State}}**{{end}}{{if eq .Status.State "error"}}**{{.Status.State}}**{{end}}
+- Log: [View logs]({{.Status.URL}})`,
+					},
+				},
+			}
+			return Config{
+				ProwConfig: ProwConfig{
+					DingTalkReporterConfigs: dingTalkCfg,
+				},
+			}
+		},
+		successExpected: false,
+	},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			cfg := tc.config()
+			if err := cfg.validateComponentConfig(); (err == nil) != tc.successExpected {
+				t.Errorf("Expected success=%t but got err=%v", tc.successExpected, err)
+			}
+			if tc.successExpected {
+				for _, config := range cfg.DingTalkReporterConfigs {
+					if config.ReportTemplate == "" {
+						t.Errorf("expected default ReportTemplate to be set")
+					}
+					if config.Token == "" {
+						t.Errorf("expected Token to be required")
+					}
+				}
+			}
+		})
+	}
+}
+
 func TestSlackReporterValidation(t *testing.T) {
 	testCases := []struct {
 		name            string
diff --git a/pkg/config/prow-config-documented.yaml b/pkg/config/prow-config-documented.yaml
index 89c553063..d87a2f122 100644
--- a/pkg/config/prow-config-documented.yaml
+++ b/pkg/config/prow-config-documented.yaml
@@ -431,6 +431,15 @@ deck:
 # DefaultJobTimeout this is default deadline for prow jobs. This value is used when
 # no timeout is configured at the job level. This value is set to 24 hours.
 default_job_timeout: 0s
+dingtalk_reporter_configs:
+  "":
+    job_states_to_report:
+    - ""
+    job_types_to_report:
+    - ""
+    report: false
+    report_template: ' '
+    token: ' '
 # DisabledClusters holds a list of disabled build cluster names. The same context names will be ignored while
 # Prow components load the kubeconfig files.
 disabled_clusters:
diff --git a/pkg/crier/reporters/dingtalk/reporter.go b/pkg/crier/reporters/dingtalk/reporter.go
new file mode 100644
index 000000000..f8bdc063b
--- /dev/null
+++ b/pkg/crier/reporters/dingtalk/reporter.go
@@ -0,0 +1,155 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package dingtalk + +import ( + "bytes" + "context" + "errors" + "fmt" + "html/template" + + prowapi "sigs.k8s.io/prow/pkg/apis/prowjobs/v1" + "sigs.k8s.io/prow/pkg/config" + + "github.com/sirupsen/logrus" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + dingtalkclient "sigs.k8s.io/prow/pkg/dingtalk" +) + +const ( + reporterName = "dingTalk-reporter" +) + +type dingTalkClient interface { + WriteMessage(msg, token string) error +} + +type dingTalkReporter struct { + client dingTalkClient + config func(*prowapi.Refs) config.DingTalkReporter + dryRun bool +} + +func (sr *dingTalkReporter) getConfig(pj *prowapi.ProwJob) (*config.DingTalkReporter, *prowapi.DingTalkReporterConfig) { + refs := pj.Spec.Refs + if refs == nil && len(pj.Spec.ExtraRefs) > 0 { + refs = &pj.Spec.ExtraRefs[0] + } + globalConfig := sr.config(refs) + var jobDingTalkConfig *prowapi.DingTalkReporterConfig + if pj.Spec.ReporterConfig != nil && pj.Spec.ReporterConfig.DingTalk != nil { + jobDingTalkConfig = pj.Spec.ReporterConfig.DingTalk + } + return &globalConfig, jobDingTalkConfig +} + +func (sr *dingTalkReporter) Report( + _ context.Context, + log *logrus.Entry, + pj *prowapi.ProwJob, +) ([]*prowapi.ProwJob, *reconcile.Result, error) { + return []*prowapi.ProwJob{pj}, nil, sr.report(log, pj) +} + +func (sr *dingTalkReporter) report(log *logrus.Entry, pj *prowapi.ProwJob) error { + globalDingTalkConfig, jobDingTalkConfig := sr.getConfig(pj) + if globalDingTalkConfig != nil { + jobDingTalkConfig = jobDingTalkConfig.ApplyDefault(&globalDingTalkConfig.DingTalkReporterConfig) + } + if jobDingTalkConfig == nil { + return errors.New("resolved dingTalk config is empty") // Shouldn't happen at all, just in case + } + + b := &bytes.Buffer{} + tmpl, err := template.New("").Parse(jobDingTalkConfig.ReportTemplate) + if err != nil { + log.WithError(err).Error("failed to parse template") + return fmt.Errorf("failed to parse template: %w", err) + } + if err := tmpl.Execute(b, pj); err != nil { + log.WithError(err).Error("failed to execute report template") + return fmt.Errorf("failed to execute report template: %w", err) + } + if sr.dryRun { + log.WithField("messagejson", b.String()).Debug("Skipping reporting because dry-run is enabled") + return nil + } + if err := sr.client.WriteMessage(b.String(), jobDingTalkConfig.Token); err != nil { + log.WithError(err).Error("failed to write DingTalk message") + return fmt.Errorf("failed to write DingTalk message: %w", err) + } + return nil +} + +func (sr *dingTalkReporter) GetName() string { + return reporterName +} + +func (sr *dingTalkReporter) ShouldReport(_ context.Context, logger *logrus.Entry, pj *prowapi.ProwJob) bool { + globalDingTalkConfig, jobDingTalkConfig := sr.getConfig(pj) + + var typeShouldReport bool + if globalDingTalkConfig.JobTypesToReport != nil { + for _, tp := range globalDingTalkConfig.JobTypesToReport { + if tp == pj.Spec.Type { + typeShouldReport = true + break + } + } + } + + // If a user specifically put a token on their job, they want + // it to be reported regardless of the job types setting. + var jobShouldReport bool + if jobDingTalkConfig != nil && jobDingTalkConfig.Token != "" { + jobShouldReport = true + } + + // The job should only be reported if its state has a match with the + // JobStatesToReport config. + // Note the JobStatesToReport configured in the Prow job can overwrite the + // Prow config. 
+ var stateShouldReport bool + if merged := jobDingTalkConfig.ApplyDefault(&globalDingTalkConfig.DingTalkReporterConfig); merged != nil && merged.JobStatesToReport != nil { + if merged.Report != nil && !*merged.Report { + logger.WithField( + "job_states_to_report", + merged.JobStatesToReport, + ).Debug("Skip dingTalk reporting as 'report: false', could result from 'job_states_to_report: []'.") + return false + } + for _, stateToReport := range merged.JobStatesToReport { + if pj.Status.State == stateToReport { + stateShouldReport = true + break + } + } + } + + shouldReport := stateShouldReport && (typeShouldReport || jobShouldReport) + logger.WithField("reporting", shouldReport).Debug("Determined should report") + return shouldReport +} + +func New(cfg func(refs *prowapi.Refs) config.DingTalkReporter, dryRun bool) *dingTalkReporter { + return &dingTalkReporter{ + client: dingtalkclient.NewClient(), + config: cfg, + dryRun: dryRun, + } +} diff --git a/pkg/crier/reporters/dingtalk/reporter_test.go b/pkg/crier/reporters/dingtalk/reporter_test.go new file mode 100644 index 000000000..39e57be6d --- /dev/null +++ b/pkg/crier/reporters/dingtalk/reporter_test.go @@ -0,0 +1,693 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dingtalk + +import ( + "context" + "testing" + + v1 "sigs.k8s.io/prow/pkg/apis/prowjobs/v1" + "sigs.k8s.io/prow/pkg/config" + + "github.com/sirupsen/logrus" +) + +func TestShouldReport(t *testing.T) { + boolPtr := func(b bool) *bool { + return &b + } + testCases := []struct { + name string + config config.DingTalkReporter + pj *v1.ProwJob + expected bool + }{ + { + name: "Presubmit Job should report", + config: config.DingTalkReporter{ + JobTypesToReport: []v1.ProwJobType{v1.PresubmitJob}, + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + JobStatesToReport: []v1.ProwJobState{v1.SuccessState}, + }, + }, + pj: &v1.ProwJob{ + Spec: v1.ProwJobSpec{ + Type: v1.PresubmitJob, + }, + Status: v1.ProwJobStatus{ + State: v1.SuccessState, + }, + }, + expected: true, + }, + { + name: "Wrong job type should not report", + config: config.DingTalkReporter{ + JobTypesToReport: []v1.ProwJobType{v1.PostsubmitJob}, + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + JobStatesToReport: []v1.ProwJobState{v1.SuccessState}, + }, + }, + pj: &v1.ProwJob{ + Spec: v1.ProwJobSpec{ + Type: v1.PresubmitJob, + }, + Status: v1.ProwJobStatus{ + State: v1.SuccessState, + }, + }, + expected: false, + }, + { + name: "Successful Job should report", + config: config.DingTalkReporter{ + JobTypesToReport: []v1.ProwJobType{v1.PostsubmitJob}, + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + JobStatesToReport: []v1.ProwJobState{v1.SuccessState}, + }, + }, + pj: &v1.ProwJob{ + Spec: v1.ProwJobSpec{ + Type: v1.PostsubmitJob, + }, + Status: v1.ProwJobStatus{ + State: v1.SuccessState, + }, + }, + expected: true, + }, + { + name: "Successful Job with report:false should not report", + config: config.DingTalkReporter{ + JobTypesToReport: []v1.ProwJobType{v1.PostsubmitJob}, + 
DingTalkReporterConfig: v1.DingTalkReporterConfig{ + JobStatesToReport: []v1.ProwJobState{v1.SuccessState}, + Report: boolPtr(false), + }, + }, + pj: &v1.ProwJob{ + Spec: v1.ProwJobSpec{ + Type: v1.PostsubmitJob, + }, + Status: v1.ProwJobStatus{ + State: v1.SuccessState, + }, + }, + expected: false, + }, + { + name: "Successful Job with report:true should report", + config: config.DingTalkReporter{ + JobTypesToReport: []v1.ProwJobType{v1.PostsubmitJob}, + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + JobStatesToReport: []v1.ProwJobState{v1.SuccessState}, + Report: boolPtr(true), + }, + }, + pj: &v1.ProwJob{ + Spec: v1.ProwJobSpec{ + Type: v1.PostsubmitJob, + }, + Status: v1.ProwJobStatus{ + State: v1.SuccessState, + }, + }, + expected: true, + }, + { + // Note: this is impossible to hit, as roundtrip with `omitempty` + // would never result in empty slice. + name: "Empty job config settings negate global", + config: config.DingTalkReporter{ + JobTypesToReport: []v1.ProwJobType{v1.PostsubmitJob}, + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + JobStatesToReport: []v1.ProwJobState{v1.SuccessState}, + }, + }, + pj: &v1.ProwJob{ + Spec: v1.ProwJobSpec{ + Type: v1.PostsubmitJob, + ReporterConfig: &v1.ReporterConfig{ + DingTalk: &v1.DingTalkReporterConfig{JobStatesToReport: []v1.ProwJobState{}}, + }, + }, + Status: v1.ProwJobStatus{ + State: v1.SuccessState, + }, + }, + expected: false, + }, + { + name: "Nil job config settings does not negate global", + config: config.DingTalkReporter{ + JobTypesToReport: []v1.ProwJobType{v1.PostsubmitJob}, + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + JobStatesToReport: []v1.ProwJobState{v1.SuccessState}, + }, + }, + pj: &v1.ProwJob{ + Spec: v1.ProwJobSpec{ + Type: v1.PostsubmitJob, + ReporterConfig: &v1.ReporterConfig{ + DingTalk: &v1.DingTalkReporterConfig{JobStatesToReport: nil}, + }, + }, + Status: v1.ProwJobStatus{ + State: v1.SuccessState, + }, + }, + expected: true, + }, + { + name: "Successful Job should not report", + config: config.DingTalkReporter{ + JobTypesToReport: []v1.ProwJobType{v1.PostsubmitJob}, + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + JobStatesToReport: []v1.ProwJobState{v1.PendingState}, + }, + }, + pj: &v1.ProwJob{ + Spec: v1.ProwJobSpec{ + Type: v1.PostsubmitJob, + }, + Status: v1.ProwJobStatus{ + State: v1.SuccessState, + }, + }, + expected: false, + }, + { + name: "Job with channel config should ignore the JobTypesToReport config", + config: config.DingTalkReporter{ + JobTypesToReport: []v1.ProwJobType{}, + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + JobStatesToReport: []v1.ProwJobState{v1.SuccessState}, + }, + }, + pj: &v1.ProwJob{ + Spec: v1.ProwJobSpec{ + Type: v1.PostsubmitJob, + ReporterConfig: &v1.ReporterConfig{ + DingTalk: &v1.DingTalkReporterConfig{Token: "some-token"}, + }, + }, + Status: v1.ProwJobStatus{ + State: v1.SuccessState, + }, + }, + expected: true, + }, + { + name: "JobStatesToReport in Job config should override the one in Prow config", + config: config.DingTalkReporter{ + JobTypesToReport: []v1.ProwJobType{}, + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + JobStatesToReport: []v1.ProwJobState{v1.SuccessState}, + }, + }, + pj: &v1.ProwJob{ + Spec: v1.ProwJobSpec{ + Type: v1.PostsubmitJob, + ReporterConfig: &v1.ReporterConfig{ + DingTalk: &v1.DingTalkReporterConfig{ + Token: "some-token", + JobStatesToReport: []v1.ProwJobState{v1.FailureState, v1.PendingState}, + }, + }, + }, + Status: v1.ProwJobStatus{ + State: v1.FailureState, + }, + }, + expected: true, + }, + 
{ + name: "Job with channel config but does not have matched state in Prow config should not report", + config: config.DingTalkReporter{ + JobTypesToReport: []v1.ProwJobType{}, + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + JobStatesToReport: []v1.ProwJobState{v1.SuccessState}, + }, + }, + pj: &v1.ProwJob{ + Spec: v1.ProwJobSpec{ + Type: v1.PostsubmitJob, + ReporterConfig: &v1.ReporterConfig{ + DingTalk: &v1.DingTalkReporterConfig{Token: "some-token"}, + }, + }, + Status: v1.ProwJobStatus{ + State: v1.PendingState, + }, + }, + expected: false, + }, + { + name: "Job with channel and state config where the state does not match, should not report", + config: config.DingTalkReporter{ + JobTypesToReport: []v1.ProwJobType{}, + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + JobStatesToReport: []v1.ProwJobState{v1.SuccessState}, + }, + }, + pj: &v1.ProwJob{ + Spec: v1.ProwJobSpec{ + Type: v1.PostsubmitJob, + ReporterConfig: &v1.ReporterConfig{ + DingTalk: &v1.DingTalkReporterConfig{ + Token: "some-token", + JobStatesToReport: []v1.ProwJobState{v1.FailureState, v1.PendingState}, + }, + }, + }, + Status: v1.ProwJobStatus{ + State: v1.SuccessState, + }, + }, + expected: false, + }, + { + name: "Empty config should not report", + config: config.DingTalkReporter{}, + pj: &v1.ProwJob{ + Spec: v1.ProwJobSpec{ + Type: v1.PostsubmitJob, + }, + Status: v1.ProwJobStatus{ + State: v1.SuccessState, + }, + }, + expected: false, + }, + } + + for _, tc := range testCases { + cfgGetter := func(*v1.Refs) config.DingTalkReporter { + return tc.config + } + t.Run( + tc.name, func(t *testing.T) { + reporter := &dingTalkReporter{ + config: cfgGetter, + } + + if result := reporter.ShouldReport( + context.Background(), + logrus.NewEntry(logrus.StandardLogger()), + tc.pj, + ); result != tc.expected { + t.Errorf("expected result to be %t but was %t", tc.expected, result) + } + }, + ) + } +} + +func TestReloadsConfig(t *testing.T) { + cfg := config.DingTalkReporter{} + cfgGetter := func(*v1.Refs) config.DingTalkReporter { + return cfg + } + + pj := &v1.ProwJob{ + Spec: v1.ProwJobSpec{ + Type: v1.PostsubmitJob, + }, + Status: v1.ProwJobStatus{ + State: v1.FailureState, + }, + } + + reporter := &dingTalkReporter{ + config: cfgGetter, + } + + if shouldReport := reporter.ShouldReport( + context.Background(), + logrus.NewEntry(logrus.StandardLogger()), + pj, + ); shouldReport { + t.Error("Did expect shouldReport to be false") + } + + cfg.JobStatesToReport = []v1.ProwJobState{v1.FailureState} + cfg.JobTypesToReport = []v1.ProwJobType{v1.PostsubmitJob} + + if shouldReport := reporter.ShouldReport( + context.Background(), + logrus.NewEntry(logrus.StandardLogger()), + pj, + ); !shouldReport { + t.Error("Did expect shouldReport to be true after config change") + } +} + +func TestUsesTokenOverrideFromJob(t *testing.T) { + testCases := []struct { + name string + config func() config.Config + pj *v1.ProwJob + wantToken string + emptyExpected bool + }{ + { + name: "No job-level config, use global default", + config: func() config.Config { + dingTalkCfg := map[string]config.DingTalkReporter{ + "*": { + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + Token: "global-default", + }, + }, + } + return config.Config{ + ProwConfig: config.ProwConfig{ + DingTalkReporterConfigs: dingTalkCfg, + }, + } + }, + pj: &v1.ProwJob{Spec: v1.ProwJobSpec{}}, + wantToken: "global-default", + }, + { + name: "org/repo for ref exists in config, use it", + config: func() config.Config { + dingTalkCfg := map[string]config.DingTalkReporter{ + "*": 
{ + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + Token: "global-default", + }, + }, + "istio/proxy": { + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + Token: "org-repo-config", + }, + }, + } + return config.Config{ + ProwConfig: config.ProwConfig{ + DingTalkReporterConfigs: dingTalkCfg, + }, + } + }, + pj: &v1.ProwJob{ + Spec: v1.ProwJobSpec{ + Refs: &v1.Refs{ + Org: "istio", + Repo: "proxy", + }, + }, + }, + wantToken: "org-repo-config", + }, + { + name: "org for ref exists in config, use it", + config: func() config.Config { + dingTalkCfg := map[string]config.DingTalkReporter{ + "*": { + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + Token: "global-default", + }, + }, + "istio": { + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + Token: "org-config", + }, + }, + } + return config.Config{ + ProwConfig: config.ProwConfig{ + DingTalkReporterConfigs: dingTalkCfg, + }, + } + }, + pj: &v1.ProwJob{ + Spec: v1.ProwJobSpec{ + Refs: &v1.Refs{ + Org: "istio", + Repo: "proxy", + }, + }, + }, + wantToken: "org-config", + }, + { + name: "org/repo takes precedence over org", + config: func() config.Config { + dingTalkCfg := map[string]config.DingTalkReporter{ + "*": { + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + Token: "global-default", + }, + }, + "istio": { + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + Token: "org-config", + }, + }, + "istio/proxy": { + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + Token: "org-repo-config", + }, + }, + } + return config.Config{ + ProwConfig: config.ProwConfig{ + DingTalkReporterConfigs: dingTalkCfg, + }, + } + }, + pj: &v1.ProwJob{ + Spec: v1.ProwJobSpec{ + Refs: &v1.Refs{ + Org: "istio", + Repo: "proxy", + }, + }, + }, + wantToken: "org-repo-config", + }, + { + name: "Job-level config present, use it", + config: func() config.Config { + dingTalkCfg := map[string]config.DingTalkReporter{ + "*": { + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + Token: "global-default", + }, + }, + "istio": { + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + Token: "org-config", + }, + }, + "istio/proxy": { + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + Token: "org-repo-config", + }, + }, + } + return config.Config{ + ProwConfig: config.ProwConfig{ + DingTalkReporterConfigs: dingTalkCfg, + }, + } + }, + pj: &v1.ProwJob{ + Spec: v1.ProwJobSpec{ + ReporterConfig: &v1.ReporterConfig{ + DingTalk: &v1.DingTalkReporterConfig{ + Token: "team-a", + }, + }, + }, + }, + wantToken: "team-a", + }, + { + name: "No matching dingTalk config", + config: func() config.Config { + dingTalkCfg := map[string]config.DingTalkReporter{ + "istio": { + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + Token: "org-config", + }, + }, + "istio/proxy": { + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + Token: "org-repo-config", + }, + }, + } + return config.Config{ + ProwConfig: config.ProwConfig{ + DingTalkReporterConfigs: dingTalkCfg, + }, + } + }, + pj: &v1.ProwJob{ + Spec: v1.ProwJobSpec{ + Refs: &v1.Refs{ + Org: "unknownorg", + Repo: "unknownrepo", + }, + }, + }, + emptyExpected: true, + }, + { + name: "Refs unset but extra refs exist, use it", + config: func() config.Config { + dingTalkCfg := map[string]config.DingTalkReporter{ + "istio/proxy": { + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + Token: "org-repo-config", + }, + }, + } + return config.Config{ + ProwConfig: config.ProwConfig{ + DingTalkReporterConfigs: dingTalkCfg, + }, + } + }, + pj: &v1.ProwJob{ + Spec: v1.ProwJobSpec{ + ExtraRefs: []v1.Refs{ + { + Org: 
"istio", + Repo: "proxy", + }, + }, + }, + }, + wantToken: "org-repo-config", + }, + } + + for _, tc := range testCases { + t.Run( + tc.name, func(t *testing.T) { + cfgGetter := func(refs *v1.Refs) config.DingTalkReporter { + return tc.config().DingTalkReporterConfigs.GetDingTalkReporter(refs) + } + sr := dingTalkReporter{ + config: cfgGetter, + } + + prowSlackCfg, jobDingTalkCfg := sr.getConfig(tc.pj) + jobDingTalkCfg = jobDingTalkCfg.ApplyDefault(&prowSlackCfg.DingTalkReporterConfig) + gotToken := jobDingTalkCfg.Token + if gotToken != tc.wantToken { + t.Fatalf("Expected token: %q, got: %q", tc.wantToken, gotToken) + } + }, + ) + } +} + +func TestShouldReportDefaultsToExtraRefs(t *testing.T) { + job := &v1.ProwJob{ + Spec: v1.ProwJobSpec{ + Type: v1.PeriodicJob, + ExtraRefs: []v1.Refs{{Org: "org"}}, + }, + Status: v1.ProwJobStatus{ + State: v1.SuccessState, + }, + } + sr := dingTalkReporter{ + config: func(r *v1.Refs) config.DingTalkReporter { + if r.Org == "org" { + return config.DingTalkReporter{ + JobTypesToReport: []v1.ProwJobType{v1.PeriodicJob}, + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + JobStatesToReport: []v1.ProwJobState{v1.SuccessState}, + }, + } + } + return config.DingTalkReporter{} + }, + } + + if !sr.ShouldReport(context.Background(), logrus.NewEntry(logrus.StandardLogger()), job) { + t.Fatal("expected job to report but did not") + } +} + +type fakeDingTalkClient struct { + messages map[string]string +} + +func (fsc *fakeDingTalkClient) WriteMessage(msg, token string) error { + if fsc.messages == nil { + fsc.messages = map[string]string{} + } + fsc.messages[token] = msg + return nil +} + +var _ dingTalkClient = &fakeDingTalkClient{} + +func TestReportDefaultsToExtraRefs(t *testing.T) { + job := &v1.ProwJob{ + Spec: v1.ProwJobSpec{ + Type: v1.PeriodicJob, + ExtraRefs: []v1.Refs{{Org: "org", Repo: "repo"}}, + }, + Status: v1.ProwJobStatus{ + State: v1.SuccessState, + }, + } + fsc := &fakeDingTalkClient{} + sr := dingTalkReporter{ + config: func(r *v1.Refs) config.DingTalkReporter { + if r.Org == "org" { + return config.DingTalkReporter{ + JobTypesToReport: []v1.ProwJobType{v1.PeriodicJob}, + DingTalkReporterConfig: v1.DingTalkReporterConfig{ + JobStatesToReport: []v1.ProwJobState{v1.SuccessState}, + Token: "emercengy", + ReportTemplate: `{{ $repo := "" }}{{with .Spec.Refs}}{{$repo = .Repo}}{{end}}{{if eq $repo ""}}{{with index .Spec.ExtraRefs 0}}{{$repo = .Repo}}{{end}}{{end}}## Repo: {{ $repo }} +--- +- Job: {{.Spec.Job}} +- Type: {{.Spec.Type}} +- State: {{if eq .Status.State "triggered"}}**{{.Status.State}}**{{end}}{{if eq .Status.State "pending"}}**{{.Status.State}}**{{end}}{{if eq .Status.State "success"}}**{{.Status.State}}**{{end}}{{if eq .Status.State "failure"}}**{{.Status.State}}**{{end}}{{if eq .Status.State "aborted"}}**{{.Status.State}}**{{end}}{{if eq .Status.State "error"}}**{{.Status.State}}**{{end}} +- Log: [View logs]({{.Status.URL}})`, + }, + } + } + return config.DingTalkReporter{} + }, + client: fsc, + } + wantMessage := `## Repo: repo +--- +- Job: +- Type: periodic +- State: **success** +- Log: [View logs]()` + if _, _, err := sr.Report(context.Background(), logrus.NewEntry(logrus.StandardLogger()), job); err != nil { + t.Fatalf("reporting failed: %v", err) + } + if fsc.messages["emercengy"] != wantMessage { + t.Errorf( + "expected the token 'emergency' to contain message 'there you go' but wasn't the case, all messages: %v", + fsc.messages, + ) + } +} diff --git a/pkg/dingtalk/client.go b/pkg/dingtalk/client.go new file mode 100644 index 
000000000..7d713a79c
--- /dev/null
+++ b/pkg/dingtalk/client.go
@@ -0,0 +1,138 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dingtalk
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/sirupsen/logrus"
+)
+
+// Logger provides an interface to log debug messages.
+type Logger interface {
+	Debugf(s string, v ...interface{})
+}
+
+// Client provides a connection to the DingTalk robot webhook API.
+// Each call is authenticated with the per-robot access token passed to WriteMessage.
+type Client struct {
+	// If logger is non-nil, log all method calls with it.
+	logger Logger
+	fake   bool
+}
+
+type dingTalkMsg struct {
+	MsgType  string   `json:"msgtype"`
+	Markdown markdown `json:"markdown"`
+}
+
+type markdown struct {
+	Title string `json:"title"`
+	Text  string `json:"text"`
+}
+
+const (
+	// Robot webhook endpoint; the access token is appended as the
+	// access_token query parameter, e.g.
+	// https://oapi.dingtalk.com/robot/send?access_token=<token>
+	chatPostMessage = "https://oapi.dingtalk.com/robot/send"
+)
+
+// NewClient creates a DingTalk webhook client.
+func NewClient() *Client {
+	return &Client{
+		logger: logrus.WithField("client", "dingTalk"),
+	}
+}
+
+// NewFakeClient returns a client that takes no actions.
+func NewFakeClient() *Client {
+	return &Client{
+		fake: true,
+	}
+}
+
+func (sl *Client) log(methodName string, args ...interface{}) {
+	if sl.logger == nil {
+		return
+	}
+	var as []string
+	for _, arg := range args {
+		as = append(as, fmt.Sprintf("%v", arg))
+	}
+	sl.logger.Debugf("%s(%s)", methodName, strings.Join(as, ", "))
+}
+
+func (sl *Client) postMessage(msg, token string) error {
+	u, _ := url.Parse(chatPostMessage)
+	var uv = url.Values{}
+	uv.Add("access_token", token)
+	u.RawQuery = uv.Encode()
+
+	dtMsg := dingTalkMsg{
+		MsgType: "markdown",
+		Markdown: markdown{
+			Title: "notify",
+			Text:  msg,
+		},
+	}
+
+	var buf bytes.Buffer
+	if err := json.NewEncoder(&buf).Encode(dtMsg); err != nil {
+		return err
+	}
+
+	resp, err := http.Post(u.String(), "application/json", &buf)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	body, _ := io.ReadAll(resp.Body)
+	apiResponse := struct {
+		Code  int    `json:"errcode"`
+		Error string `json:"errmsg"`
+	}{}
+
+	if err := json.Unmarshal(body, &apiResponse); err != nil {
+		return fmt.Errorf("API returned invalid JSON (%q): %w", string(body), err)
+	}
+
+	// The webhook signals success with HTTP 200, errcode 0 and errmsg "ok".
+	if resp.StatusCode != 200 ||
+		(apiResponse.Code != 0 && apiResponse.Error != "ok") {
+		return fmt.Errorf("request failed: %s", apiResponse.Error)
+	}
+
+	return nil
+}
+
+// WriteMessage posts msg as a markdown message to the DingTalk robot identified by token.
+func (sl *Client) WriteMessage(msg, token string) error {
+	sl.log("WriteMessage", msg, token)
+	if sl.fake {
+		return nil
+	}
+
+	if err := sl.postMessage(msg, token); err != nil {
+		return fmt.Errorf("failed to post message to %s: %w", token, err)
+	}
+	return nil
+}
diff --git a/pkg/spyglass/lenses/common/bindata.go b/pkg/spyglass/lenses/common/bindata.go
index 233094a52..733c0a62a 100644
--- a/pkg/spyglass/lenses/common/bindata.go
+++ b/pkg/spyglass/lenses/common/bindata.go
@@ -93,7 +93,7 @@ func staticSpyglassLensHtml() (*asset, error) {
 		return nil, err
 	}
 
-	info := bindataFileInfo{name: "static/spyglass-lens.html", size: 616, mode: os.FileMode(436), modTime: time.Unix(1724780439, 0)}
+	info := bindataFileInfo{name: "static/spyglass-lens.html", size: 616, mode: os.FileMode(420), modTime: time.Unix(1732374776, 0)}
 	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
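
For reference, and not part of the diff itself: a minimal dingtalk_reporter_configs stanza following the fields documented in prow-config-documented.yaml above might look like the sketch below. The token values are placeholders for robot webhook access tokens; keys may be "*" for a global default or "org" / "org/repo" for overrides, with the most specific match taking precedence, as exercised by TestUsesTokenOverrideFromJob. report_template may be omitted, in which case validation is expected to fill in a default template (per TestDingTalkReporterValidation).

dingtalk_reporter_configs:
  "*":
    token: "<robot-access-token>"          # placeholder, not a real token
    job_types_to_report:
    - presubmit
    - postsubmit
    job_states_to_report:
    - success
    - failure
    - error
  "istio/proxy":
    token: "<repo-specific-token>"         # placeholder; overrides the "*" entry for this repo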