diff --git a/README.md b/README.md
index f9a4eef7e1..02533a34b4 100644
--- a/README.md
+++ b/README.md
@@ -128,6 +128,7 @@ _This mode of operation is ideal for continuous monitoring of your cluster and c
* Run `k8sgpt filters` to manage the active filters used by the analyzer. By default, all filters are executed during analysis.
* Run `k8sgpt analyze` to run a scan.
* And use `k8sgpt analyze --explain` to get a more detailed explanation of the issues.
+* You can also run `k8sgpt analyze --with-doc` (with or without the explain flag) to get the official documentation from Kubernetes.
## Analyzers
@@ -163,6 +164,7 @@ _Run a scan with the default analyzers_
k8sgpt generate
k8sgpt auth add
k8sgpt analyze --explain
+k8sgpt analyze --explain --with-doc
```
_Filter on resource_
@@ -279,7 +281,7 @@ curl -X GET "http://localhost:8080/analyze?namespace=k8sgpt&explain=false"
LocalAI provider
-To run local models, it is possible to use OpenAI compatible APIs, for instance [LocalAI](https://github.com/go-skynet/LocalAI) which uses [llama.cpp](https://github.com/ggerganov/llama.cpp) and [ggml](https://github.com/ggerganov/ggml) to run inference on consumer-grade hardware. Models supported by LocalAI for instance are Vicuna, Alpaca, LLaMA, Cerebras, GPT4ALL, GPT4ALL-J and koala.
+To run local models, it is possible to use OpenAI compatible APIs, for instance [LocalAI](https://github.com/go-skynet/LocalAI) which uses [llama.cpp](https://github.com/ggerganov/llama.cpp) and [ggml](https://github.com/ggerganov/ggml) to run inference on consumer-grade hardware. Models supported by LocalAI for instance are Vicuna, Alpaca, LLaMA, Cerebras, GPT4ALL, GPT4ALL-J and koala.
To run local inference, you need to download the models first, for instance you can find `ggml` compatible models in [huggingface.com](https://huggingface.co/models?search=ggml) (for example vicuna, alpaca and koala).
@@ -309,16 +311,16 @@ k8sgpt analyze --explain --backend localai
Prerequisites: an Azure OpenAI deployment is needed, please visit MS official [documentation](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource) to create your own.
-To authenticate with k8sgpt, you will need the Azure OpenAI endpoint of your tenant `"https://your Azure OpenAI Endpoint"`, the api key to access your deployment, the deployment name of your model and the model name itself.
+To authenticate with k8sgpt, you will need the Azure OpenAI endpoint of your tenant `"https://your Azure OpenAI Endpoint"`, the api key to access your deployment, the deployment name of your model and the model name itself.
-To run k8sgpt, run `k8sgpt auth` with the `azureopenai` backend:
+To run k8sgpt, run `k8sgpt auth` with the `azureopenai` backend:
```
k8sgpt auth add --backend azureopenai --baseurl https:// --engine --model
```
Lastly, enter your Azure API key, after the prompt.
-Now you are ready to analyze with the azure openai backend:
+Now you are ready to analyze with the azure openai backend:
```
k8sgpt analyze --explain --backend azureopenai
```
@@ -395,31 +397,31 @@ The Kubernetes system is trying to scale a StatefulSet named fake-deployment usi
Config file locations:
| OS | Path |
-|---------|--------------------------------------------------|
+| ------- | ------------------------------------------------ |
| MacOS | ~/Library/Application Support/k8sgpt/k8sgpt.yaml |
| Linux | ~/.config/k8sgpt/k8sgpt.yaml |
| Windows | %LOCALAPPDATA%/k8sgpt/k8sgpt.yaml |
-There may be scenarios where caching remotely is prefered.
+There may be scenarios where caching remotely is preferred.
In these scenarios K8sGPT supports AWS S3 Integration.
Remote caching
_As a prerequisite `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` are required as environmental variables._
-
+
_Adding a remote cache_
Note: this will create the bucket if it does not exist
```
k8sgpt cache add --region --bucket
```
-
+
_Listing cache items_
```
k8sgpt cache list
```
-
+
_Removing the remote cache_
Note: this will not delete the bucket
```
diff --git a/cmd/analyze/analyze.go b/cmd/analyze/analyze.go
index 535c0d08f3..9f202d2e1a 100644
--- a/cmd/analyze/analyze.go
+++ b/cmd/analyze/analyze.go
@@ -32,6 +32,7 @@ var (
namespace string
anonymize bool
maxConcurrency int
+ withDoc bool
)
// AnalyzeCmd represents the problems command
@@ -45,7 +46,7 @@ var AnalyzeCmd = &cobra.Command{
// AnalysisResult configuration
config, err := analysis.NewAnalysis(backend,
- language, filters, namespace, nocache, explain, maxConcurrency)
+ language, filters, namespace, nocache, explain, maxConcurrency, withDoc)
if err != nil {
color.Red("Error: %v", err)
os.Exit(1)
@@ -91,4 +92,6 @@ func init() {
AnalyzeCmd.Flags().StringVarP(&language, "language", "l", "english", "Languages to use for AI (e.g. 'English', 'Spanish', 'French', 'German', 'Italian', 'Portuguese', 'Dutch', 'Russian', 'Chinese', 'Japanese', 'Korean')")
// add max concurrency
AnalyzeCmd.Flags().IntVarP(&maxConcurrency, "max-concurrency", "m", 10, "Maximum number of concurrent requests to the Kubernetes API server")
+ // kubernetes doc flag
+ AnalyzeCmd.Flags().BoolVarP(&withDoc, "with-doc", "d", false, "Give me the official documentation of the involved field")
}
diff --git a/go.mod b/go.mod
index 77f9529c51..b56837eb32 100644
--- a/go.mod
+++ b/go.mod
@@ -75,7 +75,7 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/btree v1.1.2 // indirect
- github.com/google/gnostic v0.6.9 // indirect
+ github.com/google/gnostic v0.6.9
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/go-containerregistry v0.14.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
diff --git a/pkg/analysis/analysis.go b/pkg/analysis/analysis.go
index 8f7ce408bf..d70cf3f4ca 100644
--- a/pkg/analysis/analysis.go
+++ b/pkg/analysis/analysis.go
@@ -23,6 +23,7 @@ import (
"sync"
"github.com/fatih/color"
+ openapi_v2 "github.com/google/gnostic/openapiv2"
"github.com/k8sgpt-ai/k8sgpt/pkg/ai"
"github.com/k8sgpt-ai/k8sgpt/pkg/analyzer"
"github.com/k8sgpt-ai/k8sgpt/pkg/cache"
@@ -45,6 +46,7 @@ type Analysis struct {
Explain bool
MaxConcurrency int
AnalysisAIProvider string // The name of the AI Provider used for this analysis
+ WithDoc bool
}
type AnalysisStatus string
@@ -63,7 +65,7 @@ type JsonOutput struct {
Results []common.Result `json:"results"`
}
-func NewAnalysis(backend string, language string, filters []string, namespace string, noCache bool, explain bool, maxConcurrency int) (*Analysis, error) {
+func NewAnalysis(backend string, language string, filters []string, namespace string, noCache bool, explain bool, maxConcurrency int, withDoc bool) (*Analysis, error) {
var configAI ai.AIConfiguration
err := viper.UnmarshalKey("ai", &configAI)
if err != nil {
@@ -128,6 +130,7 @@ func NewAnalysis(backend string, language string, filters []string, namespace st
Explain: explain,
MaxConcurrency: maxConcurrency,
AnalysisAIProvider: backend,
+ WithDoc: withDoc,
}, nil
}
@@ -136,11 +139,23 @@ func (a *Analysis) RunAnalysis() {
coreAnalyzerMap, analyzerMap := analyzer.GetAnalyzerMap()
+ // we get the openapi schema from the server only if required by the flag "with-doc"
+ openapiSchema := &openapi_v2.Document{}
+ if a.WithDoc {
+ var openApiErr error
+
+ openapiSchema, openApiErr = a.Client.Client.Discovery().OpenAPISchema()
+ if openApiErr != nil {
+ a.Errors = append(a.Errors, fmt.Sprintf("[KubernetesDoc] %s", openApiErr))
+ }
+ }
+
analyzerConfig := common.Analyzer{
- Client: a.Client,
- Context: a.Context,
- Namespace: a.Namespace,
- AIClient: a.AIClient,
+ Client: a.Client,
+ Context: a.Context,
+ Namespace: a.Namespace,
+ AIClient: a.AIClient,
+ OpenapiSchema: openapiSchema,
}
semaphore := make(chan struct{}, a.MaxConcurrency)
diff --git a/pkg/analysis/output.go b/pkg/analysis/output.go
index bfbc6635b6..f65503d4e4 100644
--- a/pkg/analysis/output.go
+++ b/pkg/analysis/output.go
@@ -78,6 +78,9 @@ func (a *Analysis) textOutput() ([]byte, error) {
color.YellowString(result.Name), color.CyanString(result.ParentObject)))
for _, err := range result.Error {
output.WriteString(fmt.Sprintf("- %s %s\n", color.RedString("Error:"), color.RedString(err.Text)))
+ if err.KubernetesDoc != "" {
+ output.WriteString(fmt.Sprintf(" %s %s\n", color.RedString("Kubernetes Doc:"), color.RedString(err.KubernetesDoc)))
+ }
}
output.WriteString(color.GreenString(result.Details + "\n"))
}
diff --git a/pkg/analyzer/cronjob.go b/pkg/analyzer/cronjob.go
index b9ab541cee..e2b4310680 100644
--- a/pkg/analyzer/cronjob.go
+++ b/pkg/analyzer/cronjob.go
@@ -18,9 +18,11 @@ import (
"time"
"github.com/k8sgpt-ai/k8sgpt/pkg/common"
+ "github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
cron "github.com/robfig/cron/v3"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
)
type CronJobAnalyzer struct{}
@@ -28,6 +30,14 @@ type CronJobAnalyzer struct{}
func (analyzer CronJobAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
kind := "CronJob"
+ apiDoc := kubernetes.K8sApiReference{
+ Kind: kind,
+ ApiVersion: schema.GroupVersion{
+ Group: "batch",
+ Version: "v1",
+ },
+ OpenapiSchema: a.OpenapiSchema,
+ }
AnalyzerErrorsMetric.DeletePartialMatch(map[string]string{
"analyzer_name": kind,
@@ -43,8 +53,11 @@ func (analyzer CronJobAnalyzer) Analyze(a common.Analyzer) ([]common.Result, err
for _, cronJob := range cronJobList.Items {
var failures []common.Failure
if cronJob.Spec.Suspend != nil && *cronJob.Spec.Suspend {
+ doc := apiDoc.GetApiDocV2("spec.suspend")
+
failures = append(failures, common.Failure{
- Text: fmt.Sprintf("CronJob %s is suspended", cronJob.Name),
+ Text: fmt.Sprintf("CronJob %s is suspended", cronJob.Name),
+ KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: cronJob.Namespace,
@@ -59,8 +72,11 @@ func (analyzer CronJobAnalyzer) Analyze(a common.Analyzer) ([]common.Result, err
} else {
// check the schedule format
if _, err := CheckCronScheduleIsValid(cronJob.Spec.Schedule); err != nil {
+ doc := apiDoc.GetApiDocV2("spec.schedule")
+
failures = append(failures, common.Failure{
- Text: fmt.Sprintf("CronJob %s has an invalid schedule: %s", cronJob.Name, err.Error()),
+ Text: fmt.Sprintf("CronJob %s has an invalid schedule: %s", cronJob.Name, err.Error()),
+ KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: cronJob.Namespace,
@@ -78,9 +94,11 @@ func (analyzer CronJobAnalyzer) Analyze(a common.Analyzer) ([]common.Result, err
if cronJob.Spec.StartingDeadlineSeconds != nil {
deadline := time.Duration(*cronJob.Spec.StartingDeadlineSeconds) * time.Second
if deadline < 0 {
+ doc := apiDoc.GetApiDocV2("spec.startingDeadlineSeconds")
failures = append(failures, common.Failure{
- Text: fmt.Sprintf("CronJob %s has a negative starting deadline", cronJob.Name),
+ Text: fmt.Sprintf("CronJob %s has a negative starting deadline", cronJob.Name),
+ KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: cronJob.Namespace,
diff --git a/pkg/analyzer/deployment.go b/pkg/analyzer/deployment.go
index 99f2fd4254..d2d7b19ef1 100644
--- a/pkg/analyzer/deployment.go
+++ b/pkg/analyzer/deployment.go
@@ -18,8 +18,10 @@ import (
"fmt"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
"github.com/k8sgpt-ai/k8sgpt/pkg/common"
+ "github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
)
@@ -31,6 +33,14 @@ type DeploymentAnalyzer struct {
func (d DeploymentAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
kind := "Deployment"
+ apiDoc := kubernetes.K8sApiReference{
+ Kind: kind,
+ ApiVersion: schema.GroupVersion{
+ Group: "apps",
+ Version: "v1",
+ },
+ OpenapiSchema: a.OpenapiSchema,
+ }
AnalyzerErrorsMetric.DeletePartialMatch(map[string]string{
"analyzer_name": kind,
@@ -45,8 +55,11 @@ func (d DeploymentAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error)
for _, deployment := range deployments.Items {
var failures []common.Failure
if *deployment.Spec.Replicas != deployment.Status.Replicas {
+ doc := apiDoc.GetApiDocV2("spec.replicas")
+
failures = append(failures, common.Failure{
- Text: fmt.Sprintf("Deployment %s/%s has %d replicas but %d are available", deployment.Namespace, deployment.Name, *deployment.Spec.Replicas, deployment.Status.Replicas),
+ Text: fmt.Sprintf("Deployment %s/%s has %d replicas but %d are available", deployment.Namespace, deployment.Name, *deployment.Spec.Replicas, deployment.Status.Replicas),
+ KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: deployment.Namespace,
diff --git a/pkg/analyzer/hpa.go b/pkg/analyzer/hpa.go
index 82de5a74d6..76e4a5ac00 100644
--- a/pkg/analyzer/hpa.go
+++ b/pkg/analyzer/hpa.go
@@ -17,10 +17,12 @@ import (
"fmt"
"github.com/k8sgpt-ai/k8sgpt/pkg/common"
+ "github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
)
type HpaAnalyzer struct{}
@@ -28,6 +30,14 @@ type HpaAnalyzer struct{}
func (HpaAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
kind := "HorizontalPodAutoscaler"
+ apiDoc := kubernetes.K8sApiReference{
+ Kind: kind,
+ ApiVersion: schema.GroupVersion{
+ Group: "autoscaling",
+ Version: "v1",
+ },
+ OpenapiSchema: a.OpenapiSchema,
+ }
AnalyzerErrorsMetric.DeletePartialMatch(map[string]string{
"analyzer_name": kind,
@@ -76,8 +86,11 @@ func (HpaAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
}
if podInfo == nil {
+ doc := apiDoc.GetApiDocV2("spec.scaleTargetRef")
+
failures = append(failures, common.Failure{
- Text: fmt.Sprintf("HorizontalPodAutoscaler uses %s/%s as ScaleTargetRef which does not exist.", scaleTargetRef.Kind, scaleTargetRef.Name),
+ Text: fmt.Sprintf("HorizontalPodAutoscaler uses %s/%s as ScaleTargetRef which does not exist.", scaleTargetRef.Kind, scaleTargetRef.Name),
+ KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: scaleTargetRef.Name,
@@ -94,8 +107,11 @@ func (HpaAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
}
if containers <= 0 {
+ doc := apiDoc.GetApiDocV2("spec.scaleTargetRef.kind")
+
failures = append(failures, common.Failure{
- Text: fmt.Sprintf("%s %s/%s does not have resource configured.", scaleTargetRef.Kind, a.Namespace, scaleTargetRef.Name),
+ Text: fmt.Sprintf("%s %s/%s does not have resource configured.", scaleTargetRef.Kind, a.Namespace, scaleTargetRef.Name),
+ KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: scaleTargetRef.Name,
diff --git a/pkg/analyzer/ingress.go b/pkg/analyzer/ingress.go
index 6ce2d21549..bc4ba084fb 100644
--- a/pkg/analyzer/ingress.go
+++ b/pkg/analyzer/ingress.go
@@ -17,8 +17,10 @@ import (
"fmt"
"github.com/k8sgpt-ai/k8sgpt/pkg/common"
+ "github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
)
type IngressAnalyzer struct{}
@@ -26,6 +28,14 @@ type IngressAnalyzer struct{}
func (IngressAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
kind := "Ingress"
+ apiDoc := kubernetes.K8sApiReference{
+ Kind: kind,
+ ApiVersion: schema.GroupVersion{
+ Group: "networking",
+ Version: "v1",
+ },
+ OpenapiSchema: a.OpenapiSchema,
+ }
AnalyzerErrorsMetric.DeletePartialMatch(map[string]string{
"analyzer_name": kind,
@@ -46,8 +56,11 @@ func (IngressAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
if ingressClassName == nil {
ingClassValue := ing.Annotations["kubernetes.io/ingress.class"]
if ingClassValue == "" {
+ doc := apiDoc.GetApiDocV2("spec.ingressClassName")
+
failures = append(failures, common.Failure{
- Text: fmt.Sprintf("Ingress %s/%s does not specify an Ingress class.", ing.Namespace, ing.Name),
+ Text: fmt.Sprintf("Ingress %s/%s does not specify an Ingress class.", ing.Namespace, ing.Name),
+ KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: ing.Namespace,
@@ -68,8 +81,11 @@ func (IngressAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
if ingressClassName != nil {
_, err := a.Client.GetClient().NetworkingV1().IngressClasses().Get(a.Context, *ingressClassName, metav1.GetOptions{})
if err != nil {
+ doc := apiDoc.GetApiDocV2("spec.ingressClassName")
+
failures = append(failures, common.Failure{
- Text: fmt.Sprintf("Ingress uses the ingress class %s which does not exist.", *ingressClassName),
+ Text: fmt.Sprintf("Ingress uses the ingress class %s which does not exist.", *ingressClassName),
+ KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: *ingressClassName,
@@ -86,8 +102,11 @@ func (IngressAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
for _, path := range rule.HTTP.Paths {
_, err := a.Client.GetClient().CoreV1().Services(ing.Namespace).Get(a.Context, path.Backend.Service.Name, metav1.GetOptions{})
if err != nil {
+ doc := apiDoc.GetApiDocV2("spec.rules.http.paths.backend.service")
+
failures = append(failures, common.Failure{
- Text: fmt.Sprintf("Ingress uses the service %s/%s which does not exist.", ing.Namespace, path.Backend.Service.Name),
+ Text: fmt.Sprintf("Ingress uses the service %s/%s which does not exist.", ing.Namespace, path.Backend.Service.Name),
+ KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: ing.Namespace,
@@ -106,8 +125,11 @@ func (IngressAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
for _, tls := range ing.Spec.TLS {
_, err := a.Client.GetClient().CoreV1().Secrets(ing.Namespace).Get(a.Context, tls.SecretName, metav1.GetOptions{})
if err != nil {
+ doc := apiDoc.GetApiDocV2("spec.tls.secretName")
+
failures = append(failures, common.Failure{
- Text: fmt.Sprintf("Ingress uses the secret %s/%s as a TLS certificate which does not exist.", ing.Namespace, tls.SecretName),
+ Text: fmt.Sprintf("Ingress uses the secret %s/%s as a TLS certificate which does not exist.", ing.Namespace, tls.SecretName),
+ KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: ing.Namespace,
diff --git a/pkg/analyzer/netpol.go b/pkg/analyzer/netpol.go
index a604c17547..aeb302dcd8 100644
--- a/pkg/analyzer/netpol.go
+++ b/pkg/analyzer/netpol.go
@@ -17,8 +17,10 @@ import (
"fmt"
"github.com/k8sgpt-ai/k8sgpt/pkg/common"
+ "github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
)
type NetworkPolicyAnalyzer struct{}
@@ -26,6 +28,14 @@ type NetworkPolicyAnalyzer struct{}
func (NetworkPolicyAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
kind := "NetworkPolicy"
+ apiDoc := kubernetes.K8sApiReference{
+ Kind: kind,
+ ApiVersion: schema.GroupVersion{
+ Group: "networking",
+ Version: "v1",
+ },
+ OpenapiSchema: a.OpenapiSchema,
+ }
AnalyzerErrorsMetric.DeletePartialMatch(map[string]string{
"analyzer_name": kind,
@@ -45,8 +55,11 @@ func (NetworkPolicyAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error)
// Check if policy allows traffic to all pods in the namespace
if len(policy.Spec.PodSelector.MatchLabels) == 0 {
+ doc := apiDoc.GetApiDocV2("spec.podSelector.matchLabels")
+
failures = append(failures, common.Failure{
- Text: fmt.Sprintf("Network policy allows traffic to all pods: %s", policy.Name),
+ Text: fmt.Sprintf("Network policy allows traffic to all pods: %s", policy.Name),
+ KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: policy.Name,
diff --git a/pkg/analyzer/pdb.go b/pkg/analyzer/pdb.go
index 1d472aac7b..2bc0efee7e 100644
--- a/pkg/analyzer/pdb.go
+++ b/pkg/analyzer/pdb.go
@@ -17,8 +17,10 @@ import (
"fmt"
"github.com/k8sgpt-ai/k8sgpt/pkg/common"
+ "github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
)
type PdbAnalyzer struct{}
@@ -26,6 +28,14 @@ type PdbAnalyzer struct{}
func (PdbAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
kind := "PodDisruptionBudget"
+ apiDoc := kubernetes.K8sApiReference{
+ Kind: kind,
+ ApiVersion: schema.GroupVersion{
+ Group: "policy",
+ Version: "v1",
+ },
+ OpenapiSchema: a.OpenapiSchema,
+ }
AnalyzerErrorsMetric.DeletePartialMatch(map[string]string{
"analyzer_name": kind,
@@ -49,8 +59,11 @@ func (PdbAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
if evt.Reason == "NoPods" && evt.Message != "" {
if pdb.Spec.Selector != nil {
for k, v := range pdb.Spec.Selector.MatchLabels {
+ doc := apiDoc.GetApiDocV2("spec.selector.matchLabels")
+
failures = append(failures, common.Failure{
- Text: fmt.Sprintf("%s, expected label %s=%s", evt.Message, k, v),
+ Text: fmt.Sprintf("%s, expected label %s=%s", evt.Message, k, v),
+ KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: k,
@@ -64,15 +77,21 @@ func (PdbAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
})
}
for _, v := range pdb.Spec.Selector.MatchExpressions {
+ doc := apiDoc.GetApiDocV2("spec.selector.matchExpressions")
+
failures = append(failures, common.Failure{
- Text: fmt.Sprintf("%s, expected expression %s", evt.Message, v),
- Sensitive: []common.Sensitive{},
+ Text: fmt.Sprintf("%s, expected expression %s", evt.Message, v),
+ KubernetesDoc: doc,
+ Sensitive: []common.Sensitive{},
})
}
} else {
+ doc := apiDoc.GetApiDocV2("spec.selector")
+
failures = append(failures, common.Failure{
- Text: fmt.Sprintf("%s, selector is nil", evt.Message),
- Sensitive: []common.Sensitive{},
+ Text: fmt.Sprintf("%s, selector is nil", evt.Message),
+ KubernetesDoc: doc,
+ Sensitive: []common.Sensitive{},
})
}
}
diff --git a/pkg/analyzer/service.go b/pkg/analyzer/service.go
index 666ec7f944..9293a8d9de 100644
--- a/pkg/analyzer/service.go
+++ b/pkg/analyzer/service.go
@@ -18,8 +18,10 @@ import (
"github.com/fatih/color"
"github.com/k8sgpt-ai/k8sgpt/pkg/common"
+ "github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
)
type ServiceAnalyzer struct{}
@@ -27,6 +29,14 @@ type ServiceAnalyzer struct{}
func (ServiceAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
kind := "Service"
+ apiDoc := kubernetes.K8sApiReference{
+ Kind: kind,
+ ApiVersion: schema.GroupVersion{
+ Group: "",
+ Version: "v1",
+ },
+ OpenapiSchema: a.OpenapiSchema,
+ }
AnalyzerErrorsMetric.DeletePartialMatch(map[string]string{
"analyzer_name": kind,
@@ -52,8 +62,11 @@ func (ServiceAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
}
for k, v := range svc.Spec.Selector {
+ doc := apiDoc.GetApiDocV2("spec.selector")
+
failures = append(failures, common.Failure{
- Text: fmt.Sprintf("Service has no endpoints, expected label %s=%s", k, v),
+ Text: fmt.Sprintf("Service has no endpoints, expected label %s=%s", k, v),
+ KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: k,
@@ -72,14 +85,20 @@ func (ServiceAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
// Check through container status to check for crashes
for _, epSubset := range ep.Subsets {
+ apiDoc.Kind = "Endpoints"
+
if len(epSubset.NotReadyAddresses) > 0 {
for _, addresses := range epSubset.NotReadyAddresses {
count++
pods = append(pods, addresses.TargetRef.Kind+"/"+addresses.TargetRef.Name)
}
+
+ doc := apiDoc.GetApiDocV2("subsets.notReadyAddresses")
+
failures = append(failures, common.Failure{
- Text: fmt.Sprintf("Service has not ready endpoints, pods: %s, expected %d", pods, count),
- Sensitive: []common.Sensitive{},
+ Text: fmt.Sprintf("Service has not ready endpoints, pods: %s, expected %d", pods, count),
+ KubernetesDoc: doc,
+ Sensitive: []common.Sensitive{},
})
}
}
diff --git a/pkg/analyzer/statefulset.go b/pkg/analyzer/statefulset.go
index 42e33cd9ee..106c487c3d 100644
--- a/pkg/analyzer/statefulset.go
+++ b/pkg/analyzer/statefulset.go
@@ -17,8 +17,10 @@ import (
"fmt"
"github.com/k8sgpt-ai/k8sgpt/pkg/common"
+ "github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
)
type StatefulSetAnalyzer struct{}
@@ -26,6 +28,14 @@ type StatefulSetAnalyzer struct{}
func (StatefulSetAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
kind := "StatefulSet"
+ apiDoc := kubernetes.K8sApiReference{
+ Kind: kind,
+ ApiVersion: schema.GroupVersion{
+ Group: "apps",
+ Version: "v1",
+ },
+ OpenapiSchema: a.OpenapiSchema,
+ }
AnalyzerErrorsMetric.DeletePartialMatch(map[string]string{
"analyzer_name": kind,
@@ -44,8 +54,15 @@ func (StatefulSetAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
serviceName := sts.Spec.ServiceName
_, err := a.Client.GetClient().CoreV1().Services(sts.Namespace).Get(a.Context, serviceName, metav1.GetOptions{})
if err != nil {
+ doc := apiDoc.GetApiDocV2("spec.serviceName")
+
failures = append(failures, common.Failure{
- Text: fmt.Sprintf("StatefulSet uses the service %s/%s which does not exist.", sts.Namespace, serviceName),
+ Text: fmt.Sprintf(
+ "StatefulSet uses the service %s/%s which does not exist.",
+ sts.Namespace,
+ serviceName,
+ ),
+ KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: sts.Namespace,
diff --git a/pkg/common/types.go b/pkg/common/types.go
index 23dcf5d37c..35bd539442 100644
--- a/pkg/common/types.go
+++ b/pkg/common/types.go
@@ -17,6 +17,7 @@ import (
"context"
trivy "github.com/aquasecurity/trivy-operator/pkg/apis/aquasecurity/v1alpha1"
+ openapi_v2 "github.com/google/gnostic/openapiv2"
"github.com/k8sgpt-ai/k8sgpt/pkg/ai"
"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
appsv1 "k8s.io/api/apps/v1"
@@ -31,12 +32,13 @@ type IAnalyzer interface {
}
type Analyzer struct {
- Client *kubernetes.Client
- Context context.Context
- Namespace string
- AIClient ai.IAI
- PreAnalysis map[string]PreAnalysis
- Results []Result
+ Client *kubernetes.Client
+ Context context.Context
+ Namespace string
+ AIClient ai.IAI
+ PreAnalysis map[string]PreAnalysis
+ Results []Result
+ OpenapiSchema *openapi_v2.Document
}
type PreAnalysis struct {
@@ -65,8 +67,9 @@ type Result struct {
}
type Failure struct {
- Text string
- Sensitive []Sensitive
+ Text string
+ KubernetesDoc string
+ Sensitive []Sensitive
}
type Sensitive struct {
diff --git a/pkg/kubernetes/apireference.go b/pkg/kubernetes/apireference.go
new file mode 100644
index 0000000000..d62fbff27d
--- /dev/null
+++ b/pkg/kubernetes/apireference.go
@@ -0,0 +1,70 @@
+package kubernetes
+
+import (
+ "fmt"
+ "strings"
+
+ openapi_v2 "github.com/google/gnostic/openapiv2"
+)
+
+func (k *K8sApiReference) GetApiDocV2(field string) string {
+ startPoint := ""
+ // the path must be formatted like "path1.path2.path3"
+ paths := strings.Split(field, ".")
+ group := strings.Split(k.ApiVersion.Group, ".")
+ definitions := k.OpenapiSchema.GetDefinitions().GetAdditionalProperties()
+
+ // extract the start point by searching for the highest-level definition matching the requested group and kind
+ for _, prop := range definitions {
+ if strings.HasSuffix(prop.GetName(), fmt.Sprintf("%s.%s.%s", group[0], k.ApiVersion.Version, k.Kind)) {
+ startPoint = prop.GetName()
+
+ break
+ }
+ }
+
+ // recursively parse the definitions to find the description of the latest part of the given path
+ description := k.recursePath(definitions, startPoint, paths)
+
+ return description
+}
+
+func (k *K8sApiReference) recursePath(definitions []*openapi_v2.NamedSchema, leaf string, paths []string) string {
+ description := ""
+
+ for _, prop := range definitions {
+ // search the requested leaf
+ if prop.GetName() == leaf {
+ for _, addProp := range prop.GetValue().GetProperties().GetAdditionalProperties() {
+ // search the additional property of the leaf corresponding to the current path segment
+ if addProp.GetName() == paths[0] {
+ // if this is the last path segment, or its value is a plain string, take its description and stop
+ if len(paths) == 1 || addProp.GetValue().GetType().String() == "value:\"string\"" {
+ // extract the path description as we are at the end of the paths
+ description = addProp.GetValue().Description
+ } else {
+ // the path is an object, we extract the xref
+ if addProp.GetValue().GetXRef() != "" {
+ splitRef := strings.Split(addProp.GetValue().GetXRef(), "/")
+ reducedPaths := paths[1:]
+ description = k.recursePath(definitions, splitRef[len(splitRef)-1], reducedPaths)
+ }
+
+ // the path is an array, we take the first xref from the items
+ if len(addProp.GetValue().GetItems().GetSchema()) == 1 {
+ splitRef := strings.Split(addProp.GetValue().GetItems().GetSchema()[0].GetXRef(), "/")
+ reducedPaths := paths[1:]
+ description = k.recursePath(definitions, splitRef[len(splitRef)-1], reducedPaths)
+ }
+ }
+
+ break
+ }
+ }
+
+ break
+ }
+ }
+
+ return description
+}
diff --git a/pkg/kubernetes/kubernetes.go b/pkg/kubernetes/kubernetes.go
index 926fa8c98c..342f1aa65e 100644
--- a/pkg/kubernetes/kubernetes.go
+++ b/pkg/kubernetes/kubernetes.go
@@ -22,12 +22,6 @@ import (
"k8s.io/kubectl/pkg/scheme"
)
-type Client struct {
- Client kubernetes.Interface
- RestClient rest.Interface
- Config *rest.Config
-}
-
func (c *Client) GetConfig() *rest.Config {
return c.Config
}
@@ -74,9 +68,15 @@ func NewClient(kubecontext string, kubeconfig string) (*Client, error) {
return nil, err
}
+ serverVersion, err := clientSet.ServerVersion()
+ if err != nil {
+ return nil, err
+ }
+
return &Client{
- Client: clientSet,
- RestClient: restClient,
- Config: config,
+ Client: clientSet,
+ RestClient: restClient,
+ Config: config,
+ ServerVersion: serverVersion,
}, nil
}
diff --git a/pkg/kubernetes/types.go b/pkg/kubernetes/types.go
new file mode 100644
index 0000000000..b97745a5b7
--- /dev/null
+++ b/pkg/kubernetes/types.go
@@ -0,0 +1,22 @@
+package kubernetes
+
+import (
+ openapi_v2 "github.com/google/gnostic/openapiv2"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/version"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+)
+
+type Client struct {
+ Client kubernetes.Interface
+ RestClient rest.Interface
+ Config *rest.Config
+ ServerVersion *version.Info
+}
+
+type K8sApiReference struct {
+ ApiVersion schema.GroupVersion
+ Kind string
+ OpenapiSchema *openapi_v2.Document
+}
diff --git a/pkg/server/analyze.go b/pkg/server/analyze.go
index ca5190c4d4..204f6ba803 100644
--- a/pkg/server/analyze.go
+++ b/pkg/server/analyze.go
@@ -32,6 +32,7 @@ func (h *handler) Analyze(ctx context.Context, i *schemav1.AnalyzeRequest) (
i.Nocache,
i.Explain,
int(i.MaxConcurrency),
+ false, // Kubernetes Doc disabled in server mode
)
if err != nil {
return &schemav1.AnalyzeResponse{}, err