Add support for a list of k8s objects #221

Merged 1 commit on Apr 14, 2020
39 changes: 39 additions & 0 deletions fixtures/list_invalid.yaml
@@ -0,0 +1,39 @@
apiVersion: v1
kind: List
items:
- apiVersion: v1
  kind: Service
  metadata:
    name: redis-master
    labels:
      app: redis
      tier: backend
      role: master
  spec:
    ports:
      # the port that this service should serve on
    - port: 6379
      targetPort: 6379
    selector:
      app: redis
      tier: backend
      role: master
- apiVersion: v1
  kind: ReplicationController
  metadata:
    name: "bob"
  spec:
    replicas: asd"
    selector:
      app: nginx
    templates:
      metadata:
        name: nginx
        labels:
          app: nginx
      spec:
        containers:
        - name: nginx
          image: nginx
          ports:
          - containerPort: 80
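
The deliberate mistakes in this fixture are replicas: asd" (a string where the ReplicationController schema expects an integer) and templates: where the standard field name is template:. As a rough sketch of how the fixture could be exercised outside the test suite, using the Validate/NewDefaultConfig API visible in the test changes further down (the import path is an assumption, and kubeval fetches schemas over the network by default):

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	// Assumed import path; adjust to wherever your kubeval checkout lives.
	"github.com/instrumenta/kubeval/kubeval"
)

func main() {
	// Read the intentionally invalid List fixture.
	contents, err := ioutil.ReadFile("fixtures/list_invalid.yaml")
	if err != nil {
		log.Fatal(err)
	}

	config := kubeval.NewDefaultConfig()
	config.FileName = "list_invalid.yaml"

	// With this PR, Validate returns one result per List item rather than
	// treating the whole List as a single document.
	results, err := kubeval.Validate(contents, config)
	if err != nil {
		log.Fatal(err)
	}

	for _, result := range results {
		for _, e := range result.Errors {
			fmt.Println(e)
		}
	}
}
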
174 changes: 174 additions & 0 deletions fixtures/list_valid.yaml
@@ -0,0 +1,174 @@
apiVersion: v1
kind: List
items:
- apiVersion: v1
  kind: Service
  metadata:
    name: redis-master
    labels:
      app: redis
      tier: backend
      role: master
  spec:
    ports:
      # the port that this service should serve on
    - port: 6379
      targetPort: 6379
    selector:
      app: redis
      tier: backend
      role: master
- apiVersion: v1
  kind: ReplicationController
  metadata:
    name: redis-master
    # these labels can be applied automatically
    # from the labels in the pod template if not set
    labels:
      app: redis
      role: master
      tier: backend
  spec:
    # this replicas value is default
    # modify it according to your case
    replicas: 1
    # selector can be applied automatically
    # from the labels in the pod template if not set
    # selector:
    #   app: guestbook
    #   role: master
    #   tier: backend
    template:
      metadata:
        labels:
          app: redis
          role: master
          tier: backend
      spec:
        containers:
        - name: master
          image: redis
          resources:
            requests:
              cpu: 100m
              memory: 100Mi
          ports:
          - containerPort: 6379
- apiVersion: v1
  kind: Service
  metadata:
    name: redis-slave
    labels:
      app: redis
      tier: backend
      role: slave
  spec:
    ports:
      # the port that this service should serve on
    - port: 6379
    selector:
      app: redis
      tier: backend
      role: slave
- apiVersion: v1
  kind: ReplicationController
  metadata:
    name: redis-slave
    # these labels can be applied automatically
    # from the labels in the pod template if not set
    labels:
      app: redis
      role: slave
      tier: backend
  spec:
    # this replicas value is default
    # modify it according to your case
    replicas: 2
    # selector can be applied automatically
    # from the labels in the pod template if not set
    # selector:
    #   app: guestbook
    #   role: slave
    #   tier: backend
    template:
      metadata:
        labels:
          app: redis
          role: slave
          tier: backend
      spec:
        containers:
        - name: slave
          image: gcr.io/google_samples/gb-redisslave:v1
          resources:
            requests:
              cpu: 100m
              memory: 100Mi
          env:
          - name: GET_HOSTS_FROM
            value: dns
            # If your cluster config does not include a dns service, then to
            # instead access an environment variable to find the master
            # service's host, comment out the 'value: dns' line above, and
            # uncomment the line below.
            # value: env
          ports:
          - containerPort: 6379
- apiVersion: v1
  kind: Service
  metadata:
    name: frontend
    labels:
      app: guestbook
      tier: frontend
  spec:
    # if your cluster supports it, uncomment the following to automatically create
    # an external load-balanced IP for the frontend service.
    # type: LoadBalancer
    ports:
      # the port that this service should serve on
    - port: 80
    selector:
      app: guestbook
      tier: frontend
- apiVersion: v1
  kind: ReplicationController
  metadata:
    name: frontend
    # these labels can be applied automatically
    # from the labels in the pod template if not set
    labels:
      app: guestbook
      tier: frontend
  spec:
    # this replicas value is default
    # modify it according to your case
    replicas: 3
    # selector can be applied automatically
    # from the labels in the pod template if not set
    # selector:
    #   app: guestbook
    #   tier: frontend
    template:
      metadata:
        labels:
          app: guestbook
          tier: frontend
      spec:
        containers:
        - name: php-redis
          image: gcr.io/google_samples/gb-frontend:v3
          resources:
            requests:
              cpu: 100m
              memory: 100Mi
          env:
          - name: GET_HOSTS_FROM
            value: dns
            # If your cluster config does not include a dns service, then to
            # instead access environment variables to find service host
            # info, comment out the 'value: dns' line above, and uncomment the
            # line below.
            # value: env
          ports:
          - containerPort: 80
20 changes: 19 additions & 1 deletion kubeval/kubeval.go
@@ -241,7 +241,25 @@ func ValidateWithCache(input []byte, schemaCache map[string]*gojsonschema.Schema
 		return results, nil
 	}
 
-	bits := bytes.Split(input, []byte(detectLineBreak(input)+"---"+detectLineBreak(input)))
+	list := struct {
+		Version string
+		Kind    string
+		Items   []interface{}
+	}{}
+
+	unmarshalErr := yaml.Unmarshal(input, &list)
+	isYamlList := unmarshalErr == nil && list.Items != nil && len(list.Items) > 0
+
+	var bits [][]byte
+	if isYamlList {
+		bits = make([][]byte, len(list.Items))
+		for i, item := range list.Items {
+			b, _ := yaml.Marshal(item)
+			bits[i] = b
+		}
+	} else {
+		bits = bytes.Split(input, []byte(detectLineBreak(input)+"---"+detectLineBreak(input)))
+	}
 
 	var errors *multierror.Error
 
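The change above detects a v1 List by unmarshalling the input into an anonymous struct with an Items slice: when items are present, each item is marshalled back out as its own YAML document and validated individually; otherwise the input still goes through the existing "---" splitting. A self-contained sketch of that detection step, assuming a YAML library with the usual Marshal/Unmarshal signatures (gopkg.in/yaml.v2 here; kubeval's actual YAML dependency may differ):

package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// splitList returns each item of a v1 List as its own YAML document.
// Anything that does not look like a List is returned unchanged as a
// single document, so the caller can fall back to splitting on "---".
func splitList(input []byte) ([][]byte, error) {
	list := struct {
		Version string
		Kind    string
		Items   []interface{}
	}{}

	if err := yaml.Unmarshal(input, &list); err != nil || len(list.Items) == 0 {
		return [][]byte{input}, nil
	}

	docs := make([][]byte, len(list.Items))
	for i, item := range list.Items {
		b, err := yaml.Marshal(item)
		if err != nil {
			return nil, err
		}
		docs[i] = b
	}
	return docs, nil
}

func main() {
	manifest := []byte(`apiVersion: v1
kind: List
items:
- apiVersion: v1
  kind: Service
  metadata:
    name: redis-master
- apiVersion: v1
  kind: ReplicationController
  metadata:
    name: redis-master
`)

	docs, err := splitList(manifest)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("split into %d document(s)\n", len(docs)) // prints: split into 2 document(s)
}

Keeping the "---" fallback means plain multi-document manifests behave exactly as before; only inputs that actually carry an items list take the new path.
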
9 changes: 8 additions & 1 deletion kubeval/kubeval_test.go
@@ -34,6 +34,7 @@ func TestValidateValidInputs(t *testing.T) {
 		"extra_property.yaml",
 		"full_domain_group.yaml",
 		"unconventional_keys.yaml",
+		"list_valid.yaml",
 	}
 	for _, test := range tests {
 		filePath, _ := filepath.Abs("../fixtures/" + test)
@@ -60,6 +61,7 @@ func TestValidateValidInputsWithCache(t *testing.T) {
 		"extra_property.yaml",
 		"full_domain_group.yaml",
 		"unconventional_keys.yaml",
+		"list_valid.yaml",
 	}
 	schemaCache := make(map[string]*gojsonschema.Schema, 0)
 
@@ -147,14 +149,19 @@ func TestValidateInputsWithErrors(t *testing.T) {
 	var tests = []string{
 		"invalid.yaml",
 		"multi_invalid.yaml",
+		"list_invalid.yaml",
 	}
 	for _, test := range tests {
 		filePath, _ := filepath.Abs("../fixtures/" + test)
 		fileContents, _ := ioutil.ReadFile(filePath)
 		config := NewDefaultConfig()
 		config.FileName = test
 		results, _ := Validate(fileContents, config)
-		if len(results[0].Errors) == 0 {
+		errorCount := 0

Inline comment from the PR author: Not sure if this was intentional, but this was only checking the first result. My fixture yaml initially failed this test.

+		for _, result := range results {
+			errorCount += len(result.Errors)
+		}
+		if errorCount == 0 {
 			t.Errorf("Validate should not pass when testing invalid configuration in " + test)
 		}
 	}