
Commit

Merge pull request #220 from ingvagabund/add-support-for-kubemark
Add support for kubemark
openshift-merge-robot authored Mar 1, 2019
2 parents 9b9b9f9 + 86992f9 commit 050a65a
Showing 42 changed files with 1,188 additions and 126 deletions.
33 changes: 17 additions & 16 deletions Gopkg.lock

Some generated files are not rendered by default.

7 changes: 7 additions & 0 deletions Makefile
@@ -52,6 +52,7 @@ build-integration: ## Build integration test binary
	mkdir -p bin
	$(DOCKER_CMD) go build $(GOGCFLAGS) -o bin/integration github.com/openshift/machine-api-operator/test/integration

.PHONY: test-e2e
test-e2e: ## Run openshift specific e2e test
	go test -timeout 60m \
		-v ./vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e \
@@ -60,6 +61,12 @@ test-e2e: ## Run openshift specific e2e test
		-ginkgo.v \
		-args -v 5 -logtostderr true

.PHONY: deploy-kubemark
deploy-kubemark:
	kustomize build config | kubectl apply -f -
	kustomize build | kubectl apply -f -
	kubectl apply -f config/kubemark-install-config.yaml

.PHONY: test
test: ## Run tests
	@echo -e "\033[32mTesting...\033[0m"
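For reference, the new `deploy-kubemark` target simply chains the three commands introduced elsewhere in this PR. A usage sketch, assuming `kustomize` and `kubectl` are installed and the kubeconfig points at the target cluster:

```sh
# One-shot deployment of the kubemark prerequisites, the MAO, and the install-config.
$ make deploy-kubemark

# Equivalent to running the recipe by hand:
$ kustomize build config | kubectl apply -f -
$ kustomize build | kubectl apply -f -
$ kubectl apply -f config/kubemark-install-config.yaml
```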
32 changes: 32 additions & 0 deletions README.md
@@ -129,6 +129,38 @@ However you can run it in a vanilla Kubernetes cluster by precreating some asset
- Then you can run it as a [deployment](install/0000_50_machine-api-operator_08_deployment.yaml)
- You should then be able to deploy a [cluster](test/integration/manifests/cluster.yaml) and a [machineSet](test/integration/manifests/machineset.yaml) object

## Machine API operator with Kubemark over Kubernetes

INFO: For development and testing purposes only

1. Deploy MAO over Kubernetes:
```sh
$ kustomize build | kubectl apply -f -
```

2. Deploy [Kubemark actuator](https://github.com/openshift/cluster-api-provider-kubemark) prerequisites:
```sh
$ kustomize build config | kubectl apply -f -
```

3. Create the `cluster-config-v1` ConfigMap to tell the MAO to deploy the `kubemark` provider:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: cluster-config-v1
  namespace: kube-system
data:
  install-config: |-
    platform:
      kubemark: {}
```

The file is already present as `config/kubemark-install-config.yaml`, so it is sufficient to run:
```sh
$ kubectl apply -f config/kubemark-install-config.yaml
```
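A quick sanity check after the three steps; a hedged sketch, since pod and resource names depend on the actuator images actually deployed:

```sh
# The ConfigMap selecting the kubemark platform
$ kubectl -n kube-system get configmap cluster-config-v1

# Controllers brought up by the MAO and the kubemark actuator (names vary)
$ kubectl -n kube-system get deployments,pods

# Machine API objects, once a MachineSet is created
$ kubectl get machines,machinesets --all-namespaces
```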

## CI & tests

Run unit test:
9 changes: 7 additions & 2 deletions cmd/machine-api-operator/main.go
@@ -2,14 +2,16 @@ package main

import (
"flag"
"os"

"github.com/golang/glog"
"github.com/spf13/cobra"
)

var componentNamespace = "openshift-machine-api"

const (
	componentName = "machine-api-operator"
	componentNamespace = "openshift-machine-api"
	componentName = "machine-api-operator"
)

var (
@@ -26,6 +28,9 @@ func init() {
}

func main() {
	if namespace, ok := os.LookupEnv("COMPONENT_NAMESPACE"); ok {
		componentNamespace = namespace
	}
	if err := rootCmd.Execute(); err != nil {
		glog.Exitf("Error executing mao: %v", err)
	}
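The new `os.LookupEnv` check lets the install namespace be overridden at runtime via `COMPONENT_NAMESPACE` instead of the compiled-in `openshift-machine-api`. A hedged sketch of exercising it locally; the binary name and subcommand are placeholders, not taken from this diff:

```sh
# Point the operator at kube-system (as the kubemark config does) instead of the default namespace.
$ export COMPONENT_NAMESPACE=kube-system
$ ./machine-api-operator <subcommand> ...   # placeholder invocation
```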
@@ -0,0 +1,42 @@
kind: CustomResourceDefinition
apiVersion: apiextensions.k8s.io/v1beta1
metadata:
  name: clusteroperators.config.openshift.io
spec:
  additionalPrinterColumns:
  - JSONPath: .status.version
    description: The version the operator is at.
    name: Version
    type: string
  - JSONPath: .status.conditions[?(@.type=="Available")].status
    description: Whether the operator is running and stable.
    name: Available
    type: string
  - JSONPath: .status.conditions[?(@.type=="Progressing")].status
    description: Whether the operator is processing changes.
    name: Progressing
    type: string
  - JSONPath: .status.conditions[?(@.type=="Failing")].status
    description: Whether the operator is failing changes.
    name: Failing
    type: string
  - JSONPath: .status.conditions[?(@.type=="Available")].lastTransitionTime
    description: The time the operator's Available status last changed.
    name: Since
    type: date
  group: config.openshift.io
  names:
    kind: ClusterOperator
    listKind: ClusterOperatorList
    plural: clusteroperators
    singular: clusteroperator
    shortNames:
    - co
  scope: Cluster
  subresources:
    status: {}
  version: v1
  versions:
  - name: v1
    served: true
    storage: true
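The `additionalPrinterColumns` above drive the default `kubectl get` output for ClusterOperator objects. A small usage sketch (the `machine-api` object name is an assumption):

```sh
# "co" is the short name declared under spec.names.shortNames.
# Columns shown: VERSION, AVAILABLE, PROGRESSING, FAILING, SINCE.
$ kubectl get co
$ kubectl get clusteroperator machine-api -o yaml
```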
9 changes: 9 additions & 0 deletions config/kubemark-install-config.yaml
@@ -0,0 +1,9 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: cluster-config-v1
  namespace: kube-system
data:
  install-config: |-
    platform:
      kubemark: {}
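Once applied, the MAO's platform selection can be confirmed by reading the payload back; a sketch assuming the ConfigMap above has been created:

```sh
# Should print the platform stanza defined above (platform / kubemark: {}).
$ kubectl -n kube-system get configmap cluster-config-v1 \
    -o jsonpath='{.data.install-config}'
```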
80 changes: 80 additions & 0 deletions config/kubemark.yaml
@@ -0,0 +1,80 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: deleteunreadynodes
  namespace: kube-system
data:
  entrypoint.sh: |-
    #!/bin/bash
    while true; do
      echo "Checking NotReady nodes"
      for node in $(kubectl get nodes -o json | jq '.items[].metadata.name' --raw-output); do
        echo "Checking node $node"
        taint=$(kubectl get nodes $node -o json | jq '.spec | select(.taints!=null) | .taints[] | select(.key=="kubemark") | select (.!=null) | select(.value=="true")' | wc -l)
        if [ $taint -eq 0 ]; then
          echo "Skipping $node, no 'kubemark' taint found"
          continue
        fi
        status=$(kubectl get node $node -o json | jq '.status.conditions[] | select(.type=="Ready") | .status' --raw-output)
        if [ $status != "Unknown" ]; then
          continue
        fi
        # Delete node
        echo "Deleting node $node"
        kubectl delete node $node
      done
      sleep 10s
    done
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: machineapi-kubemark-controllers
  namespace: kube-system
  labels:
    api: machineapi
    k8s-app: kubemark
spec:
  selector:
    matchLabels:
      api: machineapi
      k8s-app: kubemark
  replicas: 1
  template:
    metadata:
      labels:
        api: machineapi
        k8s-app: kubemark
    spec:
      nodeSelector:
        node-role.kubernetes.io/master: ""
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
      - key: CriticalAddonsOnly
        operator: Exists
      - effect: NoExecute
        key: node.alpha.kubernetes.io/notReady
        operator: Exists
      - effect: NoExecute
        key: node.alpha.kubernetes.io/unreachable
        operator: Exists
      containers:
      - name: unready-nodes-gb
        image: gofed/kubemark-machine-controllers:v1.0
        command:
        - /bin/entrypoint.sh
        volumeMounts:
        - name: deleteunreadynodes
          mountPath: /bin/entrypoint.sh
          readOnly: true
          subPath: entrypoint.sh
      volumes:
      - name: deleteunreadynodes
        configMap:
          defaultMode: 0700
          name: deleteunreadynodes
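The `deleteunreadynodes` entrypoint loops over nodes, keeps only those carrying the `kubemark=true` taint, and deletes the ones whose Ready condition is `Unknown`. A read-only variant of the same checks, useful for verifying the filter before letting the deployment delete anything (assumes `kubectl` and `jq` are available):

```sh
#!/bin/bash
# Dry run: report which nodes the reaper would delete, without deleting them.
for node in $(kubectl get nodes -o json | jq -r '.items[].metadata.name'); do
  tainted=$(kubectl get node "$node" -o json \
    | jq '[.spec.taints[]? | select(.key=="kubemark" and .value=="true")] | length')
  ready=$(kubectl get node "$node" -o json \
    | jq -r '.status.conditions[] | select(.type=="Ready") | .status')
  if [ "$tainted" -gt 0 ] && [ "$ready" = "Unknown" ]; then
    echo "would delete: $node"
  fi
done
```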
