[Temporary] Zenko: Single Node Metalk8s Deployment
Note: instances with `.` in their names cannot deploy Xcore (HD-1173).
yum-config-manager --enable base extras updates
yum install yum-plugin-versionlock -y
CREDS='username:password'
ISO_PATH=$HOME
BASE_URL_ISO=https://packages.scality.com/RINGX/0.3
METALK8S_VERSION=2.7.1
ZENKO_VERSION=2.0.0-alpha.3
XCORE_VERSION=0.2.3
KEYCLOAK_VERSION=0.1.0-dev
CERT_MANAGER_VERSION=0.1.0-dev
METALK8S_ISO=$ISO_PATH/metalk8s-$METALK8S_VERSION.iso
KEYCLOAK_ISO=$ISO_PATH/keycloak-$KEYCLOAK_VERSION.iso
CERT_MANAGER_ISO=$ISO_PATH/cert-manager-$CERT_MANAGER_VERSION.iso
ZENKO_BASE_ISO=$ISO_PATH/zenko-base-$ZENKO_VERSION.iso
ZENKO_ISO=$ISO_PATH/zenko-$ZENKO_VERSION.iso
curl -u "$CREDS" "$BASE_URL_ISO/metalk8s.iso" -o $METALK8S_ISO
curl -u "$CREDS" "$BASE_URL_ISO/keycloak-$KEYCLOAK_VERSION.iso" -o $KEYCLOAK_ISO
curl -u "$CREDS" "$BASE_URL_ISO/cert-manager-$CERT_MANAGER_VERSION.iso" -o $CERT_MANAGER_ISO
curl -u "$CREDS" "$BASE_URL_ISO/zenko-base.iso" -o $ZENKO_BASE_ISO
curl -u "$CREDS" "$BASE_URL_ISO/zenko.iso" -o $ZENKO_ISO
| Partition # | Size   | Service       | Zenko sizing |
|-------------|--------|---------------|--------------|
| 1           | 300GiB | Mongo         | yes          |
| 2           | 100GiB | Kafka         | yes          |
| 3           | 100GiB | S3Data        | yes          |
| 4           | 10GiB  | Redis         | yes          |
| 5           | 10GiB  | Zookeeper     | yes          |
| 6           | 10GiB  | Keycloak      |              |
| 7           | 10GiB  | Prometheus    |              |
| 8           | 10GiB  | Alert-Manager |              |
| 9           | 10GiB  | Loki          |              |
DISK_NAME=/dev/vdb
parted -a none ${DISK_NAME} --script \
mklabel gpt \
mkpart primary ext4 1MiB 320GiB \
mkpart primary ext4 320GiB 420GiB \
mkpart primary ext4 420GiB 520GiB \
mkpart primary ext4 520GiB 530GiB \
mkpart primary ext4 530GiB 540GiB \
mkpart primary ext4 540GiB 550GiB \
mkpart primary ext4 550GiB 560GiB \
mkpart primary ext4 560GiB 570GiB \
mkpart primary ext4 570GiB 580GiB
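To confirm the layout matches the sizing table above (optional check):
parted ${DISK_NAME} --script print  # should show the nine partitions just created
lsblk ${DISK_NAME}                  # kernel view of the same layout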
MOUNT_PATH=/srv/scality/metalk8s-$METALK8S_VERSION
mkdir -p $MOUNT_PATH
mkdir -p /etc/metalk8s
CONTROL_PLANE=10.100.0.0/16
WORKLOAD_PLANE=10.100.0.0/16
HOSTNAME="$(hostname)"
IP_ADDR="$(hostname -I | awk '{ print $1 }')"
cat > /etc/metalk8s/bootstrap.yaml <<-EOF
apiVersion: metalk8s.scality.com/v1alpha2
kind: BootstrapConfiguration
networks:
  controlPlane: ${CONTROL_PLANE}
  workloadPlane: ${WORKLOAD_PLANE}
ca:
  minion: ${HOSTNAME}
apiServer:
  host: ${IP_ADDR}
archives:
  - ${METALK8S_ISO}
EOF
mount $METALK8S_ISO $MOUNT_PATH
${MOUNT_PATH}/bootstrap.sh --verbose
export KUBECONFIG=/etc/kubernetes/admin.conf
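A quick sanity check that the API server answers (optional):
kubectl get nodes  # the single bootstrap node should be listed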
kubectl edit node
- Add the label `node-role.kubernetes.io/node: ""` to `metadata.labels`.
- Delete the `spec.taints` field, which initially contains the following (equivalent non-interactive commands are sketched below):
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/bootstrap
  - effect: NoSchedule
    key: node-role.kubernetes.io/infra
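The same changes can be made without an interactive editor (a minimal sketch, assuming the Kubernetes node name matches the hostname):
kubectl label node "$(hostname)" node-role.kubernetes.io/node=
kubectl taint node "$(hostname)" node-role.kubernetes.io/bootstrap:NoSchedule-
kubectl taint node "$(hostname)" node-role.kubernetes.io/infra:NoSchedule-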
kubectl patch ippool default-ipv4-ippool \
--kubeconfig=/etc/kubernetes/admin.conf \
--type merge --patch "{\"spec\":{\"ipipMode\":\"Always\"}}"
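To verify the patch took effect (optional):
kubectl --kubeconfig=/etc/kubernetes/admin.conf get ippool default-ipv4-ippool -o jsonpath='{.spec.ipipMode}{"\n"}'  # expect: Always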
DISK_NAME=/dev/vdb
NODE_NAME=$(hostname)
cat <<EOF | kubectl apply -f -
apiVersion: storage.metalk8s.scality.com/v1alpha1
kind: Volume
metadata:
  name: bootstrap-prometheus
spec:
  nodeName: ${NODE_NAME}
  storageClassName: metalk8s
  rawBlockDevice:
    devicePath: ${DISK_NAME}7
  template:
    metadata:
      labels:
        app.kubernetes.io/name: 'prometheus-operator-prometheus'
---
apiVersion: storage.metalk8s.scality.com/v1alpha1
kind: Volume
metadata:
  name: bootstrap-alertmanager
spec:
  nodeName: ${NODE_NAME}
  storageClassName: metalk8s
  rawBlockDevice:
    devicePath: ${DISK_NAME}8
  template:
    metadata:
      labels:
        app.kubernetes.io/name: 'prometheus-operator-alertmanager'
---
apiVersion: storage.metalk8s.scality.com/v1alpha1
kind: Volume
metadata:
  name: bootstrap-loki
spec:
  nodeName: ${NODE_NAME}
  storageClassName: metalk8s
  rawBlockDevice:
    devicePath: ${DISK_NAME}9
  template:
    metadata:
      labels:
        app.kubernetes.io/name: loki
---
EOF
kubectl wait --for condition=Ready=True --timeout 5s volume bootstrap-loki bootstrap-alertmanager bootstrap-prometheus
/srv/scality/metalk8s-$METALK8S_VERSION/solutions.sh import --archive $CERT_MANAGER_ISO
kubectl apply -f /srv/scality/cert-manager-$CERT_MANAGER_VERSION/cert-manager.yaml
/srv/scality/metalk8s-$METALK8S_VERSION/solutions.sh import --archive $KEYCLOAK_ISO
kubectl apply -f /srv/scality/keycloak-$KEYCLOAK_VERSION/keycloak-single-node.yaml
NODE_NAME=$(hostname)
cat <<EOF | kubectl apply -f -
apiVersion: storage.metalk8s.scality.com/v1alpha1
kind: Volume
metadata:
  name: keycloak-postgres
spec:
  nodeName: ${NODE_NAME}
  storageClassName: ringx-keycloak
  rawBlockDevice:
    devicePath: ${DISK_NAME}6
---
EOF
kubectl -n ringx-auth rollout status --timeout 10m sts/keycloak
cat <<EOF | kubectl apply -f -
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: keycloak-public
  namespace: ringx-auth
  annotations:
    nginx.ingress.kubernetes.io/backend-protocol: HTTP
    nginx.ingress.kubernetes.io/enable-cors: "true"
    kubernetes.io/ingress.class: nginx
spec:
  rules:
  - host: keycloak.zenko.local
    http:
      paths:
      - backend:
          serviceName: keycloak-http
          servicePort: http
        path: /
EOF
NODE_IP="put-the-node-ip"
echo "$NODE_IP keycloak.zenko.local iam.zenko.local sts.zenko.local management.zenko.local ui.zenko.local s3.zenko.local" >>/etc/hosts
The customer DNS must resolve the following FQDNs:
- keycloak.zenko.local
- iam.zenko.local
- sts.zenko.local
- management.zenko.local
- ui.zenko.local
- s3.zenko.local
kubectl edit -n kube-system configmap coredns
Add `rewrite name keycloak.zenko.local keycloak-http.ringx-auth.svc.cluster.local` to the Corefile. The edited Corefile should look similar to the following:
Corefile: |
  .:53 {
      errors
      health {
          lameduck 5s
      }
      ready
      rewrite name keycloak.zenko.local keycloak-http.ringx-auth.svc.cluster.local
      kubernetes cluster.local in-addr.arpa ip6.arpa {
          pods insecure
          fallthrough in-addr.arpa ip6.arpa
          ttl 30
      }
      prometheus :9153
      forward . /etc/resolv.conf
      cache 30
      loop
      reload
      loadbalance
  }
kubectl -n kube-system rollout restart deployment/coredns
kubectl -n ringx-auth exec keycloak-0 -- curl http://keycloak.zenko.local
/srv/scality/metalk8s-$METALK8S_VERSION/solutions.sh create-env --name zenko
/srv/scality/metalk8s-$METALK8S_VERSION/solutions.sh import --archive $ZENKO_BASE_ISO
sed "s/SOLUTION_ENV/zenko/g" /srv/scality/zenko-base-2.0.0-alpha.3/operator.yaml | kubectl apply -f -
kubectl -n zenko rollout status --timeout 10m deploy kubedb-operator
sed "s/SOLUTION_ENV/zenko/g" /srv/scality/zenko-base-2.0.0-alpha.3/operator.yaml | kubectl apply -f -
/srv/scality/metalk8s-$METALK8S_VERSION/solutions.sh import --archive $ZENKO_ISO
/srv/scality/metalk8s-$METALK8S_VERSION/solutions.sh activate --name zenko --version $ZENKO_VERSION
/srv/scality/metalk8s-$METALK8S_VERSION/solutions.sh add-solution --name zenko --solution zenko --version $ZENKO_VERSION
kubectl -n zenko rollout status --timeout 10m deploy zenko-operator
REALM_NAME="zenko-realm"
CLIENT_ID="zenko-ui"
UI_ENDPOINT="http://ui.zenko.local"
kubectl -n ringx-auth exec -i keycloak-0 -- /opt/jboss/keycloak/bin/kcadm.sh config credentials --server http://localhost:8080/auth --realm master --user admin --password password
cat <<EOF | kubectl -n ringx-auth exec -i keycloak-0 -- /opt/jboss/keycloak/bin/kcadm.sh create realms -f -
{
  "realm": "${REALM_NAME}",
  "enabled": true,
  "groups": [],
  "defaultRoles": ["uma_authorization", "offline_access"],
  "requiredCredentials": ["password"],
  "users": [],
  "clients": [{
    "clientId": "${CLIENT_ID}",
    "rootUrl": "${UI_ENDPOINT}",
    "adminUrl": "${UI_ENDPOINT}",
    "surrogateAuthRequired": false,
    "enabled": true,
    "alwaysDisplayInConsole": false,
    "clientAuthenticatorType": "client-secret",
    "secret": "",
    "redirectUris": ["${UI_ENDPOINT}/*"],
    "webOrigins": ["${UI_ENDPOINT}"],
    "notBefore": 0,
    "bearerOnly": false,
    "consentRequired": false,
    "standardFlowEnabled": true,
    "implicitFlowEnabled": false,
    "directAccessGrantsEnabled": true,
    "serviceAccountsEnabled": false,
    "publicClient": true,
    "frontchannelLogout": false,
    "protocol": "openid-connect",
    "attributes": {},
    "authenticationFlowBindingOverrides": {},
    "fullScopeAllowed": true,
    "nodeReRegistrationTimeout": -1,
    "protocolMappers": [{
      "name": "instanceids_mapper",
      "protocol": "openid-connect",
      "protocolMapper": "oidc-usermodel-attribute-mapper",
      "consentRequired": false,
      "config": {
        "multivalued": "true",
        "userinfo.token.claim": "true",
        "user.attribute": "instanceIds",
        "id.token.claim": "true",
        "access.token.claim": "true",
        "claim.name": "instanceIds"
      }
    }, {
      "name": "role_mapper",
      "protocol": "openid-connect",
      "protocolMapper": "oidc-usermodel-attribute-mapper",
      "consentRequired": false,
      "config": {
        "user.attribute": "role",
        "id.token.claim": "true",
        "access.token.claim": "true",
        "claim.name": "role",
        "userinfo.token.claim": "true"
      }
    }]
  }]
}
EOF
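To confirm the realm exists (optional sketch, reusing the kcadm session configured above):
kubectl -n ringx-auth exec -i keycloak-0 -- /opt/jboss/keycloak/bin/kcadm.sh get realms --fields realm  # should include zenko-realm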
kubectl apply --namespace zenko -f /srv/scality/zenko-$ZENKO_VERSION/zenkoversion.yaml
cat <<EOF | kubectl apply -f -
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: sc-300-g
  labels:
    zenko: storageclass
mountOptions:
  - rw
  - discard
parameters:
  fsType: ext4
  mkfsOptions: '["-m", "0"]'
provisioner: kubernetes.io/no-provisioner
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: sc-100-g
  labels:
    zenko: storageclass
mountOptions:
  - rw
  - discard
parameters:
  fsType: ext4
  mkfsOptions: '["-m", "0"]'
provisioner: kubernetes.io/no-provisioner
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: sc-10-g
  labels:
    zenko: storageclass
mountOptions:
  - rw
  - discard
parameters:
  fsType: ext4
  mkfsOptions: '["-m", "0"]'
provisioner: kubernetes.io/no-provisioner
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
---
EOF
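The three classes can then be listed through their shared label (optional check):
kubectl get storageclass -l zenko=storageclass  # expect sc-300-g, sc-100-g and sc-10-g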
DISK_NAME=/dev/vdb
NODE_NAME=$(hostname)
cat <<EOF | kubectl apply -f -
apiVersion: storage.metalk8s.scality.com/v1alpha1
kind: Volume
metadata:
  name: zenko-mongodb
spec:
  nodeName: ${NODE_NAME}
  storageClassName: sc-300-g
  rawBlockDevice:
    devicePath: ${DISK_NAME}1
---
apiVersion: storage.metalk8s.scality.com/v1alpha1
kind: Volume
metadata:
  name: zenko-kafka
spec:
  nodeName: ${NODE_NAME}
  storageClassName: sc-100-g
  rawBlockDevice:
    devicePath: ${DISK_NAME}2
---
apiVersion: storage.metalk8s.scality.com/v1alpha1
kind: Volume
metadata:
  name: zenko-s3data
spec:
  nodeName: ${NODE_NAME}
  storageClassName: sc-100-g
  rawBlockDevice:
    devicePath: ${DISK_NAME}3
---
apiVersion: storage.metalk8s.scality.com/v1alpha1
kind: Volume
metadata:
  name: zenko-redis
spec:
  nodeName: ${NODE_NAME}
  storageClassName: sc-10-g
  rawBlockDevice:
    devicePath: ${DISK_NAME}4
---
apiVersion: storage.metalk8s.scality.com/v1alpha1
kind: Volume
metadata:
  name: zenko-zookeeper
spec:
  nodeName: ${NODE_NAME}
  storageClassName: sc-10-g
  rawBlockDevice:
    devicePath: ${DISK_NAME}5
---
EOF
kubectl wait --for condition=Ready=True --timeout 5s volume zenko-mongodb zenko-kafka zenko-s3data zenko-redis zenko-zookeeper
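Each Volume is backed by one of the partitions created earlier and exposed as a PersistentVolume (optional check):
kubectl get volume
kubectl get persistentvolume  # one Available PV per Volume, in the matching storage class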
cat <<EOF | kubectl apply -n zenko -f -
apiVersion: zenko.io/v1alpha1
kind: Zenko
metadata:
  name: zenko-instance
spec:
  version: 2.0.0-alpha.3
  replicas: 1
  mongodb:
    provider: KubeDB
    persistence:
      volumeClaimTemplate:
        size: 300Gi
        storageClassName: sc-300-g
  redis:
    provider: KubeDB
    persistence:
      volumeClaimTemplate:
        size: 10Gi
        storageClassName: sc-10-g
  kafka:
    provider: Managed
    persistence:
      volumeClaimTemplate:
        size: 100Gi
        storageClassName: sc-100-g
  zookeeper:
    provider: Managed
    persistence:
      volumeClaimTemplate:
        size: 10Gi
        storageClassName: sc-10-g
  localData:
    persistence:
      volumeClaimTemplate:
        size: 100Gi
        storageClassName: sc-100-g
  vault:
    enable: true
    iamIngress:
      hostname: iam.zenko.local
    stsIngress:
      hostname: sts.zenko.local
    replicas: 10
  cloudserver:
    replicas: 10
  management:
    provider: InCluster
    ui:
      ingress:
        hostname: ui.zenko.local
    oidc:
      provider: 'http://keycloak.zenko.local/auth/realms/zenko-realm'
      uiClientId: zenko-ui
      vaultClientId: zenko-ui
    api:
      ingress:
        hostname: management.zenko.local
      allowFrom:
        - 172.16.0.0/12
        - 10.0.0.0/8
  ingress:
    workloadPlaneClass: 'nginx'
    controlPlaneClass: 'nginx'
    annotations:
      nginx.ingress.kubernetes.io/proxy-body-size: 0m
EOF
kubectl wait --for condition=Available --timeout 10m -n zenko zenko/zenko-instance
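Once the instance reports Available, its components can be inspected (optional; the pod list below is indicative):
kubectl -n zenko get pods  # mongodb, redis, kafka, zookeeper, cloudserver, vault, ui and management pods should all reach Running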
OIDC_USER="zenko-tester"
INSTANCE_ID=$(kubectl -n zenko get zenko/zenko-instance -o jsonpath='{.status.instanceID}')
REALM_NAME="zenko-realm"
kubectl -n ringx-auth exec -i keycloak-0 -- /opt/jboss/keycloak/bin/kcadm.sh config credentials --server http://localhost:8080/auth --realm master --user admin --password password
cat <<EOF | kubectl -n ringx-auth exec -i keycloak-0 -- /opt/jboss/keycloak/bin/kcadm.sh create users -r "${REALM_NAME}" -f -
{
  "username": "${OIDC_USER}",
  "enabled": true,
  "totp": false,
  "emailVerified": true,
  "firstName": "zenko",
  "lastName": "tester",
  "email": "${OIDC_USER}@zenko.local",
  "attributes": {
    "instanceIds": ["${INSTANCE_ID}"],
    "role": ["user"]
  },
  "credentials": [],
  "disableableCredentialTypes": [],
  "requiredActions": [],
  "realmRoles": ["uma_authorization", "offline_access"],
  "clientRoles": {
    "account": ["view-profile", "manage-account"]
  },
  "notBefore": 0,
  "groups": []
}
EOF
kubectl -n ringx-auth exec -i keycloak-0 -- /opt/jboss/keycloak/bin/kcadm.sh set-password -r ${REALM_NAME} --username ${OIDC_USER}
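kcadm prompts for the password interactively; it can also be passed inline with kcadm's --new-password flag (a sketch, using the same password expected by the API examples below):
kubectl -n ringx-auth exec -i keycloak-0 -- /opt/jboss/keycloak/bin/kcadm.sh set-password -r ${REALM_NAME} --username ${OIDC_USER} --new-password 'password'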
- Add the following hostnames to the browser user's machine's /etc/hosts, resolving to the MetalK8s node's IP:
  - management.zenko.local
  - keycloak.zenko.local
  - ui.zenko.local
  - s3.zenko.local
If the machine has port 80 open, the zenko-ui can now be accessed through http://ui.zenko.local. The UI is currently limited to creating accounts and locations.
Requirements: jq, curl
Context: run the following commands from the browser user's machine.
OIDC_REALM='zenko-realm'
OIDC_CLIENT_ID='zenko-ui'
OIDC_USER='zenko-tester'
OIDC_USER_PASSWORD='password'
ZENKO_NAME='zenko-instance'
ACCESS_TOKEN=$(
curl -s -k "http://keycloak.zenko.local/auth/realms/${OIDC_REALM}/protocol/openid-connect/token" \
-d 'scope=openid' \
-d "client_id=${OIDC_CLIENT_ID}" \
-d "username=${OIDC_USER}" \
-d "password=${OIDC_USER_PASSWORD}" \
-d "grant_type=password" | \
jq -cr '.access_token'
)
TOKEN=$(
curl -s -k "http://keycloak.zenko.local/auth/realms/${OIDC_REALM}/protocol/openid-connect/token" \
-d 'scope=openid' \
-d "client_id=${OIDC_CLIENT_ID}" \
-d "username=${OIDC_USER}" \
-d "password=${OIDC_USER_PASSWORD}" \
-d "grant_type=password" | \
jq -cr '.id_token'
)
INSTANCE_ID=$(
curl -s -k "http://keycloak.zenko.local/auth/realms/${OIDC_REALM}/protocol/openid-connect/userinfo" \
-H "Authorization: bearer $ACCESS_TOKEN" | \
jq -rc '.instanceIds[0]'
)
ZENKO_ACCOUNT='test-account-1'
USER_PARAMS=$(
  echo '{}' |
  jq -c "
    .userName=\"${ZENKO_ACCOUNT}\" |
    .email=\"${ZENKO_ACCOUNT}@zenko.local\"
  "
)
curl -s -k -X POST \
-H "X-Authentication-Token: ${TOKEN}" \
-H "Content-Type: application/json" \
-d "${USER_PARAMS}" \
"http://management.zenko.local/api/v1/config/${INSTANCE_ID}/user" | \
jq '.'
ENDPOINT_HOSTNAME='s3.zenko.local'  # example value: the S3 endpoint hostname to register
LOCATION_NAME='us-east-1'           # example value: an existing location name
ENDPOINT_PARAMS=$(
  echo '{}' |
  jq -c "
    .hostname=\"${ENDPOINT_HOSTNAME}\" |
    .locationName=\"${LOCATION_NAME}\"
  "
)
curl -s -k -X POST \
-H "X-Authentication-Token: ${TOKEN}" \
-H "Content-Type: application/json" \
-d "${ENDPOINT_PARAMS}" \
"http://management.zenko.local/api/v1/config/${INSTANCE_ID}/endpoint" | \
jq '.'
ZENKO_ACCOUNT='test-account-1'
curl -s -k -X POST \
-H "X-Authentication-Token: ${TOKEN}" \
-H "Content-Type: application/json" \
"http://management.zenko.local/api/v1/config/${INSTANCE_ID}/user/${ZENKO_ACCOUNT}/key" | \
jq '.'
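The returned key pair can be exercised with any S3 client against the s3.zenko.local endpoint (a sketch, assuming the AWS CLI is installed; substitute the accessKey/secretKey values from the response above):
export AWS_ACCESS_KEY_ID='<accessKey from the response>'
export AWS_SECRET_ACCESS_KEY='<secretKey from the response>'
aws --endpoint-url http://s3.zenko.local s3 mb s3://test-bucket  # create a bucket
aws --endpoint-url http://s3.zenko.local s3 ls                   # list buckets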
curl -u "$CREDS" https://eve.devsca.com/github/scality/zenko/artifacts/builds/github:scality:zenko:staging-2.0.0.r210210230152.7c4bbdc.pre-merge.00015938/zenko-$ZENKO_VERSION.iso -o zenko-2.0.0-alpha.3.3.iso
/srv/scality/metalk8s-$METALK8S_VERSION/solutions.sh import --archive /home/centos/zenko-2.0.0-alpha.3.3.iso
/srv/scality/metalk8s-$METALK8S_VERSION/solutions.sh activate --name zenko --version 2.0.0-alpha.3
/srv/scality/metalk8s-$METALK8S_VERSION/solutions.sh add-solution --name zenko --solution zenko --version 2.0.0-alpha.3
Note: `--name` is the name of the Kubernetes namespace.