operator‐1901
Create a Kubernetes cluster, e.g. as described in https://github.com/allanrogerr/public/wiki/vm-broker-vanilla-k8s-cluster-on-multiple-instances,-same-node
ssh -p 20014 ubuntu@65.49.37.23 -o "ServerAliveInterval=5" -o "ServerAliveCountMax=100000" -o "StrictHostKeyChecking=off"
loginctl enable-linger ubuntu
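Lingering lets the ubuntu user's services keep running after logout; it can be verified with:
loginctl show-user ubuntu -p Linger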
cd ~
git clone https://github.com/allanrogerr/operator.git
cd operator && git checkout master
sudo apt-get update && sudo apt-get upgrade -y
sudo apt-get install vim -y
sudo apt-get install zip -y
sudo apt-get install make -y
sudo apt install build-essential -y
cd ~
wget https://go.dev/dl/go1.21.3.linux-amd64.tar.gz
sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go1.21.3.linux-amd64.tar.gz
# Quote the heredoc delimiter so $PATH is written literally and expands at login, not when written
cat <<'EOF' >> $HOME/.profile
export PATH=$PATH:/usr/local/go/bin:~/go/bin
EOF
cat $HOME/.profile
source $HOME/.profile
go version
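Expect output like: go version go1.21.3 linux/amd64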
sudo apt-get install nodejs -y
sudo apt-get install npm -y
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.1/install.sh | bash
source ~/.bashrc
nvm install && nvm use && sudo npm install -g yarn
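Verify the frontend toolchain before building the console assets:
node --version && yarn --version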
sudo apt-get install podman -y
sudo apt-get install podman-docker -y
podman run -d -p 5000:5000 --restart always --name registry registry:2
Note: make the local registry insecure, e.g. on each node set in /etc/containers/registries.conf:
[[registry]]
insecure = true
location = "10.62.75.170:5000"
Restart the registry container:
podman restart registry
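The empty registry should now answer the Docker v2 API; a quick check from any node:
curl http://10.62.75.170:5000/v2/_catalog
Expected: {"repositories":[]}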
Edit Makefile:
VERSION ?= $(shell git describe --tags --always)
VERSIONV ?= $(shell git describe --tags --always | sed 's,v,,g')
binary:
	@CGO_ENABLED=0 GOOS=linux go build -trimpath -o minio-operator ./cmd/operator
docker: operator
	@docker buildx build --no-cache --platform linux/amd64 -t $(TAG) .
(the recipe lines under binary: and docker: must be indented with a tab)
Install https://github.com/mvdan/gofumpt, then:
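Assuming the Go toolchain installed above, gofumpt can be installed with:
go install mvdan.cc/gofumpt@latest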
export CI=true
cd ~/operator && make assets && make build
podman tag localhost/minio/operator:dev 10.62.75.170:5000/minio/operator:0.1
podman images
podman push 10.62.75.170:5000/minio/operator:0.1 --tls-verify=false
podman search --list-tags 10.62.75.170:5000/minio/operator --tls-verify=false
or
curl -X GET http://10.62.75.170:5000/v2/_catalog
Pull the image on each node, e.g.:
podman pull 10.62.75.170:5000/minio/operator:0.1
Then, on each node:
sudo vi /etc/containerd/config.toml
[plugins."io.containerd.grpc.v1.cri".registry.configs]
  [plugins."io.containerd.grpc.v1.cri".registry.configs."10.62.75.170:5000".tls] # edited line
    ca_file = "" # edited line
    cert_file = "" # edited line
    insecure_skip_verify = true # edited line
    key_file = "" # edited line
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
  [plugins."io.containerd.grpc.v1.cri".registry.mirrors."10.62.75.170:5000"] # edited line
    endpoint = ["http://10.62.75.170:5000"] # edited line
On all nodes
sudo systemctl restart containerd
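If crictl is configured on a node, containerd's access to the insecure mirror can be confirmed with a manual pull:
sudo crictl pull 10.62.75.170:5000/minio/operator:0.1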
mkdir -p ~/mc && cd ~/mc && rm -rf mc* && wget https://dl.min.io/client/mc/release/linux-amd64/mc
chmod +x mc && cd ~
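Sanity-check the client:
~/mc/mc --version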
cd ~/operator/kubectl-minio
CGO_ENABLED=1 go build --ldflags "-s -w" -trimpath -o ../kubectl-minio
~/operator/kubectl-minio/kubectl-minio init --image 10.62.75.170:5000/minio/operator:0.1 --console-image 10.62.75.170:5000/minio/operator:0.1
SA_TOKEN=$(kubectl -n minio-operator get secret console-sa-secret -o jsonpath="{.data.token}" | base64 --decode)
echo $SA_TOKEN
kubectl patch service -n minio-operator console -p '{"spec":{"ports":[{"name": "http","port": 9090,"protocol": "TCP","nodePort":31090}],"type": "NodePort"}}'
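Confirm the console service now exposes NodePort 31090:
kubectl -n minio-operator get svc console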
kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
kubectl get storageclass
Log in to the Operator console at http://k8s-master.minio.training:31090/login using the SA token printed above.
Summary: As reported by the user, attempt to create a tenant with 1 node and 1 drive with 1GiB storage.
Observe that no valid parity (EC:0) is available.
Also observe that if an invalid Resource is specified and the user navigates away and then back to the main create page, the Create button is incorrectly enabled.
Also observe that if multiple parities were selectable under a particular drive/server configuration, e.g. 4 nodes with 8 drives (giving EC:8,7,6,5,4,3,2), and a 1 node, 1 drive configuration is then selected, those same parities (EC:8,7,6,5,4,3,2) remain selectable even though they cannot be applied to a 1 node, 1 drive configuration.
Implement fix
cd ~/operator && git pull && git checkout recomment-trivial-parity
cd ~/operator/kubectl-minio
CGO_ENABLED=1 go build --ldflags "-s -w" -trimpath -o ../kubectl-minio
Compile and push the newly compiled operator image to the local registry. See the build instructions above.
export CI=true
cd ~/operator && make assets && make build
podman tag localhost/minio/operator:dev 10.62.75.170:5000/minio/operator:0.3
podman images
podman push 10.62.75.170:5000/minio/operator:0.3 --tls-verify=false
podman search --list-tags 10.62.75.170:5000/minio/operator --tls-verify=false
Pull the new image on each node, e.g.:
podman pull 10.62.75.170:5000/minio/operator:0.3
~/operator/kubectl-minio/kubectl-minio init --image 10.62.75.170:5000/minio/operator:0.3 --console-image 10.62.75.170:5000/minio/operator:0.3
SA_TOKEN=$(kubectl -n minio-operator get secret console-sa-secret -o jsonpath="{.data.token}" | base64 --decode)
echo $SA_TOKEN
kubectl patch service -n minio-operator console -p '{"spec":{"ports":[{"name": "http","port": 9090,"protocol": "TCP","nodePort":31090}],"type": "NodePort"}}'
kubectl --namespace myminio port-forward svc/test-hl 9000:9000 --address 0.0.0.0 & ~/mc/mc alias set operator-1901 http://127.0.0.1:9000 AZ9oDNs5dBHAdDQJ WHzr29dkg2VwTZdivkFv4zEI4hFLjqUk
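Confirm the alias works, e.g.:
~/mc/mc admin info operator-1901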
Summary: As reported by the user, attempt to create a tenant with 1 node and 1 drive with 1GiB storage.
Observe that the valid parity EC:0 is now available.
kubectl delete ns myminio
kubectl create ns myminio
Create tenant in console with parameters --namespace myminio --servers 4 --volumes 4 --capacity 4Gi --disable-tls
Port forward
kubectl --namespace myminio port-forward svc/test-hl 9000:9000 --address 0.0.0.0 &
~/mc/mc alias set operator-1785 http://127.0.0.1:9000 minioadmin minioadmin --insecure
Create PVs for pool-0 and pool-1 - See yaml below
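The pv.yml referenced here is defined outside this section; a minimal sketch of what it could look like, assuming hostPath volumes under the local-path provisioner directory and the PV names used in the delete command below, is:
# Hypothetical sketch only; the actual pv.yml used in this test may differ
rm -f ~/pv.yml
for pool in 0 1; do for i in 0 1 2 3; do
cat <<EOF >> ~/pv.yml
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-k8s-0-test-pool-$pool-$i
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-path
  hostPath:
    path: /opt/local-path-provisioner/pv-k8s-0-test-pool-$pool-$i
EOF
done; done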
kubectl -n myminio delete pv/pv-k8s-0-test-pool-{0..1}-{0..3}
kubectl apply -f ~/pv.yml
Expand with kubectl-minio plugin
~/operator/kubectl-minio/kubectl-minio tenant expand test --pool pooltest --servers 4 --volumes 4 --capacity 4Gi --namespace myminio
Since volumes are mounted as root by default, run the following on each k8s worker node:
sudo chgrp -R ubuntu /opt/local-path-provisioner/pvc-*
sudo chown -R ubuntu /opt/local-path-provisioner/pvc-*
sudo chmod -R 777 /opt/local-path-provisioner/pvc-*
ls -la /opt/local-path-provisioner/
Decommission pool-0, remove from tenant yaml and then delete STSs
~/mc/mc admin decommission start operator-1785/ http://test-pool-0-{0...3}.test-hl.myminio.svc.cluster.local/export --insecure
~/mc/mc admin decommission status operator-1785 --insecure
Delete all STS, and expand tenant in console with parameters --servers 4 --volumes 4 --capacity 4Gi --namespace myminio
Expand again using the console
Expand again with kubectl-minio plugin (not specifying pool name)
~/operator/kubectl-minio/kubectl-minio tenant expand test --servers 4 --volumes 4 --capacity 4Gi --namespace myminio
Observe pods are all scheduled
Console screenshot
Cleanup
kubectl delete ns myminio
Create tenant using kubectl-minio, Expand using Console, Decommission 0pool, Expand using Console
kubectl delete ns myminio
kubectl create ns myminio
Create PVs for pool-0 and pool-1 - See yaml below
kubectl -n myminio delete pv/pv-k8s-0-test-pool-{0..1}-{0..3}
kubectl apply -f ~/pv.yml
Create tenant using kubectl-minio
~/operator/kubectl-minio/kubectl-minio tenant create test --namespace myminio --servers 4 --volumes 4 --capacity 4Gi --disable-tls --pool 0pool
Since volumes are mounted as root by default, run the following on each k8s worker node:
sudo chgrp -R ubuntu /opt/local-path-provisioner/pvc-*
sudo chown -R ubuntu /opt/local-path-provisioner/pvc-*
sudo chmod -R 777 /opt/local-path-provisioner/pvc-*
ls -la /opt/local-path-provisioner/
Port forward
kubectl --namespace myminio port-forward svc/test-hl 9000:9000 --address 0.0.0.0 &
~/mc/mc alias set operator-1785 http://127.0.0.1:9000 SFBN3US4KQAB3L70ZY8J MyDJKwBjWhGdQCqsUVy1MGuwwhzf29N9TvJNVX6P --insecure
Expand pool on tenant in console with parameters --pool pool-1 --servers 4 --volumes 4 --capacity 4Gi --namespace myminio
Decommission 0pool, remove from tenant yaml and then delete STSs
~/mc/mc admin decommission status operator-1785
~/mc/mc admin decommission start operator-1785/ http://test-0pool-{0...3}.test-hl.myminio.svc.cluster.local/export
~/mc/mc admin decommission status operator-1785
Expand tenant in console with parameters --servers 4 --volumes 4 --capacity 4Gi --namespace myminio
Observe pods are all scheduled
Console screenshot
Cleanup
kubectl delete ns myminio
Summary: Create tenant using Console, Expand tenant using Console, Decommission pool-0, Expand tenant using Console
kubectl delete ns myminio
kubectl create ns myminio
Create tenant in console with parameters --namespace myminio --servers 4 --volumes 4 --capacity 4Gi --disable-tls
Port forward
kubectl --namespace myminio port-forward svc/test-hl 9000:9000 --address 0.0.0.0 &
~/mc/mc alias set operator-1785 http://127.0.0.1:9000 minioadmin minioadmin --insecure
Expand pool on tenant in console with parameters --servers 4 --volumes 4 --capacity 4Gi --namespace myminio
Decommission pool-0, remove from tenant yaml and then delete STSs
~/mc/mc admin decommission status operator-1785
~/mc/mc admin decommission start operator-1785/ http://test-pool-0-{0...3}.test-hl.myminio.svc.cluster.local/export
~/mc/mc admin decommission status operator-1785
Expand tenant in console with parameters --servers 4 --volumes 4 --capacity 4Gi --namespace myminio
Observe pods are all scheduled
Console screenshot
Cleanup
kubectl delete ns myminio
Create tenant using kubectl-minio, Expand tenant using Console, Decommission pool-1, Expand tenant using Console
kubectl delete ns myminio
kubectl create ns myminio
Create PVs for pool-0 and pool-1 - See yaml below
kubectl -n myminio delete pv/pv-k8s-0-test-pool-{0..1}-{0..3}
kubectl apply -f ~/pv.yml
Since volumes are mounted as root by default, run the following on each k8s worker node:
sudo chgrp -R ubuntu /opt/local-path-provisioner/
sudo chown -R ubuntu /opt/local-path-provisioner/
sudo chmod -R 777 /opt/local-path-provisioner/
ls -la /opt/local-path-provisioner/
Create tenant using kubectl-minio
~/operator/kubectl-minio/kubectl-minio tenant create test --namespace myminio --servers 4 --volumes 4 --capacity 4Gi --disable-tls --pool pool-1
Port forward
kubectl --namespace myminio port-forward svc/test-hl 9000:9000 --address 0.0.0.0 &
~/mc/mc alias set operator-1785 http://127.0.0.1:9000 VNV1P1BG114MD401JLXE riSNDciaMfZR025jQBOjAd7oqiWUqtVmGvSQ9d0N --insecure
Expand pool on tenant in console with parameters --servers 4 --volumes 4 --capacity 4Gi --namespace myminio
Decommission pool-1, remove from tenant yaml and then delete STSs
~/mc/mc admin decommission status operator-1785
~/mc/mc admin decommission start operator-1785/ http://test-pool-1-{0...3}.test-hl.myminio.svc.cluster.local/export
~/mc/mc admin decommission status operator-1785
Expand tenant in console with parameters --servers 4 --volumes 4 --capacity 4Gi --namespace myminio
Observe pods are all scheduled
Console screenshot
Cleanup
kubectl delete ns myminio