You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
I installed a minio tenant with the following config:
---
apiVersion: v1
kind: Secret
metadata:
name: dev-console-secret
namespace: minio
type: Opaque
data:
CONSOLE_ACCESS_KEY: REDACTED
CONSOLE_HMAC_JWT_SECRET: REDACTED
CONSOLE_PBKDF_PASSPHRASE: REDACTED
CONSOLE_PBKDF_SALT: REDACTED
CONSOLE_SECRET_KEY: REDACTED
---
apiVersion: v1
data:
accesskey: REDACTED
secretkey: REDACTED
kind: Secret
metadata:
name: dev-creds-secret
namespace: minio
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
name: kes-config
namespace: minio
type: Opaque
stringData:
server-config.yaml: |-
address: 0.0.0.0:7373
root: _ # Effectively disabled since no root identity necessary.
tls:
key: /tmp/kes/server.key # Path to the TLS private key
cert: /tmp/kes/server.crt # Path to the TLS certificate
proxy:
identities: []
header:
cert: X-Tls-Client-Cert
policy:
my-policy:
paths:
- /v1/key/create/*
- /v1/key/generate/*
- /v1/key/decrypt/*
identities:
- ${MINIO_KES_IDENTITY}
cache:
expiry:
any: 5m0s
unused: 20s
log:
error: on
audit: off
keys:
vault:
endpoint: "https://vault.vault.svc:8200"
engine: "kes" # An optional K/V prefix. The server will store keys under this prefix.
kubernetes:
engine: "kubernetes"
role: "vault-kes"
jwt: "/var/run/secrets/kubernetes.io/serviceaccount/token"
retry: 15s
tls:
ca: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
status: # Vault status configuration. The server will periodically reach out to Vault to check its status.
ping: 10s # Duration until the server checks Vault's status again.
---
apiVersion: minio.min.io/v1
kind: Tenant
metadata:
name: dev
namespace: minio
spec:
certConfig: {}
console:
consoleSecret:
name: dev-console-secret
image: minio/console:v0.4.6
metadata:
name: dev
replicas: 2
resources: {}
credsSecret:
name: dev-creds-secret
image: minio/minio:RELEASE.2020-11-19T23-48-16Z
imagePullSecret: {}
mountPath: /export
requestAutoCert: true
serviceName: dev-internal-service
zones:
- resources: {}
servers: 1
volumeClaimTemplate:
apiVersion: v1
kind: persistentvolumeclaims
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 25Gi
storageClassName: freenas-nfs-csi
status: {}
volumesPerServer: 4
kes:
image: minio/kes:v0.13.3
replicas: 2
kesSecret:
name: kes-config
metadata:
labels:
app: kes
Env details:
⨯ REDACTED@DESKTOP-7JUP8RO ~\..\..\..\vault k version
Client Version: version.Info{Major:"1", Minor:"20", GitVersion:"v1.20.0", GitCommit:"af46c47ce925f4c4ad5cc8d1fca46c7b77d13b38", GitTreeState:"clean", BuildDate:"2020-12-08T17:59:43Z", GoVersion:"go1.15.5", Compiler:"gc", Platform:"windows/amd64"}
Server Version: version.Info{Major:"1", Minor:"19", GitVersion:"v1.19.4", GitCommit:"d360454c9bcd1634cf4cc52d1867af5491dc9c5f", GitTreeState:"clean", BuildDate:"2020-11-11T13:09:17Z", GoVersion:"go1.15.2", Compiler:"gc", Platform:"linux/amd64"}
REDACTED@DESKTOP-7JUP8RO ~\..\..\..\vault k describe deploy minio-operator
Name: minio-operator
Namespace: minio
CreationTimestamp: Sun, 17 Jan 2021 12:51:28 +0000
Labels: <none>
Annotations: deployment.kubernetes.io/revision: 1
Selector: name=minio-operator
Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable
StrategyType: RollingUpdate
MinReadySeconds: 0
RollingUpdateStrategy: 25% max unavailable, 25% max surge
Pod Template:
Labels: name=minio-operator
Service Account: minio-operator
Containers:
minio-operator:
Image: minio/k8s-operator:v3.0.29
Port: <none>
Host Port: <none>
Environment:
CLUSTER_DOMAIN: cluster.local
WATCHED_NAMESPACE:
Mounts: <none>
Volumes: <none>
Conditions:
Type Status Reason
---- ------ ------
Available True MinimumReplicasAvailable
Progressing True NewReplicaSetAvailable
OldReplicaSets: <none>
NewReplicaSet: minio-operator-6f5b8cdcff (1/1 replicas created)
Events: <none>
REDACTED@DESKTOP-7JUP8RO ~\..\..\..\vault
The operator creates a job that fails to create the kes key.
## The first error appears in the log of the job pod. I recreated that pod with an Ubuntu debug box.
root@debug:/tmp# ./kes key create my-minio-key -k
Error: invalid key: illegal base64 data at input byte 0
root@debug:/tmp# ./kes key create my-minio-key -k^C
root@debug:/tmp# env | grep KES
KES_CLIENT_KEY=/tmp/public.key
KES_SERVER=https://dev-kes-hl-svc.minio.svc.cluster.local:7373
KES_CLIENT_CERT=/tmp/public.crt
root@debug:/tmp# ./kes key create my-minio-key -k
Error: invalid key: illegal base64 data at input byte 0
root@debug:/tmp# ./kes --version
kes version 0.13.3
I don't see any logs in the kes pods.
What might be causing this?
On a side note, the KES pods don't use a custom service account, so this Kubernetes auth role has to be used with the default service account, which is bad.
I installed a minio tenant with the following config:
Env details:
The operator creates a job that fails to create the kes key.
I don't see any logs in the kes pods.
What might be causing this?
On a side note, the KES pods don't use a custom service account, so this Kubernetes auth role has to be used with the default service account, which is bad.
The text was updated successfully, but these errors were encountered: