From cb3f5c09d3389dcfba6d9d595ab80481aa6719f3 Mon Sep 17 00:00:00 2001 From: Bill Nickless <59262407+wknickless@users.noreply.github.com> Date: Mon, 10 Jan 2022 00:36:31 -0800 Subject: [PATCH] Restore safetyValve and singleUser when not using LDAP or OIDC (#212) * Fix NOTES.txt suitable for the default ClusterIP service * Add test to confirm persistent storage works (with possible fixes) * Add test to confirm LDAP works (with possible fixes) * Add test to confirm OIDC works (with possible fixes) * Adjust github workflow definition to run in master branch too --- .github/workflows/test-ldap.yml | 56 ++++ .github/workflows/test-oidc.yml | 59 ++++ .github/workflows/test-persistence.yml | 140 +++++++++ .github/workflows/test-safetyvalve.yml | 16 +- .github/workflows/test-singleuser.yml | 56 ++++ Chart.yaml | 2 +- README.md | 7 +- configs/authorizers-empty.xml | 282 ------------------ ....xml => login-identity-providers-ldap.xml} | 17 +- configs/nifi.properties | 17 +- templates/NOTES.txt | 10 +- templates/statefulset.yaml | 73 ++--- tests/01-safetyValve-values.yaml | 3 + tests/02-persistence-disabled-values.yaml | 8 + tests/02-persistence-enabled-values.yaml | 2 + tests/03-ldap-values.yaml | 14 + tests/03-ldap/deployment.yaml | 45 +++ tests/03-ldap/secret.yaml | 8 + tests/03-ldap/service.yaml | 14 + tests/04-oidc-keycloak-setup.bash | 85 ++++++ tests/04-oidc-login-test.js | 43 +++ tests/04-oidc-mocha-job.yaml | 27 ++ .../browserless-service.yaml | 14 + .../browserless-statefulset.yaml | 28 ++ .../keycloak-secret.yaml | 6 + .../keycloak-service.yaml | 14 + .../keycloak-statefulset.yaml | 45 +++ .../socks5-service.yaml | 14 + .../socks5-statefulset.yaml | 28 ++ tests/04-oidc-values.yaml | 17 ++ values.yaml | 3 +- 31 files changed, 801 insertions(+), 352 deletions(-) create mode 100644 .github/workflows/test-ldap.yml create mode 100644 .github/workflows/test-oidc.yml create mode 100644 .github/workflows/test-persistence.yml create mode 100644 
.github/workflows/test-singleuser.yml delete mode 100644 configs/authorizers-empty.xml rename configs/{login-identity-providers.xml => login-identity-providers-ldap.xml} (86%) create mode 100644 tests/01-safetyValve-values.yaml create mode 100644 tests/02-persistence-disabled-values.yaml create mode 100644 tests/02-persistence-enabled-values.yaml create mode 100644 tests/03-ldap-values.yaml create mode 100644 tests/03-ldap/deployment.yaml create mode 100644 tests/03-ldap/secret.yaml create mode 100644 tests/03-ldap/service.yaml create mode 100755 tests/04-oidc-keycloak-setup.bash create mode 100644 tests/04-oidc-login-test.js create mode 100644 tests/04-oidc-mocha-job.yaml create mode 100644 tests/04-oidc-test-framework/browserless-service.yaml create mode 100644 tests/04-oidc-test-framework/browserless-statefulset.yaml create mode 100644 tests/04-oidc-test-framework/keycloak-secret.yaml create mode 100644 tests/04-oidc-test-framework/keycloak-service.yaml create mode 100644 tests/04-oidc-test-framework/keycloak-statefulset.yaml create mode 100644 tests/04-oidc-test-framework/socks5-service.yaml create mode 100644 tests/04-oidc-test-framework/socks5-statefulset.yaml create mode 100644 tests/04-oidc-values.yaml diff --git a/.github/workflows/test-ldap.yml b/.github/workflows/test-ldap.yml new file mode 100644 index 00000000..de270350 --- /dev/null +++ b/.github/workflows/test-ldap.yml @@ -0,0 +1,56 @@ +name: Test-LDAP + +on: + push: + pull_request: + +jobs: + test-ldap: + name: Test NiFi Helm Chart LDAP + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v1 + - name: Setup Minikube + uses: manusa/actions-setup-minikube@v2.4.1 + with: + minikube version: 'v1.20.0' + kubernetes version: 'v1.20.2' + - name: Checkout code + uses: actions/checkout@v1 + - name: Install dependencies + run: | + sudo apt-get install -y jq + helm repo add bitnami https://charts.bitnami.com/bitnami + helm repo add dysnix https://dysnix.github.io/charts/ + helm repo 
update + helm dep up + - name: Install openldap + run: | + kubectl apply -f tests/03-ldap + kubectl wait --for=condition=Ready pod --selector=app.kubernetes.io/name=openldap --timeout=5m + - name: Install Nifi + run: helm install nifi . -f tests/03-ldap-values.yaml + - name: Check deployment status + run: kubectl wait --for=condition=Ready pod/nifi-0 --timeout=20m + - name: Wait for NiFi web server to start + run: | + for n in [ 0 1 2 3 4 5 6 7 8 9 ] + do + if kubectl logs pod/nifi-0 -c app-log | grep 'JettyServer NiFi has started' + then + exit 0 + fi + sleep 30 + done + echo NiFi did not start for 300 seconds! + exit 1 + - name: Check that LDAP login works + run: | + kubectl exec nifi-0 -c server -- curl -d username=user1 -d password=password1 -sk https://localhost:8443/nifi-api/access/token | \ + grep -v 'The supplied username and password are not valid.' + - name: Check that LDAP incorrect password fails + run: | + kubectl exec nifi-0 -c server -- curl -d username=user1 -d password=password2 -sk https://localhost:8443/nifi-api/access/token | \ + grep 'The supplied username and password are not valid.' 
+ \ No newline at end of file diff --git a/.github/workflows/test-oidc.yml b/.github/workflows/test-oidc.yml new file mode 100644 index 00000000..b02e326f --- /dev/null +++ b/.github/workflows/test-oidc.yml @@ -0,0 +1,59 @@ +name: Test-OIDC + +on: + push: + pull_request: + +jobs: + test-oidc: + name: Test NiFi Helm Chart OIDC + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v1 + - name: Setup Minikube + uses: manusa/actions-setup-minikube@v2.4.1 + with: + minikube version: 'v1.20.0' + kubernetes version: 'v1.20.2' + - name: Checkout code + uses: actions/checkout@v1 + - name: Install dependencies + run: | + sudo apt-get install -y jq + helm repo add bitnami https://charts.bitnami.com/bitnami + helm repo add dysnix https://dysnix.github.io/charts/ + helm repo update + helm dep up + - name: Install test framework components + run: | + kubectl apply -f tests/04-oidc-test-framework + kubectl create configmap 04-oidc-login-test --from-file=tests/04-oidc-login-test.js + kubectl wait --for=condition=Ready pod/browserless-0 --timeout=5m + kubectl wait --for=condition=Ready pod/keycloak-0 --timeout=5m + kubectl wait --for=condition=Ready pod/socks5-0 --timeout=5m + tests/04-oidc-keycloak-setup.bash + - name: Install Nifi + run: helm install nifi . -f tests/04-oidc-values.yaml + - name: Check deployment status + run: kubectl wait --for=condition=Ready pod/nifi-0 --timeout=20m + - name: Wait for NiFi web server to start + run: | + for n in [ 0 1 2 3 4 5 6 7 8 9 ] + do + if kubectl logs pod/nifi-0 -c app-log | grep 'JettyServer NiFi has started' + then + exit 0 + fi + sleep 30 + done + echo NiFi did not start for 300 seconds! + exit 1 + - name: Check that OIDC login works + run: | + kubectl apply -f tests/04-oidc-mocha-job.yaml + while ! 
kubectl logs -f job/oidc-mocha + do + sleep 5 + done + kubectl get job/oidc-mocha -o json | jq -e -r '.status.succeeded == 1' \ No newline at end of file diff --git a/.github/workflows/test-persistence.yml b/.github/workflows/test-persistence.yml new file mode 100644 index 00000000..971018f4 --- /dev/null +++ b/.github/workflows/test-persistence.yml @@ -0,0 +1,140 @@ +name: Test-Persistence + +on: + push: + pull_request: + +jobs: + test-persistence: + name: Test NiFi Helm Chart Persistence + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v1 + - name: Setup Minikube + uses: manusa/actions-setup-minikube@v2.4.1 + with: + minikube version: 'v1.20.0' + kubernetes version: 'v1.20.2' + - name: Checkout code + uses: actions/checkout@v1 + - name: Install dependencies + run: | + sudo apt-get install -y jq + helm repo add bitnami https://charts.bitnami.com/bitnami + helm repo add dysnix https://dysnix.github.io/charts/ + helm repo update + helm dep up + - name: Install Nifi + run: helm install nifi . -f tests/02-persistence-enabled-values.yaml + - name: Check deployment status + run: kubectl wait --for=condition=Ready pod/nifi-0 --timeout=20m + - name: Get First .processGroupFlow.uri + id: first-pgfuri + run: | + for n in [ 0 1 2 3 4 5 6 7 8 9 ] + do + if NIFI_ACCESS_TOKEN=$(kubectl exec nifi-0 -c server -- curl -d username=username -d password=changemechangeme -sk https://localhost:8443/nifi-api/access/token) + then + PGFURI=$(kubectl exec nifi-0 -c server -- curl -H "Authorization: Bearer $NIFI_ACCESS_TOKEN" -sk https://localhost:8443/nifi-api/flow/process-groups/root | jq --raw-output .processGroupFlow.uri) + echo "::set-output name=PGFURI::$PGFURI" + exit 0 + fi + sleep 30 + done + echo NiFi did not provide an access token for 300 seconds! + exit 1 + - name: Delete chart + run: | + helm delete nifi + kubectl wait --for=delete pod/nifi-0 --timeout=120s + - name: Install NiFi + run: helm install nifi . 
-f tests/02-persistence-enabled-values.yaml + - name: Check deployment status + run: kubectl wait --for=condition=Ready pod/nifi-0 --timeout=20m + - name: Get Second .processGroupFlow.uri + id: second-pgfuri + run: | + for n in [ 0 1 2 3 4 5 6 7 8 9 ] + do + if NIFI_ACCESS_TOKEN=$(kubectl exec nifi-0 -c server -- curl -d username=username -d password=changemechangeme -sk https://localhost:8443/nifi-api/access/token) + then + PGFURI=$(kubectl exec nifi-0 -c server -- curl -H "Authorization: Bearer $NIFI_ACCESS_TOKEN" -sk https://localhost:8443/nifi-api/flow/process-groups/root | jq --raw-output .processGroupFlow.uri) + echo "::set-output name=PGFURI::$PGFURI" + exit 0 + fi + sleep 30 + done + echo NiFi did not provide an access token for 300 seconds! + exit 1 + - name: Compare 2 x root processGroupFlow .processGroupFlow.uri + run: | + echo Should be the same if persistence is enabled + test ${{ steps.first-pgfuri.outputs.PGFURI }} = ${{ steps.second-pgfuri.outputs.PGFURI }} + + test-non-persistence: + name: Test NiFi Helm Chart Non-Persistence + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v1 + - name: Setup Minikube + uses: manusa/actions-setup-minikube@v2.4.1 + with: + minikube version: 'v1.20.0' + kubernetes version: 'v1.20.2' + - name: Checkout code + uses: actions/checkout@v1 + - name: Install dependencies + run: | + sudo apt-get install -y jq + helm repo add bitnami https://charts.bitnami.com/bitnami + helm repo add dysnix https://dysnix.github.io/charts/ + helm repo update + helm dep up + - name: Install Nifi + run: helm install nifi . 
-f tests/02-persistence-disabled-values.yaml + - name: Check deployment status + run: kubectl wait --for=condition=Ready pod/nifi-0 --timeout=20m + - name: Get First .processGroupFlow.uri + id: first-pgfuri + run: | + for n in [ 0 1 2 3 4 5 6 7 8 9 ] + do + if NIFI_ACCESS_TOKEN=$(kubectl exec nifi-0 -c server -- curl -d username=username -d password=changemechangeme -sk https://localhost:8443/nifi-api/access/token) + then + PGFURI=$(kubectl exec nifi-0 -c server -- curl -H "Authorization: Bearer $NIFI_ACCESS_TOKEN" -sk https://localhost:8443/nifi-api/flow/process-groups/root | jq --raw-output .processGroupFlow.uri) + echo "::set-output name=PGFURI::$PGFURI" + exit 0 + fi + sleep 30 + done + echo NiFi did not provide an access token for 300 seconds! + exit 1 + - name: Delete chart + run: | + helm delete nifi + kubectl wait --for=delete pod/nifi-0 --timeout=120s + - name: Install NiFi + run: helm install nifi . -f tests/02-persistence-disabled-values.yaml + - name: Check deployment status + run: kubectl wait --for=condition=Ready pod/nifi-0 --timeout=20m + - name: Get Second .processGroupFlow.uri + id: second-pgfuri + run: | + for n in [ 0 1 2 3 4 5 6 7 8 9 ] + do + if NIFI_ACCESS_TOKEN=$(kubectl exec nifi-0 -c server -- curl -d username=username -d password=changemechangeme -sk https://localhost:8443/nifi-api/access/token) + then + PGFURI=$(kubectl exec nifi-0 -c server -- curl -H "Authorization: Bearer $NIFI_ACCESS_TOKEN" -sk https://localhost:8443/nifi-api/flow/process-groups/root | jq --raw-output .processGroupFlow.uri) + echo "::set-output name=PGFURI::$PGFURI" + exit 0 + fi + sleep 30 + done + echo NiFi did not provide an access token for 300 seconds! 
+ exit 1 + - name: Compare 2 x root processGroupFlow .processGroupFlow.uri + run: | + echo Should not be the same if persistence is not enabled + test ${{ steps.first-pgfuri.outputs.PGFURI }} != ${{ steps.second-pgfuri.outputs.PGFURI }} diff --git a/.github/workflows/test-safetyvalve.yml b/.github/workflows/test-safetyvalve.yml index d965f5b4..bcf999ed 100644 --- a/.github/workflows/test-safetyvalve.yml +++ b/.github/workflows/test-safetyvalve.yml @@ -2,11 +2,7 @@ name: Test-SafetyValve on: push: - branches: - - patch/properties-no-ldap-oidc pull_request: - branches: - - patch/properties-no-ldap-oidc jobs: test-safetyvalve: @@ -36,6 +32,18 @@ jobs: run: helm install nifi . -f tests/01-safetyValve-values.yaml - name: Check deployment status run: kubectl wait --for=condition=Ready pod/nifi-0 --timeout=20m + - name: Wait for NiFi web server to start + run: | + for n in [ 0 1 2 3 4 5 6 7 8 9 ] + do + if kubectl logs pod/nifi-0 -c app-log | grep 'JettyServer NiFi has started' + then + exit 0 + fi + sleep 30 + done + echo NiFi did not start for 300 seconds! 
+ exit 1 - name: Check safetyValve content is correct run: | NPFP=$(kubectl exec pod/nifi-0 -c server -- ps auxww | \ diff --git a/.github/workflows/test-singleuser.yml b/.github/workflows/test-singleuser.yml new file mode 100644 index 00000000..2bf5a341 --- /dev/null +++ b/.github/workflows/test-singleuser.yml @@ -0,0 +1,56 @@ +name: Test-SingleUser + +on: + push: + pull_request: + +jobs: + test-singleuser: + name: Test NiFi Helm Chart Single User + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v1 + - name: Setup Minikube + uses: manusa/actions-setup-minikube@v2.4.1 + with: + minikube version: 'v1.20.0' + kubernetes version: 'v1.20.2' + - name: Checkout code + uses: actions/checkout@v1 + - name: Install dependencies + run: | + sudo apt-get install -y jq + helm repo add bitnami https://charts.bitnami.com/bitnami + helm repo add dysnix https://dysnix.github.io/charts/ + helm repo update + helm dep up + - name: Install openldap + run: | + kubectl apply -f tests/03-ldap + kubectl wait --for=condition=Ready pod --selector=app.kubernetes.io/name=openldap --timeout=5m + - name: Install Nifi + run: helm install nifi . + - name: Check deployment status + run: kubectl wait --for=condition=Ready pod/nifi-0 --timeout=20m + - name: Wait for NiFi web server to start + run: | + for n in [ 0 1 2 3 4 5 6 7 8 9 ] + do + if kubectl logs pod/nifi-0 -c app-log | grep 'JettyServer NiFi has started' + then + exit 0 + fi + sleep 30 + done + echo NiFi did not start for 300 seconds! + exit 1 + - name: Check that singleUser login works + run: | + kubectl exec nifi-0 -c server -- curl -d username=username -d password=changemechangeme -sk https://localhost:8443/nifi-api/access/token | \ + grep -v 'The supplied username and password are not valid.' 
+ - name: Check that singleUser incorrect password fails + run: | + kubectl exec nifi-0 -c server -- curl -d username=username -d password=donotchangeme -sk https://localhost:8443/nifi-api/access/token | \ + grep 'The supplied username and password are not valid.' + \ No newline at end of file diff --git a/Chart.yaml b/Chart.yaml index 554e4c18..37baf079 100644 --- a/Chart.yaml +++ b/Chart.yaml @@ -1,7 +1,7 @@ --- apiVersion: v2 name: nifi -version: 1.0.3 +version: 1.0.4 appVersion: 1.14.0 description: Apache NiFi is a software project from the Apache Software Foundation designed to automate the flow of data between software systems. keywords: diff --git a/README.md b/README.md index 579a3999..b2408beb 100644 --- a/README.md +++ b/README.md @@ -139,9 +139,10 @@ The following table lists the configurable parameters of the nifi chart and the | **Oidc authentication** | `auth.oidc.enabled` | Enable User auth via oidc | `false` | | `auth.oidc.discoveryUrl` | oidc discover url | `https:///.well-known/openid-configuration` | -| `auth.oidc.clientId` | oidc clientId | `nil` | -| `auth.oidc.clientSecret` | oidc clientSecret | `nil` | -| `auth.oidc.claimIdentifyingUser` | oidc claimIdentifyingUser | `email` | +| `auth.oidc.clientId` | oidc clientId | `nil` | +| `auth.oidc.clientSecret` | oidc clientSecret | `nil` | +| `auth.oidc.claimIdentifyingUser` | oidc claimIdentifyingUser | `email` | +| `auth.oidc.admin` | Default OIDC admin identity | `nifi@example.com` | | **postStart** | | `postStart` | Include additional libraries in the Nifi containers by using the postStart handler | `nil` | | **Headless Service** | diff --git a/configs/authorizers-empty.xml b/configs/authorizers-empty.xml deleted file mode 100644 index a7e707dc..00000000 --- a/configs/authorizers-empty.xml +++ /dev/null @@ -1,282 +0,0 @@ -{{- $replicas := int .Values.replicaCount }} -{{- $chart := .Chart.Name }} -{{- $release := .Release.Name }} -{{- $fullname := include "apache-nifi.fullname" . 
}} -{{- $namespace := .Release.Namespace }} - - - - - - - file-user-group-provider - org.apache.nifi.authorization.FileUserGroupProvider - ./conf/users.xml - - {{- range $i := until $replicas }} - john - {{- end }} - {{- if .Values.auth.ldap.enabled}} - {{.Values.auth.ldap.admin}} - {{- end}} - - - {{- if .Values.auth.ldap.enabled}} - - ldap-user-group-provider - org.apache.nifi.ldap.tenants.LdapUserGroupProvider - SIMPLE - {{.Values.auth.ldap.admin}} - {{.Values.auth.ldap.pass}} - /opt/nifi/nifi-current/conf/{{.Release.Name}}-nifi-0.{{.Release.Name}}-nifi-headless.{{.Values.properties.namespace}}.svc.cluster.local/keystore.jks - {{.Values.auth.SSL.keystorePasswd}} - jks - /opt/nifi/nifi-current/conf/{{.Release.Name}}-nifi-0.{{.Release.Name}}-nifi-headless.{{.Values.properties.namespace}}.svc.cluster.local/truststore.jks - {{.Values.auth.SSL.truststorePasswd}} - JKS - NONE - TLS - false - IGNORE - 10 secs - 10 secs - {{.Values.auth.ldap.host}} - - 30 mins - {{.Values.auth.ldap.searchBase}} - person - ONE_LEVEL - {{.Values.auth.ldap.searchFilter}} - {{.Values.auth.ldap.UserIdentityAttribute}} - - - - group - ONE_LEVEL - - - - - - {{- end}} - - - {{- if .Values.auth.ldap.enabled}} - - composite-configurable-user-group-provider - org.apache.nifi.authorization.CompositeConfigurableUserGroupProvider - file-user-group-provider - ldap-user-group-provider - - {{- end}} - - - - - - - file-access-policy-provider - org.apache.nifi.authorization.FileAccessPolicyProvider - file-user-group-provider - ./conf/authorizations.xml - {{- if .Values.auth.ldap.enabled}} - {{.Values.auth.ldap.admin}} - {{- else }} - john - {{- end}} - - {{- if .Values.auth.ldap.enabled}} - {{- range $i := until $replicas }} - CN={{ $fullname }}-{{ $i }}.{{ $fullname }}-headless.{{ $namespace }}.svc.cluster.local, OU=NIFI - {{- end }} - {{- end }} - - - - - managed-authorizer - org.apache.nifi.authorization.StandardManagedAuthorizer - file-access-policy-provider - - - {{- if .Values.auth.ldap.enabled}} - 
- file-provider - org.apache.nifi.authorization.FileAuthorizer - ./conf/authorizations.xml - ./conf/users.xml - {{.Values.auth.ldap.admin}} - - - - {{- end}} - \ No newline at end of file diff --git a/configs/login-identity-providers.xml b/configs/login-identity-providers-ldap.xml similarity index 86% rename from configs/login-identity-providers.xml rename to configs/login-identity-providers-ldap.xml index b45ba06c..204d08a5 100644 --- a/configs/login-identity-providers.xml +++ b/configs/login-identity-providers-ldap.xml @@ -62,7 +62,6 @@ for. If the user never logs out, they will be required to log back in following this duration. --> - {{if .Values.auth.ldap.enabled}} ldap-provider org.apache.nifi.ldap.LdapProvider @@ -88,18 +87,4 @@ {{.Values.auth.ldap.IdentityStrategy}} 12 hours - {{end}} - - - + \ No newline at end of file diff --git a/configs/nifi.properties b/configs/nifi.properties index d31e97b2..4d33a0ec 100644 --- a/configs/nifi.properties +++ b/configs/nifi.properties @@ -157,11 +157,8 @@ nifi.security.truststoreType=jks nifi.security.truststorePasswd={{.Values.auth.SSL.truststorePasswd}} proxiedEntity={{.Values.auth.ldap.admin}} nifi.security.user.authorizer=file-provider -nifi.security.needClientAuth={{.Values.properties.needClientAuth}} nifi.security.user.login.identity.provider=ldap-provider -{{end}} - -{{if .Values.auth.oidc.enabled}} +{{else if .Values.auth.oidc.enabled}} nifi.security.keystore=/opt/nifi/nifi-current/conf/keystore.p12 nifi.security.keystoreType=PKCS12 nifi.security.keystorePasswd= @@ -169,9 +166,19 @@ nifi.security.keyPasswd= nifi.security.truststore=/opt/nifi/nifi-current/conf/truststore.p12 nifi.security.truststoreType=PKCS12 nifi.security.truststorePasswd= -nifi.security.needClientAuth={{.Values.properties.needClientAuth}} nifi.security.user.authorizer=managed-authorizer +{{ else }} +nifi.security.keystore=./conf/keystore.p12 +nifi.security.keystoreType=PKCS12 +nifi.security.keystorePasswd= +nifi.security.keyPasswd= 
+nifi.security.truststore=./conf/truststore.p12 +nifi.security.truststoreType=PKCS12 +nifi.security.truststorePasswd= +nifi.security.user.login.identity.provider=single-user-provider +nifi.security.user.authorizer=single-user-authorizer {{end}} +nifi.security.needClientAuth={{.Values.properties.needClientAuth}} {{if .Values.auth.oidc.enabled}} # OpenId Connect SSO Properties # diff --git a/templates/NOTES.txt b/templates/NOTES.txt index 16f51dfd..e1ddf03a 100644 --- a/templates/NOTES.txt +++ b/templates/NOTES.txt @@ -1,8 +1,6 @@ -Cluster endpoint IP address will be available at: -kubectl get svc {{ .Release.Name }} -n {{ .Release.Namespace }} -o jsonpath='{.status.loadBalancer.ingress[*].ip}' +To access the NiFi UI via kubectl port forwarding, +wait until the cluster is ready and then run: -Cluster endpoint domain name is: {{.Values.properties.webProxyHost}} - please update your DNS or /etc/hosts accordingly! +kubectl port-forward -n {{ .Release.Namespace}} svc/{{ .Release.Name }} {{ .Values.service.httpsPort }}:{{ .Values.service.httpsPort }} -Once you are done, your NiFi instance will be available at: - - {{ if .Values.properties.httpsPort }}https{{- else }}http{{ end }}://{{.Values.properties.webProxyHost}}/nifi +...and point your web browser to https://localhost:{{ .Values.service.httpsPort }}/nifi/ diff --git a/templates/statefulset.yaml b/templates/statefulset.yaml index ca8d008e..b88fd0e4 100644 --- a/templates/statefulset.yaml +++ b/templates/statefulset.yaml @@ -118,7 +118,6 @@ spec: - name: server imagePullPolicy: {{ .Values.image.pullPolicy | quote }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" -{{- if or (.Values.auth.ldap.enabled) (.Values.auth.oidc.enabled) }} command: - bash - -ce @@ -142,9 +141,20 @@ spec: cat "${NIFI_HOME}/conf/nifi.temp" > "${NIFI_HOME}/conf/nifi.properties" {{- if .Values.auth.ldap.enabled }} - cat "${NIFI_HOME}/conf/authorizers.temp" > "${NIFI_HOME}/conf/authorizers.xml" -{{- else }} - cat 
"${NIFI_HOME}/conf/authorizers.empty" > "${NIFI_HOME}/conf/authorizers.xml" + cat "${NIFI_HOME}/conf/authorizers.temp" > "${NIFI_HOME}/conf/authorizers.xml" + cat "${NIFI_HOME}/conf/login-identity-providers-ldap.xml" > "${NIFI_HOME}/conf/login-identity-providers.xml" +{{- else if .Values.auth.oidc.enabled }} + prop_replace nifi.security.user.login.identity.provider '' + prop_replace nifi.security.user.authorizer managed-authorizer + prop_replace nifi.security.user.oidc.discovery.url {{ .Values.auth.oidc.discoveryUrl }} + prop_replace nifi.security.user.oidc.client.id {{ .Values.auth.oidc.clientId }} + prop_replace nifi.security.user.oidc.client.secret {{ .Values.auth.oidc.clientSecret }} + prop_replace nifi.security.user.oidc.claim.identifying.user {{ .Values.auth.oidc.claimIdentifyingUser }} + xmlstarlet ed --inplace --delete "//authorizers/authorizer[identifier='single-user-authorizer']" "${NIFI_HOME}/conf/authorizers.xml" + xmlstarlet ed --inplace --update "//authorizers/userGroupProvider/property[@name='Initial User Identity 1']" -v {{ .Values.auth.oidc.admin }} "${NIFI_HOME}/conf/authorizers.xml" + xmlstarlet ed --inplace --update "//authorizers/accessPolicyProvider/property[@name='Initial Admin Identity']" -v {{ .Values.auth.oidc.admin }} "${NIFI_HOME}/conf/authorizers.xml" +{{- else if .Values.auth.singleUser.username }} + bin/nifi.sh set-single-user-credentials {{ .Values.auth.singleUser.username }} {{ .Values.auth.singleUser.password }} {{- end }} if ! test -f /opt/nifi/data/flow.xml.gz && test -f /opt/nifi/data/flow.xml; then @@ -163,14 +173,29 @@ spec: prop_replace nifi.web.proxy.host {{ template "apache-nifi.fullname" . }}.{{ .Release.Namespace }}.svc {{- end }} + if [ ! 
-r "${NIFI_HOME}/conf/nifi-cert.pem" ] + then + /opt/nifi/nifi-toolkit-current/bin/tls-toolkit.sh standalone \ + -n '{{.Release.Name}}-nifi-0.{{.Release.Name}}-nifi-headless.{{.Release.Namespace}}.svc.cluster.local' \ + -C '{{.Values.auth.admin}}' \ + -o "${NIFI_HOME}/conf/" \ + -P {{.Values.auth.SSL.truststorePasswd}} \ + -S {{.Values.auth.SSL.keystorePasswd}} \ + --nifiPropertiesFile /opt/nifi/nifi-current/conf/nifi.properties + fi + {{- if .Values.properties.safetyValve }} {{- range $prop, $val := .Values.properties.safetyValve }} prop_replace {{ $prop }} "{{ $val }}" nifi.properties {{- end }} {{- end }} - - exec bin/nifi.sh run & nifi_pid="$!" + for f in "${NIFI_HOME}/conf/authorizers.xml" "${NIFI_HOME}/conf/login-identity-providers.xml" ${NIFI_HOME}/conf/nifi.properties + do + echo === $f === + cat $f + done + echo === end of files === function offloadNode() { FQDN=$(hostname -f) @@ -227,13 +252,10 @@ spec: trap 'echo Received trapped signal, beginning shutdown...;offloadNode;./bin/nifi.sh stop;deleteNode;exit 0;' TERM HUP INT; trap ":" EXIT + exec bin/nifi.sh run & nifi_pid="$!" echo NiFi running with PID ${nifi_pid}. wait ${nifi_pid} - {{- if .Values.auth.ldap.enabled }} - /opt/nifi/nifi-toolkit-current/bin/tls-toolkit.sh standalone -n '{{.Release.Name}}-nifi-0.{{.Release.Name}}-nifi-headless.{{.Release.Namespace}}.svc.cluster.local' -C '{{.Values.auth.ldap.admin}}' -o '/opt/nifi/nifi-current/conf/' -P {{.Values.auth.SSL.truststorePasswd}} -S {{.Values.auth.SSL.keystorePasswd}} --nifiPropertiesFile /opt/nifi/nifi-current/conf/nifi.properties - exec bin/nifi.sh run - {{- end }} -{{- end }} + resources: {{ toYaml .Values.resources | indent 10 }} ports: @@ -255,10 +277,6 @@ spec: - name: NIFI_ZOOKEEPER_CONNECT_STRING value: {{ template "zookeeper.url" . 
}} {{- if not (or (.Values.auth.ldap.enabled) (.Values.auth.oidc.enabled)) }} - - name: SINGLE_USER_CREDENTIALS_USERNAME - value: {{ .Values.auth.singleUser.username }} - - name: SINGLE_USER_CREDENTIALS_PASSWORD - value: {{ .Values.auth.singleUser.password }} - name: NIFI_WEB_HTTPS_HOST value: 0.0.0.0 {{- end }} @@ -311,7 +329,6 @@ spec: volumeMounts: - name: "logs" mountPath: /opt/nifi/nifi-current/logs -{{- if or (.Values.auth.ldap.enabled) (.Values.auth.oidc.enabled) }} - name: "data" mountPath: /opt/nifi/data - name: "auth-conf" @@ -333,15 +350,12 @@ spec: - name: "authorizers-temp" mountPath: /opt/nifi/nifi-current/conf/authorizers.temp subPath: "authorizers.temp" - - name: "authorizers-empty" - mountPath: /opt/nifi/nifi-current/conf/authorizers.empty - subPath: "authorizers.empty" - name: "bootstrap-notification-services-xml" mountPath: /opt/nifi/nifi-current/conf/bootstrap-notification-services.xml subPath: "bootstrap-notification-services.xml" - - name: "login-identity-providers-xml" - mountPath: /opt/nifi/nifi-current/conf/login-identity-providers.xml - subPath: "login-identity-providers.xml" + - name: "login-identity-providers-ldap-xml" + mountPath: /opt/nifi/nifi-current/conf/login-identity-providers-ldap.xml + subPath: "login-identity-providers-ldap.xml" - name: "state-management-xml" mountPath: /opt/nifi/nifi-current/conf/state-management.xml subPath: "state-management.xml" @@ -386,7 +400,6 @@ spec: {{- if .Values.extraVolumeMounts }} {{ toYaml .Values.extraVolumeMounts | indent 10 }} {{- end }} -{{- end }} - name: app-log imagePullPolicy: {{ .Values.sidecar.imagePullPolicy | default "Always" | quote }} image: "{{ .Values.sidecar.image }}:{{ .Values.sidecar.tag }}" @@ -415,7 +428,6 @@ spec: - name: logs mountPath: /var/log volumes: -{{- if or (.Values.auth.ldap.enabled) (.Values.auth.oidc.enabled) }} - name: "bootstrap-conf" configMap: name: {{ template "apache-nifi.fullname" . 
}}-config @@ -434,24 +446,18 @@ spec: items: - key: "authorizers.xml" path: "authorizers.temp" - - name: "authorizers-empty" - configMap: - name: {{ template "apache-nifi.fullname" . }}-config - items: - - key: "authorizers-empty.xml" - path: "authorizers.empty" - name: "bootstrap-notification-services-xml" configMap: name: {{ template "apache-nifi.fullname" . }}-config items: - key: "bootstrap-notification-services.xml" path: "bootstrap-notification-services.xml" - - name: "login-identity-providers-xml" + - name: "login-identity-providers-ldap-xml" configMap: name: {{ template "apache-nifi.fullname" . }}-config items: - - key: "login-identity-providers.xml" - path: "login-identity-providers.xml" + - key: "login-identity-providers-ldap.xml" + path: "login-identity-providers-ldap.xml" - name: "state-management-xml" configMap: name: {{ template "apache-nifi.fullname" . }}-config @@ -480,7 +486,6 @@ spec: configMap: name: {{ .name }} {{- end }} -{{- end }} {{- if not .Values.persistence.enabled }} - name: config-data emptyDir: {} diff --git a/tests/01-safetyValve-values.yaml b/tests/01-safetyValve-values.yaml new file mode 100644 index 00000000..ffca4bca --- /dev/null +++ b/tests/01-safetyValve-values.yaml @@ -0,0 +1,3 @@ +properties: + safetyValve: + nifi.content.claim.max.appendable.size: "1 B" diff --git a/tests/02-persistence-disabled-values.yaml b/tests/02-persistence-disabled-values.yaml new file mode 100644 index 00000000..6b53a015 --- /dev/null +++ b/tests/02-persistence-disabled-values.yaml @@ -0,0 +1,8 @@ +persistence: + enabled: false + +zookeeper: + enabled: false + +registry: + enabled: false diff --git a/tests/02-persistence-enabled-values.yaml b/tests/02-persistence-enabled-values.yaml new file mode 100644 index 00000000..a434bf02 --- /dev/null +++ b/tests/02-persistence-enabled-values.yaml @@ -0,0 +1,2 @@ +persistence: + enabled: true diff --git a/tests/03-ldap-values.yaml b/tests/03-ldap-values.yaml new file mode 100644 index 00000000..ecf5b334 --- 
/dev/null +++ b/tests/03-ldap-values.yaml @@ -0,0 +1,14 @@ +zookeeper: + enabled: false + +registry: + enabled: false + +auth: + ldap: + enabled: true + host: ldap://openldap:389 + searchBase: ou=users,dc=example,dc=org + admin: cn=user1,ou=users,dc=example,dc=org + pass: password1 + authStrategy: SIMPLE diff --git a/tests/03-ldap/deployment.yaml b/tests/03-ldap/deployment.yaml new file mode 100644 index 00000000..462a697a --- /dev/null +++ b/tests/03-ldap/deployment.yaml @@ -0,0 +1,45 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: openldap + labels: + app.kubernetes.io/name: openldap +spec: + selector: + matchLabels: + app.kubernetes.io/name: openldap + replicas: 1 + template: + metadata: + labels: + app.kubernetes.io/name: openldap + spec: + containers: +# - name: netshoot +# image: docker.io/nicolaka/netshoot:latest +# imagePullPolicy: "Always" +# command: [ 'tail', '-f', '/dev/null' ] + - name: openldap + image: docker.io/bitnami/openldap:latest + imagePullPolicy: "Always" + env: + - name: LDAP_ADMIN_USERNAME + value: "admin" + - name: LDAP_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + key: adminpassword + name: openldap + - name: LDAP_USERS + valueFrom: + secretKeyRef: + key: users + name: openldap + - name: LDAP_PASSWORDS + valueFrom: + secretKeyRef: + key: passwords + name: openldap + ports: + - name: tcp-ldap + containerPort: 1389 diff --git a/tests/03-ldap/secret.yaml b/tests/03-ldap/secret.yaml new file mode 100644 index 00000000..992ef5aa --- /dev/null +++ b/tests/03-ldap/secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: openldap +stringData: + adminpassword: admin + passwords: password1,password2 + users: user1,user2 diff --git a/tests/03-ldap/service.yaml b/tests/03-ldap/service.yaml new file mode 100644 index 00000000..bf6b56e5 --- /dev/null +++ b/tests/03-ldap/service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: openldap + labels: + app.kubernetes.io/name: openldap +spec: + type: 
ClusterIP + ports: + - name: tcp-ldap + port: 389 + targetPort: tcp-ldap + selector: + app.kubernetes.io/name: openldap diff --git a/tests/04-oidc-keycloak-setup.bash b/tests/04-oidc-keycloak-setup.bash new file mode 100755 index 00000000..f9fafe6c --- /dev/null +++ b/tests/04-oidc-keycloak-setup.bash @@ -0,0 +1,85 @@ +#!/bin/bash -x +# +# Create a realm and user to verify NiFi OIDC support works correctly + +# Find the SOCKS5 Proxy into the cluster + +S5IP=$(kubectl get node -o json | jq -r '.items[0].status.addresses[] | select(.type=="InternalIP") | .address') +S5PORT=$(kubectl get service socks5 -o json | jq -r '.spec.ports[0].nodePort') + +CURL="curl -s --socks5-hostname $S5IP:$S5PORT" + +# NOTE: Strictly speaking the SOCKS5 cluster could be removed from the test harness +# by running curl through a kubectl exec in (say) the NiFi server container or +# directly through a kubectl run using the docker.io/curlimages/curl image. +# Either way curl would be able to use the fully qualified domain name (FQDN). +# +# But doing it this way also allows someone debugging OIDC to set their +# workstation browser to use the SOCKS5 proxy (using the IP address and port +# discovered through the above kubectl commands, and with remote DNS resolution) +# to access the NiFi UI as https://nifi.default.svc.cluster.local:8443/nifi/ and +# confirm it all works--including the FQDN-based redirects from NiFi to Keycloak and +# back again. And it's very useful if (when, really) it doesn't work to have the full +# desktop browser debugging and tracing capabilities available. +# +# Also, while writing the tests it was sure nice having the full browser available +# to spelunk through the DOM of the Keycloak and NiFi pages to zero in on what +# to have puppeteer interact with, both in terms of sending text/clicks and +# scraping results. 
+ +KCURL=http://keycloak.default.svc.cluster.local:8080/auth + +# Get a KeyCloak admin token + +KCAT=$($CURL \ + -d username=admin \ + -d password=admin \ + -d client_id=admin-cli \ + -d grant_type=password \ + $KCURL/realms/master/protocol/openid-connect/token | \ + jq --raw-output .access_token ) + +# Create the NiFi Realm + +$CURL \ + --request POST $KCURL/admin/realms/ \ + --header "Authorization: Bearer $KCAT" \ + --header "Content-Type: application/json" \ + --data-raw '{ + "realm":"nifi", + "displayName":"NiFi", + "enabled":"true" + }' + +# Create the NiFi User + +$CURL \ + --request POST $KCURL/admin/realms/nifi/users \ + --header "Authorization: Bearer $KCAT" \ + --header "Content-Type: application/json" \ + --data-raw '{ + "firstName":"NiFi", + "lastName":"User", + "username":"nifi", + "enabled":"true", + "email":"nifi@example.com", + "credentials":[ + { + "type":"password", + "value":"reallychangeme", + "temporary":"false" + } + ] + }' + +$CURL \ + --request POST $KCURL/admin/realms/nifi/clients \ + --header "Authorization: Bearer $KCAT" \ + --header "Content-Type: application/json" \ + --data-raw '{ + "clientId":"nifi", + "enabled":"true", + "redirectUris": [ "https://nifi.default.svc.cluster.local:8443/*" ], + "publicClient": "false", + "secret":"CZhA1IOePlXHz3PWqVwYoVAcYIUHTcDK" + }' \ No newline at end of file diff --git a/tests/04-oidc-login-test.js b/tests/04-oidc-login-test.js new file mode 100644 index 00000000..bf814f6f --- /dev/null +++ b/tests/04-oidc-login-test.js @@ -0,0 +1,43 @@ +const puppeteer = require ('puppeteer-core') +const expect = require('chai').expect + +describe('NiFi Login via OIDC', () => { + let browser + let page + + before(async () => { + browser = await puppeteer.connect({ + browserWSEndpoint: 'ws://browserless.default.svc.cluster.local:3000', + ignoreHTTPSErrors: true + }) + page = await browser.newPage() + }) + + it('NiFi redirects to KeyCloak login page', async () => { + await Promise.all([ + 
page.goto('https://nifi.default.svc.cluster.local:8443/nifi/'), + page.waitForNavigation(), + page.waitForNetworkIdle() + ]) + const pageTitle = await page.waitForSelector('h1[id="kc-page-title"]') + const titleContent = await pageTitle.evaluate(el => el.textContent) + expect(titleContent).to.include('Sign in to your account') + }).timeout(30000) + + it('nifi@example.com shown as logged in user', async () => { + await page.type('input[id="username"]','nifi') + await page.type('input[id="password"]','reallychangeme') + await Promise.all([ + page.click('input[id="kc-login"]'), + page.waitForNavigation(), + page.waitForNetworkIdle() + ]) + const currentUserElement = await page.waitForSelector('div[id="current-user"]') + const userName = await currentUserElement.evaluate(el => el.textContent) + expect(userName).to.equal('nifi@example.com') + }).timeout(30000) + + after(async () => { + await browser.close() + }) +}) diff --git a/tests/04-oidc-mocha-job.yaml b/tests/04-oidc-mocha-job.yaml new file mode 100644 index 00000000..73529614 --- /dev/null +++ b/tests/04-oidc-mocha-job.yaml @@ -0,0 +1,27 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: oidc-mocha +spec: + template: + spec: + containers: + - name: node + image: node + command: + - /bin/bash + - -x + - -c + - | + yarn add puppeteer-core + yarn add chai + yarn add mocha + node_modules/mocha/bin/mocha /tests/04-oidc-login-test.js --timeout 30000 + volumeMounts: + - name: tests + mountPath: /tests + restartPolicy: Never + volumes: + - name: tests + configMap: + name: 04-oidc-login-test diff --git a/tests/04-oidc-test-framework/browserless-service.yaml b/tests/04-oidc-test-framework/browserless-service.yaml new file mode 100644 index 00000000..7ee96fec --- /dev/null +++ b/tests/04-oidc-test-framework/browserless-service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: browserless + labels: + app.kubernetes.io/name: browserless +spec: + type: NodePort + ports: + - name: tcp-browserless + 
port: 3000 + targetPort: tcp-browserless + selector: + app.kubernetes.io/name: browserless diff --git a/tests/04-oidc-test-framework/browserless-statefulset.yaml b/tests/04-oidc-test-framework/browserless-statefulset.yaml new file mode 100644 index 00000000..c00a0646 --- /dev/null +++ b/tests/04-oidc-test-framework/browserless-statefulset.yaml @@ -0,0 +1,28 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: browserless + labels: + app.kubernetes.io/name: browserless +spec: + selector: + matchLabels: + app.kubernetes.io/name: browserless + serviceName: browserless + replicas: 1 + template: + metadata: + labels: + app.kubernetes.io/name: browserless + spec: + containers: + - name: browserless + image: browserless/chrome + imagePullPolicy: "Always" + ports: + - name: tcp-browserless + containerPort: 3000 + # - name: netshoot + # command: [ 'tail', '-f', '/dev/null' ] + # image: docker.io/nicolaka/netshoot:latest + # imagePullPolicy: "Always" diff --git a/tests/04-oidc-test-framework/keycloak-secret.yaml b/tests/04-oidc-test-framework/keycloak-secret.yaml new file mode 100644 index 00000000..b2e56e32 --- /dev/null +++ b/tests/04-oidc-test-framework/keycloak-secret.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Secret +metadata: + name: keycloak +stringData: + adminpassword: admin diff --git a/tests/04-oidc-test-framework/keycloak-service.yaml b/tests/04-oidc-test-framework/keycloak-service.yaml new file mode 100644 index 00000000..03aa64f1 --- /dev/null +++ b/tests/04-oidc-test-framework/keycloak-service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: keycloak + labels: + app.kubernetes.io/name: keycloak +spec: + type: ClusterIP + ports: + - name: tcp-keycloak + port: 8080 + targetPort: tcp-keycloak + selector: + app.kubernetes.io/name: keycloak diff --git a/tests/04-oidc-test-framework/keycloak-statefulset.yaml b/tests/04-oidc-test-framework/keycloak-statefulset.yaml new file mode 100644 index 00000000..f40c8b04 --- /dev/null +++ 
b/tests/04-oidc-test-framework/keycloak-statefulset.yaml @@ -0,0 +1,45 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: keycloak + labels: + app.kubernetes.io/name: keycloak +spec: + selector: + matchLabels: + app.kubernetes.io/name: keycloak + serviceName: keycloak + replicas: 1 + template: + metadata: + labels: + app.kubernetes.io/name: keycloak + spec: + containers: + - name: keycloak + image: quay.io/keycloak/keycloak:16.1.0 + imagePullPolicy: "Always" + env: + - name: KEYCLOAK_USER + value: "admin" + - name: KEYCLOAK_PASSWORD + valueFrom: + secretKeyRef: + key: adminpassword + name: keycloak + ports: + - name: tcp-keycloak + containerPort: 8080 + startupProbe: + exec: + command: + - /bin/sh + - -x + - -c + - curl -s http://localhost:8080 | grep 'If you are not redirected automatically, follow this' + failureThreshold: 30 + periodSeconds: 10 +# - name: netshoot +# command: [ 'tail', '-f', '/dev/null' ] +# image: docker.io/nicolaka/netshoot:latest +# imagePullPolicy: "Always" diff --git a/tests/04-oidc-test-framework/socks5-service.yaml b/tests/04-oidc-test-framework/socks5-service.yaml new file mode 100644 index 00000000..675cb434 --- /dev/null +++ b/tests/04-oidc-test-framework/socks5-service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: socks5 + labels: + app.kubernetes.io/name: socks5 +spec: + type: NodePort + ports: + - name: tcp-socks5 + port: 1080 + targetPort: tcp-socks5 + selector: + app.kubernetes.io/name: socks5 diff --git a/tests/04-oidc-test-framework/socks5-statefulset.yaml b/tests/04-oidc-test-framework/socks5-statefulset.yaml new file mode 100644 index 00000000..c3a7e3a7 --- /dev/null +++ b/tests/04-oidc-test-framework/socks5-statefulset.yaml @@ -0,0 +1,28 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: socks5 + labels: + app.kubernetes.io/name: socks5 +spec: + selector: + matchLabels: + app.kubernetes.io/name: socks5 + serviceName: socks5 + replicas: 1 + template: + metadata: + labels: + 
app.kubernetes.io/name: socks5 + spec: + containers: + - name: socks5 + image: serjs/go-socks5-proxy:latest + imagePullPolicy: "Always" + ports: + - name: tcp-socks5 + containerPort: 1080 + # - name: netshoot + # command: [ 'tail', '-f', '/dev/null' ] + # image: docker.io/nicolaka/netshoot:latest + # imagePullPolicy: "Always" diff --git a/tests/04-oidc-values.yaml b/tests/04-oidc-values.yaml new file mode 100644 index 00000000..c61e4576 --- /dev/null +++ b/tests/04-oidc-values.yaml @@ -0,0 +1,17 @@ +zookeeper: + enabled: false + +registry: + enabled: false + +auth: + oidc: + enabled: true + discoveryUrl: http://keycloak.default.svc.cluster.local:8080/auth/realms/nifi/.well-known/openid-configuration + clientId: nifi + clientSecret: CZhA1IOePlXHz3PWqVwYoVAcYIUHTcDK + admin: nifi@example.com + claimIdentifyingUser: email + +properties: + webProxyHost: nifi.default.svc.cluster.local:8443 diff --git a/values.yaml b/values.yaml index 4675ac68..bdf82cea 100644 --- a/values.yaml +++ b/values.yaml @@ -122,7 +122,8 @@ auth: discoveryUrl: #http://:/auth/realms//.well-known/openid-configuration clientId: # clientSecret: # - claimIdentifyingUser: preferred_username + claimIdentifyingUser: email + admin: nifi@example.com ## Request additional scopes, for example profile additionalScopes: