diff --git a/app_serving/abort-java-app.yaml b/app_serving/abort-java-app.yaml index dd3c12f3..7e631eb2 100644 --- a/app_serving/abort-java-app.yaml +++ b/app_serving/abort-java-app.yaml @@ -20,8 +20,8 @@ spec: value: "" - name: organization_id value: "ose4j7p39" - - name: strategy - value: "blue-green" + - name: project_id + value: "" # tks_info service URL - name: tks_api_url value: "http://tks-api.tks.svc:9110" @@ -37,6 +37,8 @@ spec: parameters: - name: organization_id value: "{{workflow.parameters.organization_id}}" + - name: project_id + value: "{{workflow.parameters.project_id}}" - name: asa_id value: "{{workflow.parameters.asa_id}}" - name: asa_task_id @@ -57,6 +59,8 @@ spec: parameters: - name: organization_id value: "{{workflow.parameters.organization_id}}" + - name: project_id + value: "{{workflow.parameters.project_id}}" - name: asa_id value: "{{workflow.parameters.asa_id}}" - name: asa_task_id @@ -78,6 +82,8 @@ spec: parameters: - name: organization_id value: "{{workflow.parameters.organization_id}}" + - name: project_id + value: "{{workflow.parameters.project_id}}" - name: asa_id value: "{{workflow.parameters.asa_id}}" - name: asa_task_id @@ -96,6 +102,8 @@ spec: parameters: - name: organization_id value: "{{workflow.parameters.organization_id}}" + - name: project_id + value: "{{workflow.parameters.project_id}}" - name: asa_id value: "{{workflow.parameters.asa_id}}" - name: asa_task_id @@ -131,7 +139,6 @@ spec: ABORT_LOG='/mnt/out/abort_output.log' app_name={{workflow.parameters.app_name}} ns={{workflow.parameters.namespace}} - strategy={{workflow.parameters.strategy}} # Prepare kubeconfig echo "Preparing kubeconfig for target cluster..." 
| tee -a $ABORT_LOG diff --git a/app_serving/delete-java-app.yaml b/app_serving/delete-java-app.yaml index 0612aaad..c2673160 100644 --- a/app_serving/delete-java-app.yaml +++ b/app_serving/delete-java-app.yaml @@ -19,7 +19,9 @@ spec: - name: asa_task_id value: "" - name: organization_id - value: "ose4j7p39" + value: "" + - name: project_id + value: "" - name: tks_api_url value: "http://tks-api.tks.svc:9110" @@ -34,6 +36,8 @@ spec: parameters: - name: organization_id value: "{{workflow.parameters.organization_id}}" + - name: project_id + value: "{{workflow.parameters.project_id}}" - name: asa_id value: "{{workflow.parameters.asa_id}}" - name: asa_task_id @@ -54,6 +58,8 @@ spec: parameters: - name: organization_id value: "{{workflow.parameters.organization_id}}" + - name: project_id + value: "{{workflow.parameters.project_id}}" - name: asa_id value: "{{workflow.parameters.asa_id}}" - name: asa_task_id @@ -72,6 +78,8 @@ spec: parameters: - name: organization_id value: "{{workflow.parameters.organization_id}}" + - name: project_id + value: "{{workflow.parameters.project_id}}" - name: asa_id value: "{{workflow.parameters.asa_id}}" - name: asa_task_id diff --git a/app_serving/promote-java-app.yaml b/app_serving/promote-java-app.yaml index 7ea518fd..96233b05 100644 --- a/app_serving/promote-java-app.yaml +++ b/app_serving/promote-java-app.yaml @@ -20,6 +20,8 @@ spec: value: "" - name: organization_id value: "ose4j7p39" + - name: project_id + value: "" - name: strategy value: "blue-green" # tks_info service URL @@ -37,6 +39,8 @@ spec: parameters: - name: organization_id value: "{{workflow.parameters.organization_id}}" + - name: project_id + value: "{{workflow.parameters.project_id}}" - name: asa_id value: "{{workflow.parameters.asa_id}}" - name: asa_task_id @@ -57,6 +61,8 @@ spec: parameters: - name: organization_id value: "{{workflow.parameters.organization_id}}" + - name: project_id + value: "{{workflow.parameters.project_id}}" - name: asa_id value: 
"{{workflow.parameters.asa_id}}" - name: asa_task_id @@ -78,6 +84,8 @@ spec: parameters: - name: organization_id value: "{{workflow.parameters.organization_id}}" + - name: project_id + value: "{{workflow.parameters.project_id}}" - name: asa_id value: "{{workflow.parameters.asa_id}}" - name: asa_task_id @@ -96,6 +104,8 @@ spec: parameters: - name: organization_id value: "{{workflow.parameters.organization_id}}" + - name: project_id + value: "{{workflow.parameters.project_id}}" - name: asa_id value: "{{workflow.parameters.asa_id}}" - name: asa_task_id diff --git a/app_serving/rollback-java-app.yaml b/app_serving/rollback-java-app.yaml index fff5b0c6..ffe0cd8d 100644 --- a/app_serving/rollback-java-app.yaml +++ b/app_serving/rollback-java-app.yaml @@ -10,6 +10,8 @@ spec: parameters: - name: organization_id value: "ose4j7p39" + - name: project_id + value: "" - name: target_cluster_id value: "C011b88fa" - name: app_name @@ -37,6 +39,8 @@ spec: parameters: - name: organization_id value: "{{workflow.parameters.organization_id}}" + - name: project_id + value: "{{workflow.parameters.project_id}}" - name: asa_id value: "{{workflow.parameters.asa_id}}" - name: asa_task_id @@ -57,6 +61,8 @@ spec: parameters: - name: organization_id value: "{{workflow.parameters.organization_id}}" + - name: project_id + value: "{{workflow.parameters.project_id}}" - name: asa_id value: "{{workflow.parameters.asa_id}}" - name: asa_task_id @@ -75,6 +81,8 @@ spec: parameters: - name: organization_id value: "{{workflow.parameters.organization_id}}" + - name: project_id + value: "{{workflow.parameters.project_id}}" - name: asa_id value: "{{workflow.parameters.asa_id}}" - name: asa_task_id @@ -93,6 +101,8 @@ spec: parameters: - name: organization_id value: "{{workflow.parameters.organization_id}}" + - name: project_id + value: "{{workflow.parameters.project_id}}" - name: asa_id value: "{{workflow.parameters.asa_id}}" - name: asa_task_id diff --git a/app_serving/serve-java-app.yaml 
b/app_serving/serve-java-app.yaml index be0a761f..942d0986 100644 --- a/app_serving/serve-java-app.yaml +++ b/app_serving/serve-java-app.yaml @@ -24,9 +24,11 @@ spec: - name: app_type value: "springboot" - name: target_cluster_id - value: "cmibsrdnq" + value: "" - name: organization_id - value: "ose4j7p39" + value: "" + - name: project_id + value: "" - name: app_name value: "spring-petclinic" - name: namespace @@ -106,6 +108,8 @@ spec: parameters: - name: organization_id value: "{{workflow.parameters.organization_id}}" + - name: project_id + value: "{{workflow.parameters.project_id}}" - name: asa_id value: "{{workflow.parameters.asa_id}}" - name: asa_task_id @@ -123,6 +127,8 @@ spec: parameters: - name: organization_id value: "{{workflow.parameters.organization_id}}" + - name: project_id + value: "{{workflow.parameters.project_id}}" - name: asa_id value: "{{workflow.parameters.asa_id}}" - name: asa_task_id @@ -140,6 +146,8 @@ spec: parameters: - name: organization_id value: "{{workflow.parameters.organization_id}}" + - name: project_id + value: "{{workflow.parameters.project_id}}" - name: asa_id value: "{{workflow.parameters.asa_id}}" - name: asa_task_id @@ -162,6 +170,8 @@ spec: parameters: - name: organization_id value: "{{workflow.parameters.organization_id}}" + - name: project_id + value: "{{workflow.parameters.project_id}}" - name: asa_id value: "{{workflow.parameters.asa_id}}" - name: asa_task_id @@ -182,6 +192,8 @@ spec: parameters: - name: organization_id value: "{{workflow.parameters.organization_id}}" + - name: project_id + value: "{{workflow.parameters.project_id}}" - name: asa_id value: "{{workflow.parameters.asa_id}}" - name: asa_task_id @@ -199,6 +211,8 @@ spec: parameters: - name: organization_id value: "{{workflow.parameters.organization_id}}" + - name: project_id + value: "{{workflow.parameters.project_id}}" - name: asa_id value: "{{workflow.parameters.asa_id}}" - name: asa_task_id @@ -219,6 +233,8 @@ spec: parameters: - name: organization_id 
value: "{{workflow.parameters.organization_id}}" + - name: project_id + value: "{{workflow.parameters.project_id}}" - name: asa_id value: "{{workflow.parameters.asa_id}}" - name: asa_task_id @@ -238,6 +254,8 @@ spec: parameters: - name: organization_id value: "{{workflow.parameters.organization_id}}" + - name: project_id + value: "{{workflow.parameters.project_id}}" - name: asa_id value: "{{workflow.parameters.asa_id}}" - name: asa_task_id diff --git a/cloud-accounts/aws-multi-tenancy-iam-resources.yaml b/cloud-accounts/aws-multi-tenancy-iam-resources.yaml index 8d15a398..6c6c9c52 100644 --- a/cloud-accounts/aws-multi-tenancy-iam-resources.yaml +++ b/cloud-accounts/aws-multi-tenancy-iam-resources.yaml @@ -73,6 +73,7 @@ spec: - \"iam:TagOpenIDConnectProvider\" - \"iam:CreatePolicy\" - \"iam:GetPolicy\" + - \"iam:DeletePolicy\" - \"iam:AttachRolePolicy\" - \"iam:DetachRolePolicy\" - \"iam:CreateRole\" diff --git a/deploy_apps/tks-lma-federation-wftpl.yaml b/deploy_apps/tks-lma-federation-wftpl.yaml index bc2f84b2..b3b061cd 100644 --- a/deploy_apps/tks-lma-federation-wftpl.yaml +++ b/deploy_apps/tks-lma-federation-wftpl.yaml @@ -28,7 +28,7 @@ spec: - name: console_url value: "https://tks-console-dev.taco-cat.xyz" - name: alert_tks - value: "https://tks-api-dev.taco-cat.xyz/system-api/1.0/alerts" + value: "https://tks-api-dev.taco-cat.xyz/system-api/1.0/system-notifications" - name: alert_slack value: "https://hooks.slack.com/services/fixme" ########################## @@ -46,9 +46,6 @@ spec: value: "" volumes: - - name: tks-proto-vol - configMap: - name: tks-proto - name: kubeconfig-adm secret: secretName: tks-admin-kubeconfig-secret @@ -169,12 +166,16 @@ spec: value: "{{ workflow.parameters.github_account }}/{{workflow.parameters.cluster_id}}" when: "{{steps.update-auth-oidc-grafana.outputs.parameters.is_changed}} == YES" - - - name: argocd-sync-wait - template: argocd-sync-wait + - - name: garafana-sync-wait + templateRef: + name: create-application + template: 
argocd-sync-wait arguments: parameters: - name: cluster_id value: '{{workflow.parameters.cluster_id}}' + - name: appname + value: 'grafana' - - name: grafana-restart template: grafana-restart @@ -312,9 +313,9 @@ spec: sleep 5 done - grafana_ep_secret=$(kubectl get secret -n ${cluster_id} tks-endpoint-secret -o jsonpath='{.data.grafana}'| base64 -d ) - - if [[ ${grafana_ep_secret} == "" ]]; then + grafana_ep_secret=$(kubectl get secret -n ${cluster_id} tks-endpoint-secret -o jsonpath='{.data.grafana}'| base64 -d ) || grafana_ep_secret="" + + if [ "$grafana_ep_secret" == "" ]; then while [ -z $(kubectl --kubeconfig=kubeconfig get svc -n lma grafana -o jsonpath="{.status.loadBalancer.ingress[*].hostname}") ]; do if [[ "$(kubectl --kubeconfig=kubeconfig get svc -n lma grafana -o jsonpath='{.spec.type}')" != "LoadBalancer" ]]; then log "FAIL" "A service for the grafana in ${cluster_id} is not configured properly.(No LoadBalancer)" @@ -447,35 +448,6 @@ spec: path: /mnt/out/changed.txt default: "NO" - - name: argocd-sync-wait - inputs: - parameters: - - name: cluster_id - container: - name: argocd-sync-wait - image: harbor.taco-cat.xyz/tks/argocd-cli:v2.2.5 - command: - - /bin/bash - - '-c' - - | - # log into Argo CD server - ./argocd login $ARGO_SERVER --plaintext --insecure --username $ARGO_USERNAME \ - --password $ARGO_PASSWORD - - app_name={{inputs.parameters.cluster_id}}-grafana - - # sync app - echo "sync app $app_name" - ./argocd app sync $app_name - - # wait for sync - ./argocd app wait $app_name --sync - - envFrom: - - secretRef: - name: "decapod-argocd-config" - activeDeadlineSeconds: 900 - - name: grafana-restart inputs: parameters: diff --git a/deploy_apps/tks-policy-wftpl.yaml b/deploy_apps/tks-policy-wftpl.yaml new file mode 100644 index 00000000..23ec9450 --- /dev/null +++ b/deploy_apps/tks-policy-wftpl.yaml @@ -0,0 +1,191 @@ +apiVersion: argoproj.io/v1alpha1 +kind: WorkflowTemplate +metadata: + name: tks-policy + namespace: argo +spec: + entrypoint: deploy 
+ arguments: + parameters: + - name: site_name + value: "coyar0qx1" + - name: revision + value: "main" + - name: app_prefix + value: "{{workflow.parameters.site_name}}" + + volumes: + - name: kubeconfig-adm + secret: + secretName: tks-admin-kubeconfig-secret + + templates: + - name: deploy + inputs: + parameters: + - name: appname + value: "policy-resources" + - name: cluster_id + value: "{{inputs.parameters.cluster_id}}" + - name: contract_id + value: "{{ inputs.parameters.contract_id }}" + - name: policy_ids + value: "{{ inputs.parameters.policy_ids }}" + steps: + - - name: createNamespace + template: createNamespace + arguments: + parameters: + - name: target_namespace + value: gatekeeper-system + - - name: deploy-policy-operator + templateRef: + name: create-application + template: installApps + arguments: + parameters: + - name: list + value: | + [ + { "app_group": "policy", "path": "opa-gatekeeper", "namespace": "gatekeeper-system", "target_cluster": "" } + ] + - - name: deploy-default-policy-resources + templateRef: + name: create-application + template: installApps + arguments: + parameters: + - name: list + value: | + [ + { "app_group": "policy", "path": "policy-resources", "namespace": "gatekeeper-system", "target_cluster": "" } + ] + - - name: createClusterCR + template: createClusterCR + arguments: + parameters: + - name: contract_id + value: "{{inputs.parameters.contract_id}}" + - name: cluster_id + value: "{{inputs.parameters.cluster_id}}" + + - - name: argocd-sync-wait + templateRef: + name: create-application + template: argocd-sync-wait + arguments: + parameters: + - name: cluster_id + value: "{{inputs.parameters.cluster_id}}" + - name: appname + value: "policy-resources" + + - - name: apply-policies + templateRef: + name: tks-apply-policies + template: applyPolicies + arguments: + parameters: + - name: contract_id + value: "{{inputs.parameters.contract_id}}" + - name: cluster_id + value: "{{inputs.parameters.cluster_id}}" + - name: policy_ids + 
value: "{{inputs.parameters.policy_ids}}" + + - name: createNamespace + inputs: + parameters: + - name: target_namespace + container: + name: create-namespace + image: harbor.taco-cat.xyz/tks/hyperkube:v1.18.6 + command: + - /bin/bash + - '-c' + - | + function log() { + level=$1 + msg=$2 + date=$(date '+%F %H:%M:%S') + echo "[$date] $level $msg" + } + + kube_secret=$(kubectl get secret -n {{workflow.parameters.cluster_id}} {{workflow.parameters.cluster_id}}-tks-kubeconfig -o jsonpath="{.data.value}" | base64 -d) + echo -e "kube_secret:\n$kube_secret" | head -n 5 + cat <<< "$kube_secret" > /etc/kubeconfig + + kubectl --kubeconfig=/etc/kubeconfig get ns ${TARGET_NAMESPACE} + if [[ $? =~ 1 ]]; then + kubectl --kubeconfig=/etc/kubeconfig create ns ${TARGET_NAMESPACE} + kubectl --kubeconfig=/etc/kubeconfig label ns ${TARGET_NAMESPACE} name=${TARGET_NAMESPACE} + kubectl --kubeconfig=/etc/kubeconfig label ns ${TARGET_NAMESPACE} taco-tls=enabled + log "INFO" "${TARGET_NAMESPACE} successfully created." + fi + env: + - name: TARGET_NAMESPACE + value: "{{inputs.parameters.target_namespace}}" + activeDeadlineSeconds: 900 + retryStrategy: + limit: 2 + + - name: createClusterCR + inputs: + parameters: + - name: contract_id + - name: cluster_id + container: + name: createClusterCR + image: harbor.taco-cat.xyz/tks/tks-cluster-init:v1.0.0 + command: + - /bin/bash + - '-c' + - | + function log() { + level=$1 + msg=$2 + date=$(date '+%F %H:%M:%S') + echo "[$date] $level $msg" + } + cp /kube/value kubeconfig_adm + export KUBECONFIG=kubeconfig_adm + + + if [ $(kubectl get crd tksclusters.tkspolicy.openinfradev.github.io --ignore-not-found | grep -v NAME | wc -l) -eq 0 ]; then + log "ERROR" "TKS-policy opreator is not installed. Please check your tks-admin cluster." 
+ exit -1 + fi + kube_secret=$(kubectl get secret -n ${CLUSTER_ID} ${CLUSTER_ID}-tks-kubeconfig -o jsonpath="{.data.value}" | base64 -d) + POLICY_NS=$(kubectl get ns ${CLUSTER_ID} -o jsonpath='{.metadata.labels.tks\.io\/policy}') + + cat <<EOF >/tks_cluster_${CLUSTER_ID}.yaml + --- + apiVersion: tkspolicy.openinfradev.github.io/v1 + kind: TKSCluster + metadata: + labels: + app.kubernetes.io/created-by: tks-policy-wftpl + app.kubernetes.io/instance: ${CLUSTER_ID} + app.kubernetes.io/name: tkscluster + app.kubernetes.io/part-of: tks-policy-operator + name: ${CLUSTER_ID} + namespace: ${POLICY_NS} + spec: + clusterName: ${CLUSTER_ID} + context: ${CLUSTER_ID} + EOF + + cat /tks_cluster_${CLUSTER_ID}.yaml + kubectl apply -f /tks_cluster_${CLUSTER_ID}.yaml -n ${POLICY_NS} + + env: + - name: CLUSTER_ID + value: "{{ inputs.parameters.cluster_id }}" + - name: CONTRACT_ID + value: "{{ inputs.parameters.contract_id }}" + volumeMounts: + - name: kubeconfig-adm + mountPath: "/kube" + activeDeadlineSeconds: 900 + retryStrategy: + limit: 2 diff --git a/deploy_apps/tks-primary-cluster.yaml b/deploy_apps/tks-primary-cluster.yaml index 3c876ff4..4ea142df 100644 --- a/deploy_apps/tks-primary-cluster.yaml +++ b/deploy_apps/tks-primary-cluster.yaml @@ -111,7 +111,8 @@ spec: value: | [ { "app_group": "lma", "path": "minio", "namespace": "lma", "target_cluster": "" }, - { "app_group": "lma", "path": "loki", "namespace": "lma", "target_cluster": "" } + { "app_group": "lma", "path": "loki", "namespace": "lma", "target_cluster": "" }, + { "app_group": "lma", "path": "loki-user", "namespace": "lma", "target_cluster": "" } ] when: "{{workflow.parameters.object_store}} == minio" @@ -222,7 +223,8 @@ spec: value: | [ { "app_group": "lma", "path": "lma-bucket", "namespace": "taco-system", "target_cluster": "" }, - { "app_group": "lma", "path": "loki", "namespace": "lma", "target_cluster": "" } + { "app_group": "lma", "path": "loki", "namespace": "lma", "target_cluster": "" }, + { "app_group": "lma",
"path": "loki-user", "namespace": "lma", "target_cluster": "" } ] - name: update-eps-for-thanos @@ -383,6 +385,8 @@ spec: cd - done + + ls yq -i e ".global.tksIamRoles=[${iamRoles}]" ${primary_cluster}/${primary_cluster}/lma/site-values.yaml git config --global user.name "tks" @@ -490,7 +494,31 @@ spec: fi fi - if [[ "$OBJECT_STORE" == "minio" ]]; then + LOKI_USER_SERVICE=$(kubectl get secret -n ${primary_cluster} tks-endpoint-secret -o jsonpath='{.data.loki_user}'| base64 -d ) + + if [[ "$LOKI_USER_SERVICE" == "" ]]; then + while [ -z $(kubectl --kubeconfig=kubeconfig get svc -n lma loki-user-loki-distributed-gateway -o jsonpath="{.status.loadBalancer.ingress[*].hostname}") ] + do + if [ "$(kubectl --kubeconfig=kubeconfig get svc -n lma loki-user-loki-distributed-gateway -o jsonpath="{.spec.type}")" -neq "LoadBalancer" ]; then + log "FAIL" "The infras on primary are not cofigured properly.(No LoadBalancer)" + exit -1 + fi + + echo "Waiting for generating the loadbalancer of LOKI_USER(3s)" + sleep 3 + done + + LOKI_USER_HOST=$(kubectl --kubeconfig=kubeconfig get svc -n lma loki-user-loki-distributed-gateway -o jsonpath="{.status.loadBalancer.ingress[0].hostname}") + LOKI_USER_PORT=$(kubectl --kubeconfig=kubeconfig get svc -n lma loki-user-loki-distributed-gateway -o jsonpath="{.spec.ports[0].port}") + else + LOKI_USER_HOST=$(echo $LOKI_USER_SERVICE | awk -F : '{print $1}') + LOKI_USER_PORT=$(echo $LOKI_USER_SERVICE | awk -F : '{print $2}') + if [[ "$LOKI_USER_PORT" == "" ]]; then + LOKI_USER_PORT=80 + fi + fi + + if [[ "${primary_cluster}" == "${current_cluster}" ]] && [[ "$OBJECT_STORE" == "minio" ]]; then S3_SERVICE=$(kubectl get secret -n ${primary_cluster} tks-endpoint-secret -o jsonpath='{.data.minio}'| base64 -d ) if [[ "$S3_SERVICE" == "" ]]; then S3_HOST=$(kubectl --kubeconfig=kubeconfig get svc -n lma minio -o jsonpath="{.status.loadBalancer.ingress[0].hostname}") @@ -512,15 +540,14 @@ spec: for member in $member_clusters do # 1. 
endpoint of fb on eachcluster - log "INFO" "##### change the loki target to $LOKI_HOST:$LOKI_PORT and $S3_SERVICE (the current target is ${member})" + log "INFO" "##### change the loki target to $LOKI_HOST:$LOKI_PORT (the current target is ${member})" [ -d ${member} ] || git clone ${repository_base}${member} cd ${member} yq -i e ".global.lokiHost=\"${LOKI_HOST}\"" ${member}/lma/site-values.yaml yq -i e ".global.lokiPort=\"${LOKI_PORT}\"" ${member}/lma/site-values.yaml - if [[ "$OBJECT_STORE" == "minio" ]]; then - yq -i e ".global.s3Service=\"${S3_SERVICE}\"" ${member}/lma/site-values.yaml - fi + yq -i e ".global.lokiuserHost=\"${LOKI_USER_HOST}\"" ${member}/lma/site-values.yaml + yq -i e ".global.lokiuserPort=\"${LOKI_USER_PORT}\"" ${member}/lma/site-values.yaml yq -i e ".global.clusterName=\"${member}\"" ${member}/lma/site-values.yaml @@ -529,6 +556,9 @@ spec: yq -i e ".global.grafanaDatasourceMetric=\"thanos-query.lma:9090\"" ${member}/lma/site-values.yaml yq -i e ".global.TksWebhookUrl=\"{{workflow.parameters.alert_tks}}\"" ${member}/lma/site-values.yaml yq -i e ".global.SlackUrl=\"{{workflow.parameters.alert_slack}}\"" ${member}/lma/site-values.yaml + if [[ "$OBJECT_STORE" == "minio" ]]; then + yq -i e ".global.s3Service=\"${S3_SERVICE}\"" ${member}/lma/site-values.yaml + fi else yq -i e ".global.grafanaDatasourceMetric=\"lma-prometheus.lma:9090\"" ${member}/lma/site-values.yaml fi @@ -543,9 +573,9 @@ spec: do cd ${member} if [[ `git status --porcelain` ]]; then - log "INFO" "##### commit changes on ${member} to $LOKI_HOST:$LOKI_PORT and $S3_SERVICE" + log "INFO" "##### commit changes on ${member} to loki: $LOKI_HOST:$LOKI_PORT " if [[ "$OBJECT_STORE" == "minio" ]]; then - cmessage="the loki to $LOKI_HOST:$LOKI_PORT and grafana to $S3_SERVICE (cluster ${member})" + cmessage="the loki to $LOKI_HOST:$LOKI_PORT and prometheus to $S3_SERVICE (cluster ${member})" else cmessage="the loki to $LOKI_HOST:$LOKI_PORT (cluster ${member})" fi @@ -852,7 +882,7 @@ spec: for 
cid in {{inputs.parameters.target-clusters}} do - for app in loki grafana + for app in loki grafana loki-user do APP=${cid}-${app} @@ -866,7 +896,7 @@ spec: for cid in {{inputs.parameters.target-clusters}} do - for app in loki grafana + for app in loki grafana loki-user do APP=${cid}-${app} echo "Found app '$APP'. Start deleting it.." @@ -1063,6 +1093,7 @@ spec: aws s3 rm s3://${cluster_id}-tks-thanos --recursive aws s3 rm s3://${cluster_id}-tks-loki --recursive + aws s3 rm s3://${cluster_id}-tks-loki-user --recursive restartPolicy: Never backoffLimit: 4 diff --git a/deploy_apps/tks-remove-lma-federation-wftpl.yaml b/deploy_apps/tks-remove-lma-federation-wftpl.yaml index 162ebf2f..b0e93cdd 100644 --- a/deploy_apps/tks-remove-lma-federation-wftpl.yaml +++ b/deploy_apps/tks-remove-lma-federation-wftpl.yaml @@ -341,8 +341,8 @@ spec: sleep 5 done - grafana_ep_secret=${kubectl get secret -n ${cluster_id} tks-endpoint-secret -o jsonpath='{.data.grafana}'| base64 -d } - if [ grafana_ep_secret == "" ]; then + grafana_ep_secret=$(kubectl get secret -n ${cluster_id} tks-endpoint-secret -o jsonpath='{.data.grafana}'| base64 -d ) || grafana_ep_secret="" + if [ "$grafana_ep_secret" == "" ]; then while [ -z $(kubectl --kubeconfig=kubeconfig get svc -n lma grafana -o jsonpath="{.status.loadBalancer.ingress[*].hostname}") ]; do if [ "$(kubectl --kubeconfig=kubeconfig get svc -n lma grafana -o jsonpath='{.spec.type}')" != "LoadBalancer" ]; then log "FAIL" "A service for the grafana in ${cluster_id} is not configured properly.(No LoadBalancer)" diff --git a/dockerfiles/Dockerfile.e2e-test b/dockerfiles/Dockerfile.e2e-test index 703f349a..2df7cd18 100644 --- a/dockerfiles/Dockerfile.e2e-test +++ b/dockerfiles/Dockerfile.e2e-test @@ -15,6 +15,10 @@ COPY tks /usr/local/bin/tks RUN curl -Lo /usr/bin/kubectl "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" RUN chmod +x /usr/bin/kubectl +RUN sed -i 
s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo +RUN sed -i s/^#.*baseurl=http/baseurl=https/g /etc/yum.repos.d/*.repo +RUN sed -i s/^mirrorlist=http/#mirrorlist=https/g /etc/yum.repos.d/*.repo + RUN yum install -y epel-release RUN yum install -y jq diff --git a/git-repo/create-contract-repo.yaml b/git-repo/create-contract-repo.yaml index 8fd1cd81..ea08c7a3 100644 --- a/git-repo/create-contract-repo.yaml +++ b/git-repo/create-contract-repo.yaml @@ -54,6 +54,7 @@ spec: git clone -b ${REVISION} $GIT_SVC_HTTP://$(echo -n $TOKEN)@${GIT_SVC_BASE_URL}/${USERNAME}/decapod-site.git cd decapod-site + git switch -c newbranch echo "Decapod Site Repo Revision: "${REVISION} > META echo "Decapod Site Repo Commit: "$(git rev-parse HEAD) >> META @@ -85,7 +86,7 @@ spec: git commit -m "new contract: ${CONTRACT_ID}" git remote add new_contract $GIT_SVC_HTTP://$(echo -n $TOKEN)@${GIT_SVC_BASE_URL}/${USERNAME}/${CONTRACT_ID} - git push new_contract ${REVISION}:main + git push new_contract newbranch:main cd .. 
envFrom: diff --git a/k8s-cli/k8s-cli.yaml b/k8s-cli/k8s-cli.yaml index 99b0796a..035768f8 100644 --- a/k8s-cli/k8s-cli.yaml +++ b/k8s-cli/k8s-cli.yaml @@ -5,6 +5,191 @@ metadata: namespace: argo spec: templates: + - name: delete-cluster-role + inputs: + parameters: + - name: target_cluster_id + value: cj7e583yl + - name: is_self_target + value: 'true' + - name: cluster_role_name + value: test1 + - name: ignore_not_found + value: 'true' + script: + command: + - python3 + image: harbor.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 + source: |2 + + import sys + from kubernetes import client, config + import yaml + import base64 + import json + input_params = {'target_cluster_id': '{{inputs.parameters.target_cluster_id}}', 'is_self_target': '{{inputs.parameters.is_self_target}}', 'cluster_role_name': '{{inputs.parameters.cluster_role_name}}', 'ignore_not_found': '{{inputs.parameters.ignore_not_found}}'} + + def get_kubernetes_api(local=False): + if local: + import os + kubeconfig_path = os.path.expandvars('$HOME/.kube/config') + api_config = client.Configuration() + config.load_kube_config(config_file=kubeconfig_path, client_configuration=api_config) + else: + api_config = client.Configuration() + config.load_incluster_config(client_configuration=api_config) + return client.ApiClient(configuration=api_config) + + def get_kubernetes_api_from_kubeconfig(kubeconfig_str): + kubeconfig_dict = yaml.safe_load(kubeconfig_str) + api_config = client.Configuration() + config.load_kube_config_from_dict(kubeconfig_dict, client_configuration=api_config) + return client.ApiClient(configuration=api_config) + + def get_kubeconfig_secret(k8s_client, secret_name, secret_namespace): + api_instance = client.CoreV1Api(k8s_client) + secret_obj = api_instance.read_namespaced_secret(name=secret_name, namespace=secret_namespace) + encoded_data = secret_obj.data.get('value') + decoded_data = base64.b64decode(encoded_data).decode('utf-8') + return decoded_data + + def 
delete_cluster_role(api_client, name, ignore_exist): + api_instance = client.RbacAuthorizationV1Api(api_client) + if (ignore_exist == 'true'): + try: + api_instance.delete_cluster_role(name) + except Exception as e: + if ('Not Found' in str(e)): + print(f'cluster role "{name}" not found') + else: + raise e + else: + api_instance.delete_cluster_role(name) + + def input_validation(origin_input_params): + if ((not origin_input_params['target_cluster_id']) or (origin_input_params['target_cluster_id'] == '')): + raise Exception('target_cluster_id is required') + if ((not origin_input_params['is_self_target']) or (origin_input_params['is_self_target'] == '')): + raise Exception('is_self_target is required') + if ((not origin_input_params['cluster_role_name']) or (origin_input_params['cluster_role_name'] == '')): + raise Exception('cluster_role_name is required') + if ((not origin_input_params['ignore_not_found']) or (origin_input_params['ignore_not_found'] == '')): + raise Exception('ignore_not_found is required') + input_validation(input_params) + if (input_params['is_self_target'] == 'true'): + target_k8s_client = k8s_client = get_kubernetes_api(local=False) + else: + k8s_client = get_kubernetes_api(local=False) + target_k8s_kubeconfig = get_kubeconfig_secret(k8s_client, (input_params['target_cluster_id'] + '-tks-kubeconfig'), input_params['target_cluster_id']) + target_k8s_client = get_kubernetes_api_from_kubeconfig(target_k8s_kubeconfig) + try: + delete_cluster_role(target_k8s_client, input_params['cluster_role_name'], input_params['ignore_not_found']) + print(f"""delete cluster role "{input_params['cluster_role_name']}" success""") + except Exception as e: + print('Exception when calling delete_cluster_role') + print(e) + sys.exit(1) + sys.exit(0) + - name: create-cluster-role + inputs: + parameters: + - name: target_cluster_id + value: cj7e583yl + - name: is_self_target + value: 'true' + - name: cluster_role_name + value: test1 + - name: api_group + value: '*' + - 
name: resource_name + value: '*' + - name: verbs + value: '["get", "list"]' + - name: ignore_exist + value: 'true' + script: + command: + - python3 + image: harbor.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 + source: |2 + + import sys + from kubernetes import client, config + import yaml + import base64 + import json + input_params = {'target_cluster_id': '{{inputs.parameters.target_cluster_id}}', 'is_self_target': '{{inputs.parameters.is_self_target}}', 'cluster_role_name': '{{inputs.parameters.cluster_role_name}}', 'api_group': '{{inputs.parameters.api_group}}', 'resource_name': '{{inputs.parameters.resource_name}}', 'verbs': '{{inputs.parameters.verbs}}', 'ignore_exist': '{{inputs.parameters.ignore_exist}}'} + + def get_kubernetes_api(local=False): + if local: + import os + kubeconfig_path = os.path.expandvars('$HOME/.kube/config') + api_config = client.Configuration() + config.load_kube_config(config_file=kubeconfig_path, client_configuration=api_config) + else: + api_config = client.Configuration() + config.load_incluster_config(client_configuration=api_config) + return client.ApiClient(configuration=api_config) + + def get_kubernetes_api_from_kubeconfig(kubeconfig_str): + kubeconfig_dict = yaml.safe_load(kubeconfig_str) + api_config = client.Configuration() + config.load_kube_config_from_dict(kubeconfig_dict, client_configuration=api_config) + return client.ApiClient(configuration=api_config) + + def get_kubeconfig_secret(k8s_client, secret_name, secret_namespace): + api_instance = client.CoreV1Api(k8s_client) + secret_obj = api_instance.read_namespaced_secret(name=secret_name, namespace=secret_namespace) + encoded_data = secret_obj.data.get('value') + decoded_data = base64.b64decode(encoded_data).decode('utf-8') + return decoded_data + + def create_cluster_role(api_client, name, api_group, resource_name, verbs, ignore_exist): + api_instance = client.RbacAuthorizationV1Api(api_client) + body = {'apiVersion': 'rbac.authorization.k8s.io/v1', 'kind': 
'ClusterRole', 'metadata': {'name': name}, 'rules': [{'apiGroups': [api_group], 'resources': [resource_name], 'verbs': verbs}]} + if (ignore_exist == 'true'): + try: + return api_instance.create_cluster_role(body) + except client.ApiException as e: + if (e.status == 409): + print(f'cluster role "{name}" already exists') + return + else: + raise e + else: + return api_instance.create_cluster_role(body) + + def input_validation(origin_input_params): + if ((not origin_input_params['target_cluster_id']) or (origin_input_params['target_cluster_id'] == '')): + raise Exception('target_cluster_id is required') + if ((not origin_input_params['is_self_target']) or (origin_input_params['is_self_target'] == '')): + raise Exception('is_self_target is required') + if ((not origin_input_params['cluster_role_name']) or (origin_input_params['cluster_role_name'] == '')): + raise Exception('cluster_role_name is required') + if ((not origin_input_params['api_group']) or (origin_input_params['api_group'] == '')): + raise Exception('api_group is required') + if ((not origin_input_params['resource_name']) or (len(origin_input_params['resource_name']) == 0)): + raise Exception('resource_name is required') + if ((not origin_input_params['verbs']) or (len(origin_input_params['verbs']) == 0)): + raise Exception('verbs is required') + if ((not origin_input_params['ignore_exist']) or (len(origin_input_params['ignore_exist']) == 0)): + raise Exception('ignore_exist is required') + input_validation(input_params) + input_params['verbs'] = json.loads(input_params['verbs']) + if (input_params['is_self_target'] == 'true'): + target_k8s_client = k8s_client = get_kubernetes_api(local=False) + else: + k8s_client = get_kubernetes_api(local=False) + target_k8s_kubeconfig = get_kubeconfig_secret(k8s_client, (input_params['target_cluster_id'] + '-tks-kubeconfig'), input_params['target_cluster_id']) + target_k8s_client = get_kubernetes_api_from_kubeconfig(target_k8s_kubeconfig) + try: + 
create_cluster_role(target_k8s_client, input_params['cluster_role_name'], input_params['api_group'], input_params['resource_name'], input_params['verbs'], input_params['ignore_exist']) + print(f"""create cluster role "{input_params['cluster_role_name']}" success""") + except Exception as e: + print('Exception when calling create_cluster_role') + print(e) + sys.exit(1) + sys.exit(0) - name: delete-cluster-role-binding inputs: parameters: @@ -17,7 +202,7 @@ spec: script: command: - python3 - image: harbor-cicd.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 + image: harbor.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 source: |2 import sys @@ -96,7 +281,7 @@ spec: script: command: - python3 - image: harbor-cicd.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 + image: harbor.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 source: |2 import sys diff --git a/keycloak-client/lib/keycloak-clients.yaml b/keycloak-client/lib/keycloak-clients.yaml index 77f21f58..f1539bc9 100644 --- a/keycloak-client/lib/keycloak-clients.yaml +++ b/keycloak-client/lib/keycloak-clients.yaml @@ -21,7 +21,7 @@ spec: script: command: - python3 - image: harbor-cicd.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 + image: harbor.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 source: |2 from keycloak import KeycloakOpenID, KeycloakAdmin, KeycloakOpenIDConnection @@ -43,7 +43,8 @@ spec: def get_secret(k8s_client, secret_name, secret_namespace): secret_obj = k8s_client.read_namespaced_secret(name=secret_name, namespace=secret_namespace) encoded_data = secret_obj.data.get('admin-password') - decoded_data = base64.b64decode(encoded_data).decode('utf-8') + cleaned_data = encoded_data.strip() + decoded_data = base64.b64decode(cleaned_data).decode('utf-8') return decoded_data def delete_client(url, realm_name, client_id, token): @@ -118,7 +119,7 @@ spec: script: command: - python3 - image: harbor-cicd.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 + image: harbor.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 source: |2 from 
keycloak import KeycloakOpenIDConnection, KeycloakAdmin, KeycloakOpenID @@ -139,7 +140,8 @@ spec: def get_secret(k8s_client, secret_name, secret_namespace): secret_obj = k8s_client.read_namespaced_secret(name=secret_name, namespace=secret_namespace) encoded_data = secret_obj.data.get('admin-password') - decoded_data = base64.b64decode(encoded_data).decode('utf-8') + cleaned_data = encoded_data.strip() + decoded_data = base64.b64decode(cleaned_data).decode('utf-8') return decoded_data def input_validation(origin_input_params): @@ -203,7 +205,7 @@ spec: script: command: - python3 - image: harbor-cicd.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 + image: harbor.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 source: |2 from keycloak import KeycloakOpenID, KeycloakAdmin, KeycloakOpenIDConnection @@ -224,7 +226,8 @@ spec: def get_secret(k8s_client, secret_name, secret_namespace): secret_obj = k8s_client.read_namespaced_secret(name=secret_name, namespace=secret_namespace) encoded_data = secret_obj.data.get('admin-password') - decoded_data = base64.b64decode(encoded_data).decode('utf-8') + cleaned_data = encoded_data.strip() + decoded_data = base64.b64decode(cleaned_data).decode('utf-8') return decoded_data def input_validation(origin_input_params): @@ -295,7 +298,7 @@ spec: script: command: - python3 - image: harbor-cicd.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 + image: harbor.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 source: |2 from keycloak import KeycloakOpenID, KeycloakAdmin, KeycloakOpenIDConnection @@ -316,7 +319,8 @@ spec: def get_secret(k8s_client, secret_name, secret_namespace): secret_obj = k8s_client.read_namespaced_secret(name=secret_name, namespace=secret_namespace) encoded_data = secret_obj.data.get('admin-password') - decoded_data = base64.b64decode(encoded_data).decode('utf-8') + cleaned_data = encoded_data.strip() + decoded_data = base64.b64decode(cleaned_data).decode('utf-8') return decoded_data def input_validation(origin_input_params): @@ -394,7 
+398,7 @@ spec: script: command: - python3 - image: harbor-cicd.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 + image: harbor.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 source: |2 from keycloak import KeycloakOpenIDConnection, KeycloakAdmin, KeycloakOpenID @@ -416,7 +420,8 @@ spec: def get_secret(k8s_client, secret_name, secret_namespace): secret_obj = k8s_client.read_namespaced_secret(name=secret_name, namespace=secret_namespace) encoded_data = secret_obj.data.get('admin-password') - decoded_data = base64.b64decode(encoded_data).decode('utf-8') + cleaned_data = encoded_data.strip() + decoded_data = base64.b64decode(cleaned_data).decode('utf-8') return decoded_data def create_client_scope_mapper(url, realm_name, client_id, hashed_client_id, token, mapper_name): @@ -491,7 +496,7 @@ spec: script: command: - python3 - image: harbor-cicd.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 + image: harbor.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 source: |2 from keycloak import KeycloakOpenIDConnection, KeycloakAdmin, KeycloakOpenID @@ -512,7 +517,8 @@ spec: def get_secret(k8s_client, secret_name, secret_namespace): secret_obj = k8s_client.read_namespaced_secret(name=secret_name, namespace=secret_namespace) encoded_data = secret_obj.data.get('admin-password') - decoded_data = base64.b64decode(encoded_data).decode('utf-8') + cleaned_data = encoded_data.strip() + decoded_data = base64.b64decode(cleaned_data).decode('utf-8') return decoded_data def input_validation(origin_input_params): @@ -574,7 +580,7 @@ spec: script: command: - python3 - image: harbor-cicd.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 + image: harbor.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 source: |2 from keycloak import KeycloakOpenID, KeycloakAdmin, KeycloakOpenIDConnection @@ -596,7 +602,8 @@ spec: def get_secret(k8s_client, secret_name, secret_namespace): secret_obj = k8s_client.read_namespaced_secret(name=secret_name, namespace=secret_namespace) encoded_data = secret_obj.data.get('admin-password') - 
decoded_data = base64.b64decode(encoded_data).decode('utf-8') + cleaned_data = encoded_data.strip() + decoded_data = base64.b64decode(cleaned_data).decode('utf-8') return decoded_data def create_client(url, realm_name, client_id, token): @@ -676,7 +683,7 @@ spec: script: command: - python3 - image: harbor-cicd.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 + image: harbor.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 source: |2 from keycloak import KeycloakOpenID, KeycloakAdmin, KeycloakOpenIDConnection @@ -699,7 +706,8 @@ spec: def get_secret(k8s_client, secret_name, secret_namespace): secret_obj = k8s_client.read_namespaced_secret(name=secret_name, namespace=secret_namespace) encoded_data = secret_obj.data.get('admin-password') - decoded_data = base64.b64decode(encoded_data).decode('utf-8') + cleaned_data = encoded_data.strip() + decoded_data = base64.b64decode(cleaned_data).decode('utf-8') return decoded_data def input_validation(origin_input_params): @@ -783,7 +791,7 @@ spec: mountPath: "/out" command: - python3 - image: harbor-cicd.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 + image: harbor.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 source: |2 from keycloak import KeycloakOpenID, KeycloakAdmin, KeycloakOpenIDConnection @@ -807,7 +815,8 @@ spec: def get_secret(k8s_client, secret_name, secret_namespace): secret_obj = k8s_client.read_namespaced_secret(name=secret_name, namespace=secret_namespace) encoded_data = secret_obj.data.get('admin-password') - decoded_data = base64.b64decode(encoded_data).decode('utf-8') + cleaned_data = encoded_data.strip() + decoded_data = base64.b64decode(cleaned_data).decode('utf-8') return decoded_data def get_client_secret(url, realm_name, client_id, token): diff --git a/keycloak-client/lib/keycloak-realms.yaml b/keycloak-client/lib/keycloak-realms.yaml index 02b58e4d..ec13fc89 100644 --- a/keycloak-client/lib/keycloak-realms.yaml +++ b/keycloak-client/lib/keycloak-realms.yaml @@ -19,7 +19,7 @@ spec: script: command: - python3 - image: 
harbor-cicd.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 + image: harbor.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 source: |2 from keycloak import KeycloakOpenIDConnection, KeycloakAdmin, KeycloakOpenID @@ -40,7 +40,8 @@ spec: def get_secret(k8s_client, secret_name, secret_namespace): secret_obj = k8s_client.read_namespaced_secret(name=secret_name, namespace=secret_namespace) encoded_data = secret_obj.data.get('admin-password') - decoded_data = base64.b64decode(encoded_data).decode('utf-8') + cleaned_data = encoded_data.strip() + decoded_data = base64.b64decode(cleaned_data).decode('utf-8') return decoded_data def input_validation(origin_input_params): @@ -89,7 +90,7 @@ spec: script: command: - python3 - image: harbor-cicd.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 + image: harbor.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 source: |2 from keycloak import KeycloakOpenIDConnection, KeycloakAdmin, KeycloakOpenID @@ -110,7 +111,8 @@ spec: def get_secret(k8s_client, secret_name, secret_namespace): secret_obj = k8s_client.read_namespaced_secret(name=secret_name, namespace=secret_namespace) encoded_data = secret_obj.data.get('admin-password') - decoded_data = base64.b64decode(encoded_data).decode('utf-8') + cleaned_data = encoded_data.strip() + decoded_data = base64.b64decode(cleaned_data).decode('utf-8') return decoded_data def input_validation(origin_input_params): diff --git a/keycloak-client/lib/keycloak-users.yaml b/keycloak-client/lib/keycloak-users.yaml index 2168b726..67341cf0 100644 --- a/keycloak-client/lib/keycloak-users.yaml +++ b/keycloak-client/lib/keycloak-users.yaml @@ -25,7 +25,7 @@ spec: script: command: - python3 - image: harbor-cicd.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 + image: harbor.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 source: |2 from keycloak import KeycloakOpenIDConnection, KeycloakAdmin, KeycloakOpenID @@ -48,7 +48,8 @@ spec: def get_secret(k8s_client, secret_name, secret_namespace): secret_obj = 
k8s_client.read_namespaced_secret(name=secret_name, namespace=secret_namespace) encoded_data = secret_obj.data.get('admin-password') - decoded_data = base64.b64decode(encoded_data).decode('utf-8') + cleaned_data = encoded_data.strip() + decoded_data = base64.b64decode(cleaned_data).decode('utf-8') return decoded_data def input_validation(origin_input_params): @@ -131,7 +132,7 @@ spec: script: command: - python3 - image: harbor-cicd.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 + image: harbor.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 source: |2 from keycloak import KeycloakOpenIDConnection, KeycloakAdmin, KeycloakOpenID @@ -152,7 +153,8 @@ spec: def get_secret(k8s_client, secret_name, secret_namespace): secret_obj = k8s_client.read_namespaced_secret(name=secret_name, namespace=secret_namespace) encoded_data = secret_obj.data.get('admin-password') - decoded_data = base64.b64decode(encoded_data).decode('utf-8') + cleaned_data = encoded_data.strip() + decoded_data = base64.b64decode(cleaned_data).decode('utf-8') return decoded_data def input_validation(origin_input_params): @@ -206,7 +208,7 @@ spec: script: command: - python3 - image: harbor-cicd.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 + image: harbor.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 source: |2 from keycloak import KeycloakOpenIDConnection, KeycloakAdmin, KeycloakOpenID @@ -230,7 +232,8 @@ spec: def get_secret(k8s_client, secret_name, secret_namespace): secret_obj = k8s_client.read_namespaced_secret(name=secret_name, namespace=secret_namespace) encoded_data = secret_obj.data.get('admin-password') - decoded_data = base64.b64decode(encoded_data).decode('utf-8') + cleaned_data = encoded_data.strip() + decoded_data = base64.b64decode(cleaned_data).decode('utf-8') return decoded_data def input_validation(origin_input_params): @@ -303,7 +306,7 @@ spec: script: command: - python3 - image: harbor-cicd.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 + image: harbor.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 source: 
|2 from keycloak import KeycloakOpenIDConnection, KeycloakAdmin, KeycloakOpenID @@ -325,7 +328,8 @@ spec: def get_secret(k8s_client, secret_name, secret_namespace): secret_obj = k8s_client.read_namespaced_secret(name=secret_name, namespace=secret_namespace) encoded_data = secret_obj.data.get('admin-password') - decoded_data = base64.b64decode(encoded_data).decode('utf-8') + cleaned_data = encoded_data.strip() + decoded_data = base64.b64decode(cleaned_data).decode('utf-8') return decoded_data def input_validation(origin_input_params): @@ -401,7 +405,7 @@ spec: script: command: - python3 - image: harbor-cicd.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 + image: harbor.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 source: |2 from keycloak import KeycloakOpenIDConnection, KeycloakAdmin, KeycloakOpenID @@ -423,7 +427,8 @@ spec: def get_secret(k8s_client, secret_name, secret_namespace): secret_obj = k8s_client.read_namespaced_secret(name=secret_name, namespace=secret_namespace) encoded_data = secret_obj.data.get('admin-password') - decoded_data = base64.b64decode(encoded_data).decode('utf-8') + cleaned_data = encoded_data.strip() + decoded_data = base64.b64decode(cleaned_data).decode('utf-8') return decoded_data def input_validation(origin_input_params): @@ -498,7 +503,7 @@ spec: script: command: - python3 - image: harbor-cicd.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 + image: harbor.taco-cat.xyz/dev/python-keycloak-cli:v0.1.0 source: |2 from keycloak import KeycloakOpenIDConnection, KeycloakAdmin, KeycloakOpenID @@ -519,7 +524,8 @@ spec: def get_secret(k8s_client, secret_name, secret_namespace): secret_obj = k8s_client.read_namespaced_secret(name=secret_name, namespace=secret_namespace) encoded_data = secret_obj.data.get('admin-password') - decoded_data = base64.b64decode(encoded_data).decode('utf-8') + cleaned_data = encoded_data.strip() + decoded_data = base64.b64decode(cleaned_data).decode('utf-8') return decoded_data def input_validation(origin_input_params): diff 
--git a/keycloak-client/set-user-cluster.yaml b/keycloak-client/set-user-cluster.yaml index 4be2884e..886ec2d5 100644 --- a/keycloak-client/set-user-cluster.yaml +++ b/keycloak-client/set-user-cluster.yaml @@ -40,78 +40,78 @@ spec: value: "{{inputs.parameters.keycloak_credential_secret_name}}" - name: keycloak_credential_secret_namespace value: "{{inputs.parameters.keycloak_credential_secret_namespace}}" - - - name: create-client-admin-role - templateRef: - name: keycloak-client - template: create-client-role - arguments: - parameters: - - name: server_url - value: "{{inputs.parameters.server_url}}" - - name: target_realm_name - value: "{{inputs.parameters.target_realm_name}}" - - name: target_client_id - value: "{{inputs.parameters.target_client_id}}" - - name: keycloak_credential_secret_name - value: "{{inputs.parameters.keycloak_credential_secret_name}}" - - name: keycloak_credential_secret_namespace - value: "{{inputs.parameters.keycloak_credential_secret_namespace}}" - - name: client_role_name - value: "cluster-admin" - - - name: create-client-clusterId-admin-role - templateRef: - name: keycloak-client - template: create-client-role - arguments: - parameters: - - name: server_url - value: "{{inputs.parameters.server_url}}" - - name: target_realm_name - value: "{{inputs.parameters.target_realm_name}}" - - name: target_client_id - value: "{{inputs.parameters.target_client_id}}" - - name: keycloak_credential_secret_name - value: "{{inputs.parameters.keycloak_credential_secret_name}}" - - name: keycloak_credential_secret_namespace - value: "{{inputs.parameters.keycloak_credential_secret_namespace}}" - - name: client_role_name - value: "{{inputs.parameters.cluster_id}}-cluster-admin" - - - name: create-client-view-role - templateRef: - name: keycloak-client - template: create-client-role - arguments: - parameters: - - name: server_url - value: "{{inputs.parameters.server_url}}" - - name: target_realm_name - value: "{{inputs.parameters.target_realm_name}}" - - name: 
target_client_id - value: "{{inputs.parameters.target_client_id}}" - - name: keycloak_credential_secret_name - value: "{{inputs.parameters.keycloak_credential_secret_name}}" - - name: keycloak_credential_secret_namespace - value: "{{inputs.parameters.keycloak_credential_secret_namespace}}" - - name: client_role_name - value: "cluster-view" - - - name: create-client-clusterId-view-role - templateRef: - name: keycloak-client - template: create-client-role - arguments: - parameters: - - name: server_url - value: "{{inputs.parameters.server_url}}" - - name: target_realm_name - value: "{{inputs.parameters.target_realm_name}}" - - name: target_client_id - value: "{{inputs.parameters.target_client_id}}" - - name: keycloak_credential_secret_name - value: "{{inputs.parameters.keycloak_credential_secret_name}}" - - name: keycloak_credential_secret_namespace - value: "{{inputs.parameters.keycloak_credential_secret_namespace}}" - - name: client_role_name - value: "{{inputs.parameters.cluster_id}}-cluster-view" +# - - name: create-client-admin-role +# templateRef: +# name: keycloak-client +# template: create-client-role +# arguments: +# parameters: +# - name: server_url +# value: "{{inputs.parameters.server_url}}" +# - name: target_realm_name +# value: "{{inputs.parameters.target_realm_name}}" +# - name: target_client_id +# value: "{{inputs.parameters.target_client_id}}" +# - name: keycloak_credential_secret_name +# value: "{{inputs.parameters.keycloak_credential_secret_name}}" +# - name: keycloak_credential_secret_namespace +# value: "{{inputs.parameters.keycloak_credential_secret_namespace}}" +# - name: client_role_name +# value: "cluster-admin" +# - - name: create-client-clusterId-admin-role +# templateRef: +# name: keycloak-client +# template: create-client-role +# arguments: +# parameters: +# - name: server_url +# value: "{{inputs.parameters.server_url}}" +# - name: target_realm_name +# value: "{{inputs.parameters.target_realm_name}}" +# - name: target_client_id +# value: 
"{{inputs.parameters.target_client_id}}" +# - name: keycloak_credential_secret_name +# value: "{{inputs.parameters.keycloak_credential_secret_name}}" +# - name: keycloak_credential_secret_namespace +# value: "{{inputs.parameters.keycloak_credential_secret_namespace}}" +# - name: client_role_name +# value: "{{inputs.parameters.cluster_id}}-cluster-admin" +# - - name: create-client-view-role +# templateRef: +# name: keycloak-client +# template: create-client-role +# arguments: +# parameters: +# - name: server_url +# value: "{{inputs.parameters.server_url}}" +# - name: target_realm_name +# value: "{{inputs.parameters.target_realm_name}}" +# - name: target_client_id +# value: "{{inputs.parameters.target_client_id}}" +# - name: keycloak_credential_secret_name +# value: "{{inputs.parameters.keycloak_credential_secret_name}}" +# - name: keycloak_credential_secret_namespace +# value: "{{inputs.parameters.keycloak_credential_secret_namespace}}" +# - name: client_role_name +# value: "cluster-view" +# - - name: create-client-clusterId-view-role +# templateRef: +# name: keycloak-client +# template: create-client-role +# arguments: +# parameters: +# - name: server_url +# value: "{{inputs.parameters.server_url}}" +# - name: target_realm_name +# value: "{{inputs.parameters.target_realm_name}}" +# - name: target_client_id +# value: "{{inputs.parameters.target_client_id}}" +# - name: keycloak_credential_secret_name +# value: "{{inputs.parameters.keycloak_credential_secret_name}}" +# - name: keycloak_credential_secret_namespace +# value: "{{inputs.parameters.keycloak_credential_secret_namespace}}" +# - name: client_role_name +# value: "{{inputs.parameters.cluster_id}}-cluster-view" - - name: create-client-scope-mapper-client-role templateRef: name: keycloak-client diff --git a/tks-cli/tks-cli.yaml b/tks-cli/tks-cli.yaml index 164f2514..3a4675f8 100644 --- a/tks-cli/tks-cli.yaml +++ b/tks-cli/tks-cli.yaml @@ -12,7 +12,7 @@ spec: - name: login-tks-api container: name: login-tks-api - 
image: harbor.taco-cat.xyz/tks/tks-e2e-test:v3.1.4 + image: harbor.taco-cat.xyz/tks/tks-e2e-test:v3.2.2 envFrom: - secretRef: name: "tks-api-secret" @@ -31,7 +31,7 @@ spec: - name: description container: name: create-organization - image: harbor.taco-cat.xyz/tks/tks-e2e-test:v3.1.4 + image: harbor.taco-cat.xyz/tks/tks-e2e-test:v3.2.2 envFrom: - secretRef: name: "tks-api-secret" @@ -91,9 +91,11 @@ spec: - name: infra_conf - name: cloud_service - name: cluster_endpoint + - name: policy_ids + - name: cluster_domains container: name: create-usercluster - image: harbor.taco-cat.xyz/tks/tks-e2e-test:v3.1.4 + image: harbor.taco-cat.xyz/tks/tks-e2e-test:v3.2.2 envFrom: - secretRef: name: "tks-api-secret" @@ -119,10 +121,12 @@ spec: --description "{{inputs.parameters.description}}" \ --cloud-service "{{inputs.parameters.cloud_service}}" \ --cluster-endpoint "{{inputs.parameters.cluster_endpoint}}" \ + --domains "{{inputs.parameters.cluster_domains}}" \ --stack 1 \ --tks-cp-node $TKS_CP_NODE \ --tks-infra-node $TKS_INFRA_NODE \ - --tks-user-node $TKS_USER_NODE + --tks-user-node $TKS_USER_NODE \ + --policy-ids "{{inputs.parameters.policy_ids}}" threshold=720 for i in $(seq 1 $threshold) @@ -162,7 +166,7 @@ spec: - name: organization_id container: name: install-usercluster - image: harbor.taco-cat.xyz/tks/tks-e2e-test:v3.1.4 + image: harbor.taco-cat.xyz/tks/tks-e2e-test:v3.2.2 envFrom: - secretRef: name: "tks-api-secret" @@ -177,7 +181,7 @@ spec: echo "* Install $CL_NAME cluster" tks cluster install -c ${CL_ID} - threshold=720 + threshold=120000 for i in $(seq 1 $threshold) do CL_STATUS=$(tks cluster list "{{inputs.parameters.organization_id}}" | grep -w $CL_ID | awk '{ print $4 }') @@ -208,6 +212,73 @@ spec: default: "Something wrong" path: /mnt/out/cluster_id.txt + - name: import-usercluster + inputs: + parameters: + - name: cluster_name + - name: stack_template_id + - name: organization_id + - name: creator + - name: description + - name: policy_ids + - name: 
cluster_domains + - name: kubeconfig_string + container: + name: import-usercluster + image: harbor.taco-cat.xyz/tks/tks-e2e-test:v3.2.2 + imagePullPolicy: Always + envFrom: + - secretRef: + name: "tks-api-secret" + command: + - /bin/bash + - '-exc' + - | + tks login {{workflow.parameters.tks_api_url}} --organization-id ${ORGANIZATION_ID} --account-id ${ACCOUNT_ID} --password ${PASSWORD} + + CL_NAME="{{inputs.parameters.cluster_name}}" + + echo "* Import $CL_NAME cluster" + tks cluster import ${CL_NAME} \ + --stack-template-id "{{inputs.parameters.stack_template_id}}" \ + --organization-id "{{inputs.parameters.organization_id}}" \ + --description "{{inputs.parameters.description}}" \ + --cluster-type "USER" \ + --kubeconfig-string "{{inputs.parameters.kubeconfig_string}}" \ + --domains "{{inputs.parameters.cluster_domains}}" \ + --policy-ids "{{inputs.parameters.policy_ids}}" + + threshold=720 + for i in $(seq 1 $threshold) + do + CL_STATUS=$(tks cluster list "{{inputs.parameters.organization_id}}" | grep -w $CL_NAME | awk '{ print $4 }') + if [ "$CL_STATUS" = "RUNNING" ]; then + break + elif [ "$CL_STATUS" = "ERROR" ]; then + exit 1 + fi + + if [ "$i" -ge "$threshold" ]; then + echo "Timed out waiting for user-cluster to be ready." 
+ exit 1 + fi + sleep 5 + done + + tks cluster list "{{inputs.parameters.organization_id}}" | grep -w $CL_NAME | awk '{print $3}' | tee /mnt/out/cluster_id.txt + volumeMounts: + - name: out + mountPath: /mnt/out + volumes: + - name: out + emptyDir: { } + outputs: + parameters: + - name: cluster-id + valueFrom: + default: "Something wrong" + path: /mnt/out/cluster_id.txt + - name: delete-usercluster inputs: parameters: @@ -215,7 +286,7 @@ spec: - name: cluster_id container: name: delete-usercluster - image: harbor.taco-cat.xyz/tks/tks-e2e-test:v3.1.4 + image: harbor.taco-cat.xyz/tks/tks-e2e-test:v3.2.2 envFrom: - secretRef: name: "tks-api-secret" @@ -258,7 +329,7 @@ spec: - name: description container: name: create-appgroup - image: harbor.taco-cat.xyz/tks/tks-e2e-test:v3.1.4 + image: harbor.taco-cat.xyz/tks/tks-e2e-test:v3.2.2 envFrom: - secretRef: name: "tks-api-secret" @@ -308,7 +379,7 @@ spec: - name: appgroup_id container: name: delete-appgroup - image: harbor.taco-cat.xyz/tks/tks-e2e-test:v3.1.4 + image: harbor.taco-cat.xyz/tks/tks-e2e-test:v3.2.2 envFrom: - secretRef: name: "tks-api-secret" @@ -348,7 +419,7 @@ spec: - name: name container: name: get-appgroup-id - image: harbor.taco-cat.xyz/tks/tks-e2e-test:v3.1.4 + image: harbor.taco-cat.xyz/tks/tks-e2e-test:v3.2.2 envFrom: - secretRef: name: "tks-api-secret" diff --git a/tks-cluster/aws-cluster-autoscaler-iam.yaml b/tks-cluster/aws-cluster-autoscaler-iam.yaml index b7e4da04..67dab270 100644 --- a/tks-cluster/aws-cluster-autoscaler-iam.yaml +++ b/tks-cluster/aws-cluster-autoscaler-iam.yaml @@ -115,7 +115,7 @@ spec: parameters: - name: cloud_account_id container: - image: harbor.taco-cat.xyz/tks/tks-aws:v1.0.3 + image: harbor.taco-cat.xyz/tks/tks-aws:v1.1.0 command: - /bin/bash - -exc @@ -132,10 +132,22 @@ spec: export AWS_ACCESS_KEY_ID=$(cat ~/assume-role-sts-credential.txt | jq -r '.Credentials.AccessKeyId') export AWS_SECRET_ACCESS_KEY=$(cat ~/assume-role-sts-credential.txt | jq -r 
'.Credentials.SecretAccessKey') export AWS_SESSION_TOKEN=$(cat ~/assume-role-sts-credential.txt | jq -r '.Credentials.SessionToken') + + ROLE_ARN_REMOVED_SUFFIX=${ROLE_ARN%:*} + AWS_ACCOUNT_ID=${ROLE_ARN_REMOVED_SUFFIX#*::} + else + AWS_ACCOUNT_ID=$(kubectl get secret -n argo awsconfig-secret -ojsonpath='{.data.AWS_ACCOUNT_ID}' | base64 -d) fi eksctl delete iamserviceaccount --cluster $CLUSTER_ID --name cluster-autoscaler --namespace kube-system + oidc_id=$(aws eks describe-cluster --name $CLUSTER_ID --query "cluster.identity.oidc.issuer" --output text | cut -d '/' -f3-5) + aws iam delete-open-id-connect-provider --open-id-connect-provider-arn arn:aws:iam::$AWS_ACCOUNT_ID:oidc-provider/$oidc_id + + #aws iam detach-role-policy --role-name cluster-autoscaler-$CLUSTER_ID --policy-arn arn:aws:iam::$AWS_ACCOUNT_ID:policy/cluster-autoscaler-$CLUSTER_ID + #aws iam delete-role --role-name cluster-autoscaler-$CLUSTER_ID + aws iam delete-policy --policy-arn arn:aws:iam::$AWS_ACCOUNT_ID:policy/cluster-autoscaler-$CLUSTER_ID + env: - name: CLUSTER_ID value: "{{workflow.parameters.cluster_id}}" diff --git a/tks-cluster/aws-eks-keycloak-oidc-provider.yaml b/tks-cluster/aws-eks-keycloak-oidc-provider.yaml new file mode 100644 index 00000000..f7eedb34 --- /dev/null +++ b/tks-cluster/aws-eks-keycloak-oidc-provider.yaml @@ -0,0 +1,93 @@ +apiVersion: argoproj.io/v1alpha1 +kind: WorkflowTemplate +metadata: + name: aws-eks-keycloak-oidc-provider + namespace: argo +spec: + entrypoint: createProvider + arguments: + parameters: + - name: contract_id + value: "o6t7z6qzp" + - name: cluster_id + value: "C011b88fa" + - name: keycloak_url + value: 'https://keycloak.yourdomain.org/auth' + - name: cloud_account_id # will be not NULL if the cluster is multitenancy + value: "NULL" + + volumes: + - name: awsconfig + secret: + secretName: awsconfig-secret + - name: kubeconfig-adm + secret: + secretName: tks-admin-kubeconfig-secret + + templates: + - name: createProvider + inputs: + parameters: + - 
name: contract_id + - name: cluster_id + - name: keycloak_url + - name: cloud_account_id + activeDeadlineSeconds: 1800 + container: + image: harbor.taco-cat.xyz/tks/tks-aws:v1.1.0 + command: + - /bin/bash + - -exc + - | + cp /kube/value kubeconfig_adm + export KUBECONFIG=kubeconfig_adm + mkdir ~/.aws + cp /aws/* ~/.aws/ + + # Use AWS STS temporary security credential if multi-tenancy + if [ "$CLOUD_ACCOUNT_ID" != "NULL" ]; then + ROLE_ARN=$(kubectl get awsri $CLOUD_ACCOUNT_ID-account-role -ojsonpath='{.spec.roleARN}') + aws sts assume-role --role-arn $ROLE_ARN --role-session-name "TKS-ClusterAutoscaler-$CLUSTER_ID" --output json | tee ~/assume-role-sts-credential.txt + export AWS_ACCESS_KEY_ID=$(cat ~/assume-role-sts-credential.txt | jq -r '.Credentials.AccessKeyId') + export AWS_SECRET_ACCESS_KEY=$(cat ~/assume-role-sts-credential.txt | jq -r '.Credentials.SecretAccessKey') + export AWS_SESSION_TOKEN=$(cat ~/assume-role-sts-credential.txt | jq -r '.Credentials.SessionToken') + + ROLE_ARN_REMOVED_SUFFIX=${ROLE_ARN%:*} + AWS_ACCOUNT_ID=${ROLE_ARN_REMOVED_SUFFIX#*::} + else + AWS_ACCOUNT_ID=$(kubectl get secret -n argo awsconfig-secret -ojsonpath='{.data.AWS_ACCOUNT_ID}' | base64 -d) + fi + + # generate OIDC provider for EKS cluster + ISSUER_URL=$KEYCLOAK_URL/realms/$CONTRACT_ID + CLIENT_ID=$CLUSTER_ID-k8s-api + cat <<EOF >oidc-config.yaml + apiVersion: eksctl.io/v1alpha5 + kind: ClusterConfig + metadata: + name: $CLUSTER_ID + region: ap-northeast-2 + identityProviders: + - name: keycloak + type: oidc + issuerUrl: $ISSUER_URL + clientId: $CLIENT_ID + usernameClaim: preferred_username + groupsClaim: groups + EOF + cat oidc-config.yaml + eksctl associate identityprovider -f oidc-config.yaml + env: + - name: CLUSTER_ID + value: "{{ inputs.parameters.cluster_id }}" + - name: KEYCLOAK_URL + value: "{{ inputs.parameters.keycloak_url }}" + - name: CONTRACT_ID + value: "{{ inputs.parameters.contract_id }}" + - name: CLOUD_ACCOUNT_ID + value: 
"{{workflow.parameters.cloud_account_id}}" + volumeMounts: + - name: awsconfig + mountPath: "/aws" + - name: kubeconfig-adm + mountPath: "/kube" diff --git a/tks-cluster/create-usercluster-wftpl.yaml b/tks-cluster/create-usercluster-wftpl.yaml index 1e606f5b..d7a47bcf 100644 --- a/tks-cluster/create-usercluster-wftpl.yaml +++ b/tks-cluster/create-usercluster-wftpl.yaml @@ -30,7 +30,9 @@ spec: - name: base_repo_branch value: "main" - name: keycloak_url - value: 'https://keycloak.yourdomain.org/auth' + value: "https://keycloak.yourdomain.org/auth" + - name: policy_ids + value: "" volumes: - name: kubeconfig-adm @@ -114,6 +116,16 @@ spec: ] when: "{{steps.tks-create-cluster-repo.outputs.parameters.infra_provider}} == byoh" + - - name: tks-create-config-secret + template: create-endpoint-secret + arguments: + parameters: + - name: cluster_domains + value: "{{steps.tks-get-cluster-info.outputs.parameters.cluster_domains}}" + - name: cluster_id + value: "{{ workflow.parameters.cluster_id }}" + when: "{{steps.tks-create-cluster-repo.outputs.parameters.infra_provider}} == byoh" + - - name: init-cluster-for-tks template: init-cluster-for-tks arguments: @@ -129,6 +141,24 @@ spec: - name: contract_id value: "{{ workflow.parameters.contract_id }}" + - - name: create-eks-keycloak-oidc-provider + templateRef: + name: aws-eks-keycloak-oidc-provider + template: createProvider + arguments: + parameters: + - name: contract_id + value: "{{ workflow.parameters.contract_id }}" + - name: cluster_id + value: "{{ workflow.parameters.cluster_id }}" + - name: keycloak_url + value: "{{ workflow.parameters.keycloak_url }}" + - name: cloud_account_id + value: "{{ workflow.parameters.cloud_account_id }}" + when: >- + {{steps.tks-create-cluster-repo.outputs.parameters.infra_provider}} == aws && + {{steps.tks-create-cluster-repo.outputs.parameters.managed_cluster}} == true + - - name: create-aws-cluster-autoscaler-iam templateRef: name: aws-cluster-autoscaler-iam @@ -340,59 +370,22 @@ spec: ] 
when: "{{steps.tks-create-cluster-repo.outputs.parameters.infra_provider}} == byoh" - - - name: set-keycloak-config + - - name: create-default-rbac-resources + template: k8s-rbac-setting + + - - name: install-policy-management templateRef: - name: set-user-cluster - template: main + name: tks-policy + template: deploy arguments: parameters: - name: cluster_id value: "{{ workflow.parameters.cluster_id }}" - - name: server_url - value: "{{ workflow.parameters.keycloak_url }}" - - name: target_realm_name + - name: contract_id value: "{{ workflow.parameters.contract_id }}" - - name: target_client_id - value: "{{ workflow.parameters.cluster_id}}-k8s-api" - - name: keycloak_credential_secret_name - value: "keycloak" - - name: keycloak_credential_secret_namespace - value: "keycloak" - - - - name: set-cluster-role-binding-cluster-admin - templateRef: - name: k8s-client - template: create-cluster-role-binding - arguments: - parameters: - - name: target_cluster_id - value: "{{workflow.parameters.cluster_id}}" - - name: is_self_target - value: "false" - - name: rolebinding_name - value: "{{workflow.parameters.cluster_id}}-cluster-admin" - - name: role_name - value: "admin" - - name: group_list - value: '["{{workflow.parameters.cluster_id}}-cluster-admin", "cluster-admin"]' - - - - name: set-cluster-role-binding-cluster-view - templateRef: - name: k8s-client - template: create-cluster-role-binding - arguments: - parameters: - - name: target_cluster_id - value: "{{workflow.parameters.cluster_id}}" - - name: is_self_target - value: "false" - - name: rolebinding_name - value: "{{workflow.parameters.cluster_id}}-cluster-view" - - name: role_name - value: "view" - - name: group_list - value: '["{{workflow.parameters.cluster_id}}-cluster-view", "cluster-view"]' - + - name: policy_ids + value: "{{ workflow.parameters.policy_ids }}" + # when: "{{steps.get-clusters-in-contract.outputs.parameters.primary_cluster}} != '' && {{workflow.parameters.cluster_id}} != 
{{steps.get-clusters-in-contract.outputs.parameters.primary_cluster}}" ####################### # Template Definition # @@ -417,6 +410,21 @@ spec: - | cp /kube/value kubeconfig_adm export KUBECONFIG=kubeconfig_adm + + ####### add tks info. on namespace ######## + kubectl label ns ${CLUSTER_ID} tks.io/organization=${CONTRACT_ID} + if [ $(kubectl get ns -l tks.io/organization=${CONTRACT_ID} --ignore-not-found=true | grep -v NAME | awk '{print $1}' | wc -l ) -le 1 ]; then + kubectl label ns ${CLUSTER_ID} tks.io/policy=${CLUSTER_ID} + else + POLICY_NS=$(kubectl get ns $(kubectl get ns -l tks.io/organization=${CONTRACT_ID} --ignore-not-found=true | grep -v NAME | awk '{print $1}' | head -n 1 ) --ignore-not-found=true -o jsonpath='{.metadata.labels.tks\.io\/policy}' ) + if [ -z "$POLICY_NS" ]; then + kubectl label ns ${CLUSTER_ID} tks.io/policy=${CLUSTER_ID} + else + kubectl label ns ${CLUSTER_ID} tks.io/policy=${POLICY_NS} + fi + fi + ########################################### + mkdir ~/.aws cp /aws/* ~/.aws/ @@ -529,32 +537,9 @@ spec: fi # Create a kubeconfig secret for TKS internal use from ArgoCD cluster secret and for TKS user - export KUBECONFIG=kubeconfig_adm + export KUBECONFIG=kubeconfig_adm if [ $kcp_count = 1 ]; then TKS_KUBECONFIG_WORKLOAD=$(kubectl get secret -n $CLUSTER_ID $CLUSTER_ID-kubeconfig -o jsonpath="{.data.value}" | base64 -d) - if [ "$INFRA_PROVIDER" == "byoh" ]; then - ISSUER_URL=$KEYCLOAK_URL/realms/$CONTRACT_ID - CLIENT_ID=$CLUSTER_ID-k8s-api - OIDC_USER_NAME="oidc-user" - EXISTING_USER_NAME=$CLUSTER_ID-admin - - kubectl get secret -n $CLUSTER_ID $CLUSTER_ID-kubeconfig -o jsonpath="{.data.value}" | base64 -d > tmp_user_kubeconfig - kubectl --kubeconfig=tmp_user_kubeconfig config unset users.$EXISTING_USER_NAME - kubectl --kubeconfig=tmp_user_kubeconfig config set-credentials $OIDC_USER_NAME \ - --exec-api-version=client.authentication.k8s.io/v1beta1 \ - --exec-command=kubectl \ - --exec-arg=oidc-login \ - --exec-arg=get-token \ - 
--exec-arg=--oidc-issuer-url=$ISSUER_URL \ - --exec-arg=--oidc-client-id=$CLIENT_ID \ - --exec-arg=--grant-type=password - - CONTEXT_NAME=$(kubectl --kubeconfig=tmp_user_kubeconfig config current-context) - kubectl --kubeconfig=tmp_user_kubeconfig config set-context $CONTEXT_NAME --user $OIDC_USER_NAME - TKS_USER_KUBECONFIG_WORKLOAD=$(cat tmp_user_kubeconfig) - else - TKS_USER_KUBECONFIG_WORKLOAD=$(kubectl get secret -n $CLUSTER_ID $CLUSTER_ID-kubeconfig -o jsonpath="{.data.value}" | base64 -d) - fi elif [ $awsmcp_count = 1 ]; then CAPA_USER_KUBECONFIG_WORKLOAD=$(kubectl get secret -n $CLUSTER_ID $CLUSTER_ID-user-kubeconfig -o jsonpath="{.data.value}" | base64 -d) @@ -566,14 +551,6 @@ spec: echo " token: ${CLIENT_TOKEN}" >> tmp_tks_kubeconfig_workload TKS_KUBECONFIG_WORKLOAD=$(cat tmp_tks_kubeconfig_workload) - # tks-user-kubeconfig - if [ "$CLOUD_ACCOUNT_ID" != "NULL" ]; then # multitenancy cluster - cat <<< $CAPA_USER_KUBECONFIG_WORKLOAD | sed "24 i \ \ \ \ \ - --role\n\ \ \ \ \ \ - $IDENTITY_ROLE_ARN" > tmp_tks_user_kubeconfig_workload - TKS_USER_KUBECONFIG_WORKLOAD=$(cat tmp_tks_user_kubeconfig_workload) - else - TKS_USER_KUBECONFIG_WORKLOAD=$CAPA_USER_KUBECONFIG_WORKLOAD - fi - cat < sc-taco-storage.yaml apiVersion: storage.k8s.io/v1 kind: StorageClass @@ -591,6 +568,28 @@ spec: echo "Wrong Cluster type!" 
exit 1 fi + + # generate kubeconfig for user + ISSUER_URL=$KEYCLOAK_URL/realms/$CONTRACT_ID + CLIENT_ID=$CLUSTER_ID-k8s-api + OIDC_USER_NAME="oidc-user" + EXISTING_USER_NAME=$CLUSTER_ID-admin + + kubectl get secret -n $CLUSTER_ID $CLUSTER_ID-kubeconfig -o jsonpath="{.data.value}" | base64 -d > tmp_user_kubeconfig + kubectl --kubeconfig=tmp_user_kubeconfig config unset users.$EXISTING_USER_NAME + kubectl --kubeconfig=tmp_user_kubeconfig config set-credentials $OIDC_USER_NAME \ + --exec-api-version=client.authentication.k8s.io/v1beta1 \ + --exec-command=kubectl \ + --exec-arg=oidc-login \ + --exec-arg=get-token \ + --exec-arg=--oidc-issuer-url=$ISSUER_URL \ + --exec-arg=--oidc-client-id=$CLIENT_ID \ + --exec-arg=--grant-type=password + + CONTEXT_NAME=$(kubectl --kubeconfig=tmp_user_kubeconfig config current-context) + kubectl --kubeconfig=tmp_user_kubeconfig config set-context $CONTEXT_NAME --user $OIDC_USER_NAME + TKS_USER_KUBECONFIG_WORKLOAD=$(cat tmp_user_kubeconfig) + cat <<< $TKS_KUBECONFIG_WORKLOAD > tks_kubeconfig_workload kubectl create secret generic -n $CLUSTER_ID $CLUSTER_ID-tks-kubeconfig --from-file=value=tks_kubeconfig_workload cat <<< $TKS_USER_KUBECONFIG_WORKLOAD > tks_user_kubeconfig_workload @@ -703,3 +702,199 @@ spec: kubectl --kubeconfig kubeconfig_temp apply -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/master/client/config/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -n kube-system kubectl --kubeconfig kubeconfig_temp apply -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/master/client/config/crd/snapshot.storage.k8s.io_volumesnapshots.yaml -n kube-system + + - name: k8s-rbac-setting + steps: + - - name: create-cluster-role-for-create + templateRef: + name: k8s-client + template: create-cluster-role + arguments: + parameters: + - name: target_cluster_id + value: "{{workflow.parameters.cluster_id}}" + - name: is_self_target + value: "false" + - name: cluster_role_name + value: 
"cluster-admin-create" + - name: api_group + value: '*' + - name: resource_name + value: '*' + - name: verbs + value: '["create"]' + - name: ignore_exist + value: 'true' + - - name: create-cluster-role-for-read + templateRef: + name: k8s-client + template: create-cluster-role + arguments: + parameters: + - name: target_cluster_id + value: "{{workflow.parameters.cluster_id}}" + - name: is_self_target + value: "false" + - name: cluster_role_name + value: "cluster-admin-read" + - name: api_group + value: '*' + - name: resource_name + value: '*' + - name: verbs + value: '["get", "list", "watch"]' + - name: ignore_exist + value: 'true' + - - name: create-cluster-role-for-update + templateRef: + name: k8s-client + template: create-cluster-role + arguments: + parameters: + - name: target_cluster_id + value: "{{workflow.parameters.cluster_id}}" + - name: is_self_target + value: "false" + - name: cluster_role_name + value: "cluster-admin-update" + - name: api_group + value: '*' + - name: resource_name + value: '*' + - name: verbs + value: '["update", "patch"]' + - name: ignore_exist + value: 'true' + - - name: create-cluster-role-for-delete + templateRef: + name: k8s-client + template: create-cluster-role + arguments: + parameters: + - name: target_cluster_id + value: "{{workflow.parameters.cluster_id}}" + - name: is_self_target + value: "false" + - name: cluster_role_name + value: "cluster-admin-delete" + - name: api_group + value: '*' + - name: resource_name + value: '*' + - name: verbs + value: '["delete", "deletecollection"]' + - name: ignore_exist + value: 'true' + - - name: set-cluster-role-binding-create + templateRef: + name: k8s-client + template: create-cluster-role-binding + arguments: + parameters: + - name: target_cluster_id + value: "{{workflow.parameters.cluster_id}}" + - name: is_self_target + value: "false" + - name: rolebinding_name + value: "cluster-admin-create-rb" + - name: role_name + value: "cluster-admin-create" + - name: group_list + value: 
'["cluster-admin-create"]' + - - name: set-cluster-role-binding-read + templateRef: + name: k8s-client + template: create-cluster-role-binding + arguments: + parameters: + - name: target_cluster_id + value: "{{workflow.parameters.cluster_id}}" + - name: is_self_target + value: "false" + - name: rolebinding_name + value: "cluster-admin-read-rb" + - name: role_name + value: "cluster-admin-read" + - name: group_list + value: '["cluster-admin-read"]' + - - name: set-cluster-role-binding-update + templateRef: + name: k8s-client + template: create-cluster-role-binding + arguments: + parameters: + - name: target_cluster_id + value: "{{workflow.parameters.cluster_id}}" + - name: is_self_target + value: "false" + - name: rolebinding_name + value: "cluster-admin-update-rb" + - name: role_name + value: "cluster-admin-update" + - name: group_list + value: '["cluster-admin-update"]' + - - name: set-cluster-role-binding-delete + templateRef: + name: k8s-client + template: create-cluster-role-binding + arguments: + parameters: + - name: target_cluster_id + value: "{{workflow.parameters.cluster_id}}" + - name: is_self_target + value: "false" + - name: rolebinding_name + value: "cluster-admin-delete-rb" + - name: role_name + value: "cluster-admin-delete" + - name: group_list + value: '["cluster-admin-delete"]' + + - name: create-endpoint-secret + inputs: + parameters: + - name: cluster_domains + - name: cluster_id + container: + name: create-namespace + image: harbor.taco-cat.xyz/tks/hyperkube:v1.18.6 + command: + - /bin/bash + - '-c' + - | + CLUSTER_ID={{inputs.parameters.cluster_id}} + + GRAFANA_URL=$(echo $CLUSTER_DOMAINS | jq -r '. | map(select(.domainType | contains("grafana"))|.url)'[]) + LOKI_URL=$(echo $CLUSTER_DOMAINS | jq -r '. | map(select(.domainType | contains("loki"))|.url)'[]) + MINIO_URL=$(echo $CLUSTER_DOMAINS | jq -r '. | map(select(.domainType | contains("minio"))|.url)'[]) + PROMETHEUS_URL=$(echo $CLUSTER_DOMAINS | jq -r '. 
| map(select(.domainType | contains("prometheus"))|.url)'[]) + THANOS_URL=$(echo $CLUSTER_DOMAINS | jq -r '. | map(select(.domainType | contains("thanos"))|.url)'[]) + LOKI_USER_URL=$(echo $CLUSTER_DOMAINS | jq -r '. | map(select(.domainType | contains("loki_user"))|.url)'[]) + THANOS_RULER_URL=$(echo $CLUSTER_DOMAINS | jq -r '. | map(select(.domainType | contains("thanos_ruler"))|.url)'[]) + KIALI_URL=$(echo $CLUSTER_DOMAINS | jq -r '. | map(select(.domainType | contains("kiali"))|.url)'[]) + JAEGER_URL=$(echo $CLUSTER_DOMAINS | jq -r '. | map(select(.domainType | contains("jaeger"))|.url)'[]) + + cat < tks-endpoint-secret.yaml + --- + apiVersion: v1 + kind: Secret + metadata: + name: tks-endpoint-secret + namespace: ${CLUSTER_ID} + data: + grafana: $(echo ${GRAFANA_URL} | base64) # 30001 + loki: $(echo ${LOKI_URL} | base64) # 30002 + minio: $(echo ${MINIO_URL} | base64) # 30003 + prometheus: $(echo ${PROMETHEUS_URL} | base64) # 30004 + thanos: $(echo ${THANOS_URL} | base64) # 30005 (queryfrontend만 합시다...) 
+ loki_user: $(echo ${LOKI_USER_URL} | base64) # 30006 + thanos_ruler: $(echo ${THANOS_RULER_URL} | base64) # 30007 + kiali: $(echo ${KIALI_URL} | base64) # 30011 + jaeger: $(echo ${JAEGER_URL} | base64) # 30012 + EOF + kubectl apply -f tks-endpoint-secret.yaml + env: + - name: CLUSTER_DOMAINS + value: "{{inputs.parameters.cluster_domains}}" + + activeDeadlineSeconds: 30 diff --git a/tks-cluster/import-usercluster-wftpl.yaml b/tks-cluster/import-usercluster-wftpl.yaml index 952798a8..a9be5b72 100644 --- a/tks-cluster/import-usercluster-wftpl.yaml +++ b/tks-cluster/import-usercluster-wftpl.yaml @@ -15,6 +15,10 @@ spec: value: "P0010010a" - name: cluster_id value: "C011b88fa" + - name: site_name + value: "{{workflow.parameters.cluster_id}}" + - name: app_prefix + value: "{{workflow.parameters.cluster_id}}" - name: kubeconfig value: "KUBECONFIG_string" - name: git_account @@ -25,6 +29,10 @@ spec: value: "NULL" - name: base_repo_branch value: "main" + - name: policy_ids + value: "" + - name: revision + value: "main" volumes: - name: kubeconfig-adm @@ -40,7 +48,7 @@ spec: - name: awsconfig secret: secretName: awsconfig-secret - + templates: - name: deploy steps: @@ -65,7 +73,7 @@ spec: parameters: - name: cluster_id value: "{{ workflow.parameters.cluster_id }}" - - name: kubeconfig + - name: kubeconfig value: "{{ workflow.parameters.kubeconfig }}" - - name: render-manifests @@ -80,6 +88,15 @@ spec: value: "{{ workflow.parameters.base_repo_branch }}" when: "{{steps.tks-get-cluster-info.outputs.parameters.cluster_type}} != ADMIN" + - - name: tks-create-config-secret + template: create-endpoint-secret + arguments: + parameters: + - name: cluster_domains + value: "{{steps.tks-get-cluster-info.outputs.parameters.cluster_domains}}" + - name: cluster_id + value: "{{ workflow.parameters.cluster_id }}" + - - name: init-cluster-for-tks template: init-cluster-for-tks arguments: @@ -95,10 +112,36 @@ spec: - name: contract_id value: "{{ workflow.parameters.contract_id }}" + - - 
name: suspend + template: suspend + + - - name: create-default-rbac-resources + templateRef: + name: create-tks-usercluster + template: k8s-rbac-setting + arguments: + parameters: + - name: cluster_id + value: "{{ workflow.parameters.cluster_id }}" + + - - name: install-policy-management + templateRef: + name: tks-policy + template: deploy + arguments: + parameters: + - name: cluster_id + value: "{{ workflow.parameters.cluster_id }}" + - name: contract_id + value: "{{ workflow.parameters.contract_id }}" + - name: policy_ids + value: "{{ workflow.parameters.policy_ids }}" ####################### # Template Definition # ####################### + - name: suspend + suspend: {} - name: import-cluster inputs: @@ -151,7 +194,7 @@ spec: - name: cluster_id container: name: cluster-init - image: harbor.taco-cat.xyz/tks/python_kubectl_argo:v1.1.0 + image: harbor.taco-cat.xyz/tks/tks-cluster-init:v1.0.0 command: - /bin/bash - '-exc' @@ -159,6 +202,20 @@ spec: cp /kube/value kubeconfig_adm export KUBECONFIG=kubeconfig_adm + ####### add tks info. 
on namespace ######## + kubectl label ns ${CLUSTER_ID} tks.io/organization=${CONTRACT_ID} + if [ $(kubectl get ns -l tks.io/organization=${CONTRACT_ID} --ignore-not-found=true | grep -v NAME | awk '{print $1}' | wc -l ) -le 1 ]; then + kubectl label ns ${CLUSTER_ID} tks.io/policy=${CLUSTER_ID} + else + POLICY_NS=$(kubectl get ns $(kubectl get ns -l tks.io/organization=${CONTRACT_ID} --ignore-not-found=true | grep -v NAME | awk '{print $1}' | head -n 1 ) --ignore-not-found=true -o jsonpath='{.metadata.labels.tks\.io\/policy}' ) + if [ -z "$POLICY_NS" ]; then + kubectl label ns ${CLUSTER_ID} tks.io/policy=${CLUSTER_ID} + else + kubectl label ns ${CLUSTER_ID} tks.io/policy=${POLICY_NS} + fi + fi + ########################################### + KUBECONFIG_WORKLOAD=$(kubectl get secret -n $CLUSTER_ID $CLUSTER_ID-tks-kubeconfig -o jsonpath="{.data.value}" | base64 -d) cat <<< "$KUBECONFIG_WORKLOAD" > kubeconfig_workload @@ -203,3 +260,52 @@ spec: - name: CLUSTER_ID value: "{{ inputs.parameters.cluster_id }}" + + - name: create-endpoint-secret + inputs: + parameters: + - name: cluster_domains + - name: cluster_id + container: + name: create-namespace + image: harbor.taco-cat.xyz/tks/hyperkube:v1.18.6 + command: + - /bin/bash + - '-c' + - | + CLUSTER_ID={{inputs.parameters.cluster_id}} + + GRAFANA_URL=$(echo $CLUSTER_DOMAINS | jq -r '. | map(select(.domainType | contains("grafana"))|.url)'[]) + LOKI_URL=$(echo $CLUSTER_DOMAINS | jq -r '. | map(select(.domainType | contains("loki"))|.url)'[]) + MINIO_URL=$(echo $CLUSTER_DOMAINS | jq -r '. | map(select(.domainType | contains("minio"))|.url)'[]) + PROMETHEUS_URL=$(echo $CLUSTER_DOMAINS | jq -r '. | map(select(.domainType | contains("prometheus"))|.url)'[]) + THANOS_URL=$(echo $CLUSTER_DOMAINS | jq -r '. | map(select(.domainType | contains("thanos"))|.url)'[]) + LOKI_USER_URL=$(echo $CLUSTER_DOMAINS | jq -r '. | map(select(.domainType | contains("loki_user"))|.url)'[]) + THANOS_RULER_URL=$(echo $CLUSTER_DOMAINS | jq -r '. 
| map(select(.domainType | contains("thanos_ruler"))|.url)'[]) + KIALI_URL=$(echo $CLUSTER_DOMAINS | jq -r '. | map(select(.domainType | contains("kiali"))|.url)'[]) + JAEGER_URL=$(echo $CLUSTER_DOMAINS | jq -r '. | map(select(.domainType | contains("jaeger"))|.url)'[]) + + cat < tks-endpoint-secret.yaml + --- + apiVersion: v1 + kind: Secret + metadata: + name: tks-endpoint-secret + namespace: ${CLUSTER_ID} + data: + grafana: $(echo ${GRAFANA_URL} | base64) # 30001 + loki: $(echo ${LOKI_URL} | base64) # 30002 + minio: $(echo ${MINIO_URL} | base64) # 30003 + prometheus: $(echo ${PROMETHEUS_URL} | base64) # 30004 + thanos: $(echo ${THANOS_URL} | base64) # 30005 (queryfrontend만 합시다...) + loki_user: $(echo ${LOKI_USER_URL} | base64) # 30006 + thanos_ruler: $(echo ${THANOS_RULER_URL} | base64) # 30007 + kiali: $(echo ${KIALI_URL} | base64) # 30011 + jaeger: $(echo ${JAEGER_URL} | base64) # 30012 + EOF + kubectl apply -f tks-endpoint-secret.yaml + env: + - name: CLUSTER_DOMAINS + value: "{{inputs.parameters.cluster_domains}}" + + activeDeadlineSeconds: 30 diff --git a/tks-cluster/remove-usercluster-wftpl.yaml b/tks-cluster/remove-usercluster-wftpl.yaml index ae4ce933..ce6e7471 100644 --- a/tks-cluster/remove-usercluster-wftpl.yaml +++ b/tks-cluster/remove-usercluster-wftpl.yaml @@ -190,6 +190,31 @@ spec: - name: app_name value: "{{workflow.parameters.app_prefix}}-argo-rollouts" + - - name: deletePolicyCR + template: deletePolicyCR + arguments: + parameters: + - name: target_namespace + value: "{{workflow.parameters.cluster_id}}" + + - - name: deletePolicyResourcesApp + templateRef: + name: delete-apps + template: DeleteAppsByName + arguments: + parameters: + - name: app_name + value: "{{workflow.parameters.app_prefix}}-policy-resources" + + - - name: deleteOpaGatekeeperApp + templateRef: + name: delete-apps + template: DeleteAppsByName + arguments: + parameters: + - name: app_name + value: "{{workflow.parameters.app_prefix}}-opa-gatekeeper" + - - name: deleteClusterCR 
template: deleteClusterCR arguments: @@ -207,24 +232,6 @@ spec: name: tks-delete-cluster-repo template: deleteClusterRepo - - - name: unset-keycloak-config - templateRef: - name: keycloak-client - template: delete-client - arguments: - parameters: - - name: server_url - value: "{{ workflow.parameters.keycloak_url }}" - - name: target_realm_name - value: "{{ workflow.parameters.contract_id }}" - - name: target_client_id - value: "{{ workflow.parameters.cluster_id}}-k8s-api" - - name: keycloak_credential_secret_name - value: "keycloak" - - name: keycloak_credential_secret_namespace - value: "keycloak" - - ####################### # Template Definition # @@ -386,3 +393,45 @@ spec: value: "{{inputs.parameters.target_namespace}}" - name: CLUSTER_NAME value: "{{workflow.parameters.cluster_id}}" + + - name: deletePolicyCR + inputs: + parameters: + - name: target_namespace + container: + name: delete-policy-cr + image: harbor.taco-cat.xyz/tks/tks-cluster-init:v1.0.0 + command: + - /bin/bash + - '-c' + - | + cp /kube/value kubeconfig_adm + export KUBECONFIG=kubeconfig_adm + + POLICIES=$(kubectl get tkspolicy -n ${TARGET_NAMESPACE} --ignore-not-found=true | grep -v NAME) + if [ -n "$POLICIES" ]; then + kubectl get tkspolicy -n ${TARGET_NAMESPACE} -o name | sed -e 's/.*\///g' | xargs -I {} kubectl delete tkspolicy {} -n ${TARGET_NAMESPACE} --wait=false + sleep 10 + + REMAINS=$(kubectl get tkspolicy -n ${TARGET_NAMESPACE} --ignore-not-found=true | grep -v NAME) + if [ -n "$REMAINS" ]; then + kubectl get tkspolicy -n ${TARGET_NAMESPACE} -o name | sed -e 's/.*\///g' | xargs -I {} kubectl get tkspolicy {} -n ${TARGET_NAMESPACE} -ojson | jq '.metadata.finalizers = null' | kubectl apply -f - + fi + fi + + POLICY_TEMPLATES=$(kubectl get tkspolicytemplates -n ${TARGET_NAMESPACE} --ignore-not-found=true | grep -v NAME) + if [ -n "$POLICY_TEMPLATES" ]; then + kubectl get tkspolicytemplates -n ${TARGET_NAMESPACE} -o name | sed -e 's/.*\///g' | xargs -I {} kubectl delete 
tkspolicytemplates {} -n ${TARGET_NAMESPACE} --wait=false + sleep 10 + + REMAINS=$(kubectl get tkspolicytemplates -n ${TARGET_NAMESPACE} --ignore-not-found=true | grep -v NAME) + if [ -n "$REMAINS" ]; then + kubectl get tkspolicytemplates -n ${TARGET_NAMESPACE} -o name | sed -e 's/.*\///g' | xargs -I {} kubectl get tkspolicytemplates {} -n ${TARGET_NAMESPACE} -ojson | jq '.metadata.finalizers = null' | kubectl apply -f - + fi + fi + env: + - name: TARGET_NAMESPACE + value: "{{inputs.parameters.target_namespace}}" + volumeMounts: + - name: kubeconfig-adm + mountPath: "/kube" \ No newline at end of file diff --git a/tks-stack/tks-stack-create.yaml b/tks-stack/tks-stack-create.yaml index f0acdc30..00992013 100644 --- a/tks-stack/tks-stack-create.yaml +++ b/tks-stack/tks-stack-create.yaml @@ -31,6 +31,10 @@ spec: value: develop - name: cluster_endpoint value: "" + - name: policy_ids + value: "" + - name: cluster_domains + value: "" templates: - name: main @@ -64,6 +68,10 @@ spec: value: "{{workflow.parameters.cloud_service}}" - name: cluster_endpoint value: "{{workflow.parameters.cluster_endpoint}}" + - name: policy_ids + value: "{{workflow.parameters.policy_ids}}" + - name: cluster_domains + value: "{{workflow.parameters.cluster_domains}}" - - name: call-create-appgroup-for-LMA templateRef: diff --git a/tks-stack/tks-stack-import.yaml b/tks-stack/tks-stack-import.yaml new file mode 100644 index 00000000..14c7fa0e --- /dev/null +++ b/tks-stack/tks-stack-import.yaml @@ -0,0 +1,96 @@ +apiVersion: argoproj.io/v1alpha1 +kind: WorkflowTemplate +metadata: + name: tks-stack-import + namespace: argo +spec: + entrypoint: main + arguments: + parameters: + - name: tks_api_url + value: "http://tks-api.tks.svc:9110" + - name: organization_id + value: "" + - name: stack_template_id + value: "NULL" + - name: creator + value: "" + - name: description + value: "" + - name: object_store + value: "minio" + - name: base_repo_branch + value: develop + - name: policy_ids + value: "" + - 
name: cluster_domains + value: "" + - name: kubeconfig_string + value: "" + + templates: + - name: main + steps: + - - name: tks-get-stack-template-type + templateRef: + name: tks-get-stack-template-type + template: getTksGetStackTemplateType + + - - name: call-import-usercluster + templateRef: + name: tks-cli + template: import-usercluster + arguments: + parameters: + - name: cluster_name + value: "{{workflow.parameters.cluster_name}}" + - name: stack_template_id + value: "{{workflow.parameters.stack_template_id}}" + - name: organization_id + value: "{{workflow.parameters.organization_id}}" + - name: creator + value: "{{workflow.parameters.creator}}" + - name: description + value: "{{workflow.parameters.description}}" + - name: policy_ids + value: "{{workflow.parameters.policy_ids}}" + - name: cluster_domains + value: "{{workflow.parameters.cluster_domains}}" + - name: kubeconfig_string + value: "{{workflow.parameters.kubeconfig_string}}" + + - - name: call-create-appgroup-for-LMA + templateRef: + name: tks-cli + template: create-appgroup + arguments: + parameters: + - name: cluster_id + value: "{{steps.call-import-usercluster.outputs.parameters.cluster-id}}" + - name: name + value: "{{steps.call-import-usercluster.outputs.parameters.cluster-id}}_lma" + - name: type + value: "LMA" + - name: creator + value: "{{workflow.parameters.creator}}" + - name: description + value: "{{workflow.parameters.description}}" + + - - name: call-create-appgroup-for-SERVICEMESH + templateRef: + name: tks-cli + template: create-appgroup + arguments: + parameters: + - name: cluster_id + value: "{{steps.call-import-usercluster.outputs.parameters.cluster-id}}" + - name: name + value: "{{steps.call-import-usercluster.outputs.parameters.cluster-id}}_servicemesh" + - name: type + value: "SERVICE_MESH" + - name: creator + value: "{{workflow.parameters.creator}}" + - name: description + value: "{{workflow.parameters.description}}" + when: 
"{{steps.tks-get-stack-template-type.outputs.parameters.stack_template_type}} == MSA" + diff --git a/tks_info/apply-policies.yaml b/tks_info/apply-policies.yaml new file mode 100644 index 00000000..203452f5 --- /dev/null +++ b/tks_info/apply-policies.yaml @@ -0,0 +1,81 @@ +apiVersion: argoproj.io/v1alpha1 +kind: WorkflowTemplate +metadata: + name: tks-apply-policies + namespace: argo +spec: + entrypoint: applyPolicies + arguments: + parameters: + - name: tks_api_url + value: "http://tks-api-dev.taco-cat.xyz" + - name: contract_id + value: "" + - name: cluster_id + value: "" + - name: policy_ids + value: "" + + templates: + - name: applyPolicies + inputs: + parameters: + - name: contract_id + - name: cluster_id + - name: policy_ids + + script: + image: harbor.taco-cat.xyz/tks/centos-tks-api:v1.0 + command: ["python"] + env: + - name: PYTHONPATH + value: "/opt/protobuf/:/opt/rh/rh-python38/root/lib/python3.8/site-packages/:/opt/app-root/lib/python3.8/site-packages/" + envFrom: + - secretRef: + name: "git-svc-token" + - secretRef: + name: "tks-api-secret" + source: | + import os + import requests + import sys + + TKS_API_URL = '{{workflow.parameters.tks_api_url}}' + ORGANIZATION_ID = '{{inputs.parameters.contract_id}}' + CLUSTER_ID = '{{inputs.parameters.cluster_id}}' + POLICY_IDS = '{{inputs.parameters.policy_ids}}' + + def getToken(): + data = { + 'organizationId': os.environ['ORGANIZATION_ID'], + 'accountId': os.environ['ACCOUNT_ID'], + 'password': os.environ['PASSWORD'] + } + + res = requests.post(TKS_API_URL + '/api/1.0/auth/login', json=data) + if res.status_code != 200: + logStr='response text: {}\n'.format(res.text) + with open(UPDATE_LOG, "a") as f: + f.write(wfRefStr) + f.write(loginErrStr) + sys.exit(loginErrStr) + res_json = res.json() + return res_json['user']['token'] + + TOKEN = getToken() + uri = '/api/1.0/organizations/%s/stacks/%s/policies' % (ORGANIZATION_ID, CLUSTER_ID) + + arrPolicies = POLICY_IDS.split(',') + + data = { + 'policyIds': arrPolicies 
+ } + res = requests.post(TKS_API_URL + uri, + headers={"Authorization": "Bearer " + TOKEN}, + json=data) + if res.status_code != 200: + logStr='response text: {}\n'.format(res.text) + print(logStr) + + res_json = res.json() + print(res_json) diff --git a/tks_info/get-tks-cluster-wftpl.yaml b/tks_info/get-tks-cluster-wftpl.yaml index df7326f3..6def5d97 100644 --- a/tks_info/get-tks-cluster-wftpl.yaml +++ b/tks_info/get-tks-cluster-wftpl.yaml @@ -21,6 +21,9 @@ spec: - name: cluster_type valueFrom: path: /mnt/out/cluster_type.txt + - name: cluster_domains + valueFrom: + path: /mnt/out/cluster_domains.txt volumes: - name: out emptyDir: {} @@ -67,6 +70,7 @@ spec: print(res.text) clusterSiteValues = res.json()['clusterSiteValues'] + clusterDomains = clusterSiteValues['domains'] with open("/mnt/out/cluster_info.txt", "w") as f: #cluster_conf = str(clusterSiteValues) @@ -77,3 +81,7 @@ spec: cluster_type = clusterSiteValues['clusterType'] print(cluster_type) f.write(cluster_type) + with open("/mnt/out/cluster_domains.txt", "w") as f: + cluster_domains = json.dumps(clusterDomains) + print(cluster_domains) + f.write(cluster_domains) diff --git a/tks_info/get-tks-stack-template-wftpl.yaml b/tks_info/get-tks-stack-template-wftpl.yaml index a447f753..9590af32 100644 --- a/tks_info/get-tks-stack-template-wftpl.yaml +++ b/tks_info/get-tks-stack-template-wftpl.yaml @@ -58,7 +58,7 @@ spec: return resJson['user']['token'] - res = requests.get(TKS_API_URL+"/api/1.0/stack-templates/" + STACK_TEMPLATE_ID, headers={"Authorization": "Bearer " + getToken(), "Content-Type" : "application/json"} ) + res = requests.get(TKS_API_URL+"/api/1.0/admin/stack-templates/" + STACK_TEMPLATE_ID, headers={"Authorization": "Bearer " + getToken(), "Content-Type" : "application/json"} ) if res.status_code != 200 : sys.exit('Failed to get stackTemplate') diff --git a/tks_info/tks-check-node.yaml b/tks_info/tks-check-node.yaml index f2b2dac8..ee249c8b 100644 --- a/tks_info/tks-check-node.yaml +++ 
b/tks_info/tks-check-node.yaml @@ -58,7 +58,7 @@ spec: return resJson['user']['token'] - res = requests.get(TKS_API_URL+"/api/1.0/stack-templates/" + STACK_TEMPLATE_ID, headers={"Authorization": "Bearer " + getToken(), "Content-Type" : "application/json"} ) + res = requests.get(TKS_API_URL+"/api/1.0/admin/stack-templates/" + STACK_TEMPLATE_ID, headers={"Authorization": "Bearer " + getToken(), "Content-Type" : "application/json"} ) if res.status_code != 200 : sys.exit('Failed to get stackTemplate') diff --git a/tks_info/update-asa-endpoint-wftpl.yaml b/tks_info/update-asa-endpoint-wftpl.yaml index cdb90596..891088b9 100644 --- a/tks_info/update-asa-endpoint-wftpl.yaml +++ b/tks_info/update-asa-endpoint-wftpl.yaml @@ -24,6 +24,7 @@ spec: inputs: parameters: - name: organization_id + - name: project_id - name: asa_id - name: asa_task_id - name: endpoint @@ -50,6 +51,7 @@ spec: TKS_API_URL = '{{workflow.parameters.tks_api_url}}' ORG_ID = '{{inputs.parameters.organization_id}}' + PROJECT_ID = '{{inputs.parameters.project_id}}' APP_ID = '{{inputs.parameters.asa_id}}' TASK_ID = '{{inputs.parameters.asa_task_id}}' ENDPOINT_URL = '{{inputs.parameters.endpoint}}' @@ -74,16 +76,13 @@ spec: logStr='response text: {}\n'.format(res.text) with open(UPDATE_LOG, "a") as f: f.write(wfRefStr) - f.write(logStr) f.write(loginErrStr) - print(logStr) sys.exit(loginErrStr) res_json = res.json() return res_json['user']['token'] - TOKEN = getToken() - uri = '/api/1.0/organizations/%s/app-serve-apps/%s/endpoint' % (ORG_ID, APP_ID) + uri = '/api/1.0/organizations/%s/projects/%s/app-serve-apps/%s/endpoint' % (ORG_ID, PROJECT_ID, APP_ID) data = { 'taskId': TASK_ID, 'endpointUrl': ENDPOINT_URL, diff --git a/tks_info/update-asa-status-wftpl.yaml b/tks_info/update-asa-status-wftpl.yaml index ddf5bdc0..637f8524 100644 --- a/tks_info/update-asa-status-wftpl.yaml +++ b/tks_info/update-asa-status-wftpl.yaml @@ -15,6 +15,7 @@ spec: inputs: parameters: - name: organization_id + - name: project_id - 
name: asa_id - name: asa_task_id - name: status @@ -38,6 +39,7 @@ spec: TKS_API_URL = '{{workflow.parameters.tks_api_url}}' ORG_ID = '{{inputs.parameters.organization_id}}' + PROJECT_ID = '{{inputs.parameters.project_id}}' APP_ID = '{{inputs.parameters.asa_id}}' TASK_ID = '{{inputs.parameters.asa_task_id}}' STATUS = '{{inputs.parameters.status}}' @@ -59,7 +61,7 @@ spec: TOKEN = getToken() - uri = '/api/1.0/organizations/%s/app-serve-apps/%s/status' % (ORG_ID, APP_ID) + uri = '/api/1.0/organizations/%s/projects/%s/app-serve-apps/%s/status' % (ORG_ID, PROJECT_ID, APP_ID) data = { 'taskId': TASK_ID, 'status': STATUS, @@ -69,7 +71,7 @@ spec: headers={"Authorization": "Bearer " + TOKEN}, json=data) if res.status_code != 200: - print('text: {}\n'.format(res.text)) + print('Response: {}\n'.format(res.text)) sys.exit('Failed to update status') res_json = res.json()